Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/sys_machdep.c | 6
-rw-r--r--  sys/amd64/amd64/vm_machdep.c | 8
-rw-r--r--  sys/amd64/linux/linux_proto.h | 8
-rw-r--r--  sys/amd64/linux/linux_syscall.h | 2
-rw-r--r--  sys/amd64/linux/linux_syscalls.c | 2
-rw-r--r--  sys/amd64/linux/linux_sysent.c | 2
-rw-r--r--  sys/amd64/linux/linux_systrace_args.c | 12
-rw-r--r--  sys/amd64/linux/syscalls.master | 6
-rw-r--r--  sys/amd64/linux32/linux32_proto.h | 8
-rw-r--r--  sys/amd64/linux32/linux32_syscall.h | 2
-rw-r--r--  sys/amd64/linux32/linux32_syscalls.c | 2
-rw-r--r--  sys/amd64/linux32/linux32_sysent.c | 2
-rw-r--r--  sys/amd64/linux32/linux32_systrace_args.c | 12
-rw-r--r--  sys/amd64/linux32/syscalls.master | 6
-rw-r--r--  sys/arm/allwinner/axp209.c | 2
-rw-r--r--  sys/arm/broadcom/bcm2835/bcm2835_cpufreq.c | 2
-rw-r--r--  sys/arm/freescale/imx/imx6_anatop.c | 2
-rw-r--r--  sys/arm/freescale/imx/imx_gpio.c | 6
-rw-r--r--  sys/arm/include/_align.h | 8
-rw-r--r--  sys/arm/include/param.h | 8
-rw-r--r--  sys/arm/mv/mpic.c | 6
-rw-r--r--  sys/arm/mv/mv_ts.c | 2
-rw-r--r--  sys/arm64/arm64/busdma_bounce.c | 3
-rw-r--r--  sys/arm64/arm64/gic_v3_reg.h | 28
-rw-r--r--  sys/arm64/cavium/thunder_pcie_pem.c | 14
-rw-r--r--  sys/arm64/conf/DEFAULTS | 1
-rw-r--r--  sys/arm64/include/resource.h | 3
-rw-r--r--  sys/boot/efi/loader/main.c | 194
-rw-r--r--  sys/boot/fdt/fdt_loader_cmd.c | 2
-rw-r--r--  sys/boot/i386/libi386/biosdisk.c | 39
-rw-r--r--  sys/boot/i386/zfsboot/zfsboot.c | 11
-rw-r--r--  sys/cam/ata/ata_all.c | 259
-rw-r--r--  sys/cam/ata/ata_all.h | 17
-rw-r--r--  sys/cam/ata/ata_da.c | 1342
-rw-r--r--  sys/cam/cam_ccb.h | 12
-rw-r--r--  sys/cam/ctl/scsi_ctl.c | 23
-rw-r--r--  sys/cam/scsi/scsi_all.c | 301
-rw-r--r--  sys/cam/scsi/scsi_all.h | 96
-rw-r--r--  sys/cam/scsi/scsi_da.c | 1806
-rw-r--r--  sys/cam/scsi/scsi_da.h | 112
-rw-r--r--  sys/compat/linux/linux_file.c | 226
-rw-r--r--  sys/compat/linux/linux_futex.c | 235
-rw-r--r--  sys/compat/linux/linux_socket.c | 29
-rw-r--r--  sys/compat/linuxkpi/common/include/asm/atomic-long.h | 16
-rw-r--r--  sys/compat/linuxkpi/common/include/asm/uaccess.h | 3
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/bitops.h | 43
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/cdev.h | 12
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/completion.h | 1
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/gfp.h | 13
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/io.h | 8
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/kernel.h | 34
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/kref.h | 19
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/ktime.h | 32
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/list.h | 19
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/time.h | 1
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_compat.c | 42
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_pci.c | 2
-rw-r--r--  sys/compat/ndis/subr_ntoskrnl.c | 9
-rw-r--r--  sys/conf/files | 1
-rw-r--r--  sys/conf/files.amd64 | 1
-rw-r--r--  sys/conf/files.arm64 | 2
-rw-r--r--  sys/conf/files.i386 | 1
-rw-r--r--  sys/conf/kern.post.mk | 8
-rw-r--r--  sys/contrib/ipfilter/netinet/ip_frag.c | 20
-rw-r--r--  sys/contrib/ipfilter/netinet/ip_frag.h | 1
-rw-r--r--  sys/dev/acpi_support/acpi_asus_wmi.c | 2
-rw-r--r--  sys/dev/acpi_support/acpi_ibm.c | 2
-rw-r--r--  sys/dev/acpi_support/atk0110.c | 6
-rw-r--r--  sys/dev/acpica/acpi_pcib_pci.c | 17
-rw-r--r--  sys/dev/acpica/acpi_thermal.c | 2
-rw-r--r--  sys/dev/ahci/ahci.c | 3
-rw-r--r--  sys/dev/amdtemp/amdtemp.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ah_regdomain.c | 479
-rw-r--r--  sys/dev/ath/ath_hal/ah_regdomain.h | 5
-rw-r--r--  sys/dev/bhnd/bcma/bcma_bhndb.c | 21
-rw-r--r--  sys/dev/bhnd/bcma/bcma_nexus.c | 111
-rw-r--r--  sys/dev/bhnd/bhnd.c | 88
-rw-r--r--  sys/dev/bhnd/bhnd.h | 123
-rw-r--r--  sys/dev/bhnd/bhnd_bus_if.m | 208
-rw-r--r--  sys/dev/bhnd/bhnd_types.h | 9
-rw-r--r--  sys/dev/bhnd/bhndb/bhnd_bhndb.c | 77
-rw-r--r--  sys/dev/bhnd/bhndb/bhndb.c | 93
-rw-r--r--  sys/dev/bhnd/bhndb/bhndb.h | 1
-rw-r--r--  sys/dev/bhnd/bhndb/bhndb_private.h | 7
-rw-r--r--  sys/dev/bhnd/bhndb/bhndb_subr.c | 46
-rw-r--r--  sys/dev/bhnd/nvram/bhnd_sprom.c | 12
-rw-r--r--  sys/dev/bhnd/siba/siba_bhndb.c | 21
-rw-r--r--  sys/dev/bhnd/soc/bhnd_soc.c | 266
-rw-r--r--  sys/dev/bhnd/soc/bhnd_soc.h | 51
-rw-r--r--  sys/dev/bhnd/tools/bus_macro.sh | 21
-rw-r--r--  sys/dev/bwn/if_bwn.c | 2
-rw-r--r--  sys/dev/coretemp/coretemp.c | 2
-rw-r--r--  sys/dev/cpuctl/cpuctl.c | 2
-rw-r--r--  sys/dev/cxgbe/cxgbei/icl_cxgbei.c | 1
-rw-r--r--  sys/dev/cxgbe/tom/t4_ddp.c | 2
-rw-r--r--  sys/dev/gpio/gpioiic.c | 61
-rw-r--r--  sys/dev/gpio/gpioled.c | 25
-rw-r--r--  sys/dev/hyperv/include/hyperv_busdma.h | 45
-rw-r--r--  sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 15
-rw-r--r--  sys/dev/hyperv/vmbus/hv_connection.c | 8
-rw-r--r--  sys/dev/hyperv/vmbus/hv_hv.c | 216
-rw-r--r--  sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c | 69
-rw-r--r--  sys/dev/hyperv/vmbus/hv_vmbus_priv.h | 59
-rw-r--r--  sys/dev/hyperv/vmbus/hyperv_busdma.c | 98
-rw-r--r--  sys/dev/hyperv/vmbus/hyperv_reg.h | 37
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_var.h | 8
-rw-r--r--  sys/dev/iicbus/ds3231reg.h | 2
-rw-r--r--  sys/dev/iicbus/iic.c | 9
-rw-r--r--  sys/dev/iicbus/iic.h | 2
-rw-r--r--  sys/dev/iicbus/iicbb_if.m | 1
-rw-r--r--  sys/dev/iicbus/iiconf.c | 1
-rw-r--r--  sys/dev/iicbus/lm75.c | 2
-rw-r--r--  sys/dev/ipw/if_ipw.c | 53
-rw-r--r--  sys/dev/ipw/if_ipwvar.h | 2
-rw-r--r--  sys/dev/iscsi/icl.h | 2
-rw-r--r--  sys/dev/iscsi/icl_soft.c | 18
-rw-r--r--  sys/dev/iscsi/iscsi.c | 59
-rw-r--r--  sys/dev/isp/isp.c | 13
-rw-r--r--  sys/dev/isp/isp_freebsd.c | 26
-rw-r--r--  sys/dev/isp/isp_target.c | 22
-rw-r--r--  sys/dev/isp/ispvar.h | 3
-rw-r--r--  sys/dev/iwm/if_iwm.c | 7
-rw-r--r--  sys/dev/iwm/if_iwm_binding.c | 2
-rw-r--r--  sys/dev/iwm/if_iwm_mac_ctxt.c | 2
-rw-r--r--  sys/dev/iwm/if_iwm_pcie_trans.c | 2
-rw-r--r--  sys/dev/iwm/if_iwm_phy_ctxt.c | 2
-rw-r--r--  sys/dev/iwm/if_iwm_phy_db.c | 2
-rw-r--r--  sys/dev/iwm/if_iwm_power.c | 2
-rw-r--r--  sys/dev/iwm/if_iwm_scan.c | 2
-rw-r--r--  sys/dev/iwm/if_iwm_time_event.c | 2
-rw-r--r--  sys/dev/iwm/if_iwm_util.c | 2
-rw-r--r--  sys/dev/ixl/if_ixl.c | 8
-rw-r--r--  sys/dev/mlx5/mlx5_en/en.h | 23
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c | 381
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_main.c | 111
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_tx.c | 91
-rw-r--r--  sys/dev/mwl/if_mwl.c | 126
-rw-r--r--  sys/dev/nvram2env/nvram2env.c | 98
-rw-r--r--  sys/dev/pccbb/pccbb_pci.c | 18
-rw-r--r--  sys/dev/pci/pci_host_generic.c | 20
-rw-r--r--  sys/dev/pci/pci_pci.c | 163
-rw-r--r--  sys/dev/pci/pcib_private.h | 2
-rw-r--r--  sys/dev/sfxge/common/efsys.h | 16
-rw-r--r--  sys/dev/siba/siba_bwn.c | 4
-rw-r--r--  sys/dev/urtwn/if_urtwn.c | 27
-rw-r--r--  sys/dev/urtwn/if_urtwnreg.h | 3
-rw-r--r--  sys/dev/vnic/mrml_bridge.c | 1
-rw-r--r--  sys/dev/vnic/nic_main.c | 27
-rw-r--r--  sys/dev/vnic/nicvf_main.c | 9
-rw-r--r--  sys/dev/vnic/thunder_bgx.c | 21
-rw-r--r--  sys/dev/vnic/thunder_mdio.c | 2
-rw-r--r--  sys/dev/wi/if_wi.c | 60
-rw-r--r--  sys/dev/wi/if_wivar.h | 1
-rw-r--r--  sys/fs/cd9660/cd9660_vfsops.c | 6
-rw-r--r--  sys/fs/devfs/devfs_devs.c | 5
-rw-r--r--  sys/fs/ext2fs/ext2_alloc.c | 3
-rw-r--r--  sys/fs/ext2fs/ext2_vfsops.c | 3
-rw-r--r--  sys/fs/msdosfs/msdosfs_vfsops.c | 30
-rw-r--r--  sys/fs/udf/udf_vfsops.c | 6
-rw-r--r--  sys/geom/eli/g_eli.c | 4
-rw-r--r--  sys/geom/geom.h | 3
-rw-r--r--  sys/geom/geom_dev.c | 42
-rw-r--r--  sys/geom/geom_disk.c | 14
-rw-r--r--  sys/geom/geom_disk.h | 1
-rw-r--r--  sys/geom/geom_event.c | 1
-rw-r--r--  sys/geom/geom_io.c | 68
-rw-r--r--  sys/geom/geom_kern.c | 5
-rw-r--r--  sys/geom/geom_mbr.c | 2
-rw-r--r--  sys/geom/geom_pc98.c | 2
-rw-r--r--  sys/geom/geom_subr.c | 3
-rw-r--r--  sys/geom/journal/g_journal.c | 10
-rw-r--r--  sys/geom/mirror/g_mirror.c | 2
-rw-r--r--  sys/geom/mountver/g_mountver.c | 2
-rw-r--r--  sys/geom/raid/g_raid.c | 2
-rw-r--r--  sys/geom/raid3/g_raid3.c | 2
-rw-r--r--  sys/gnu/dev/bwn/phy_n/if_bwn_phy_n_core.c | 12
-rw-r--r--  sys/i386/i386/sys_machdep.c | 6
-rw-r--r--  sys/i386/i386/vm_machdep.c | 8
-rw-r--r--  sys/i386/linux/linux_proto.h | 8
-rw-r--r--  sys/i386/linux/linux_syscall.h | 2
-rw-r--r--  sys/i386/linux/linux_syscalls.c | 2
-rw-r--r--  sys/i386/linux/linux_sysent.c | 2
-rw-r--r--  sys/i386/linux/linux_systrace_args.c | 12
-rw-r--r--  sys/i386/linux/syscalls.master | 6
-rw-r--r--  sys/kern/bus_if.m | 55
-rw-r--r--  sys/kern/kern_malloc.c | 3
-rw-r--r--  sys/kern/kern_synch.c | 2
-rw-r--r--  sys/kern/subr_bus.c | 74
-rw-r--r--  sys/kern/subr_devstat.c | 4
-rw-r--r--  sys/kern/subr_intr.c | 9
-rw-r--r--  sys/kern/subr_rman.c | 21
-rw-r--r--  sys/kern/subr_sglist.c | 101
-rw-r--r--  sys/kern/subr_taskqueue.c | 28
-rw-r--r--  sys/kern/sys_socket.c | 6
-rw-r--r--  sys/kern/uipc_socket.c | 4
-rw-r--r--  sys/kern/vfs_aio.c | 20
-rw-r--r--  sys/mips/conf/BCM | 2
-rw-r--r--  sys/mips/include/clock.h | 2
-rw-r--r--  sys/modules/bhnd/bhndb/Makefile | 1
-rw-r--r--  sys/modules/hyperv/vmbus/Makefile | 3
-rw-r--r--  sys/modules/vnic/Makefile | 10
-rw-r--r--  sys/modules/vnic/mrmlbus/Makefile | 16
-rw-r--r--  sys/modules/vnic/thunder_bgx/Makefile | 16
-rw-r--r--  sys/modules/vnic/thunder_mdio/Makefile | 16
-rw-r--r--  sys/modules/vnic/vnicpf/Makefile | 16
-rw-r--r--  sys/modules/vnic/vnicvf/Makefile | 16
-rw-r--r--  sys/net/if_tun.c | 7
-rw-r--r--  sys/net/mp_ring.c | 4
-rw-r--r--  sys/net80211/ieee80211.c | 2
-rw-r--r--  sys/net80211/ieee80211_action.c | 1
-rw-r--r--  sys/net80211/ieee80211_crypto_none.c | 2
-rw-r--r--  sys/net80211/ieee80211_freebsd.c | 2
-rw-r--r--  sys/net80211/ieee80211_hostap.c | 2
-rw-r--r--  sys/net80211/ieee80211_ht.c | 4
-rw-r--r--  sys/net80211/ieee80211_hwmp.c | 5
-rw-r--r--  sys/net80211/ieee80211_ioctl.c | 5
-rw-r--r--  sys/net80211/ieee80211_mesh.c | 10
-rw-r--r--  sys/net80211/ieee80211_phy.c | 1
-rw-r--r--  sys/net80211/ieee80211_scan_sta.c | 3
-rw-r--r--  sys/net80211/ieee80211_scan_sw.c | 6
-rw-r--r--  sys/net80211/ieee80211_sta.c | 1
-rw-r--r--  sys/net80211/ieee80211_superg.c | 1
-rw-r--r--  sys/netinet/tcp_input.c | 2
-rw-r--r--  sys/netinet/tcp_output.c | 2
-rw-r--r--  sys/netinet6/ip6_output.c | 14
-rw-r--r--  sys/netinet6/nd6.c | 2
-rw-r--r--  sys/netinet6/nd6.h | 4
-rw-r--r--  sys/netpfil/ipfw/ip_fw_dynamic.c | 6
-rw-r--r--  sys/netpfil/pf/pf.c | 15
-rw-r--r--  sys/netpfil/pf/pf_norm.c | 2
-rw-r--r--  sys/powerpc/powermac/powermac_thermal.h | 2
-rw-r--r--  sys/powerpc/powermac/smu.c | 6
-rw-r--r--  sys/powerpc/powermac/smusat.c | 6
-rw-r--r--  sys/powerpc/powerpc/exec_machdep.c | 6
-rw-r--r--  sys/sparc64/include/vm.h | 1
-rw-r--r--  sys/sparc64/sparc64/vm_machdep.c | 8
-rw-r--r--  sys/sys/_task.h | 3
-rw-r--r--  sys/sys/ata.h | 378
-rw-r--r--  sys/sys/bio.h | 15
-rw-r--r--  sys/sys/bus.h | 37
-rw-r--r--  sys/sys/disk.h | 3
-rw-r--r--  sys/sys/disk_zone.h | 184
-rw-r--r--  sys/sys/elf_common.h | 3
-rw-r--r--  sys/sys/kobj.h | 8
-rw-r--r--  sys/sys/malloc.h | 3
-rw-r--r--  sys/sys/param.h | 2
-rw-r--r--  sys/sys/rman.h | 5
-rw-r--r--  sys/sys/sglist.h | 3
-rw-r--r--  sys/sys/sysent.h | 4
-rw-r--r--  sys/sys/taskqueue.h | 4
-rw-r--r--  sys/sys/vmmeter.h | 45
-rw-r--r--  sys/ufs/ffs/ffs_alloc.c | 7
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c | 49
-rw-r--r--  sys/vm/swap_pager.c | 42
-rw-r--r--  sys/vm/vm.h | 5
-rw-r--r--  sys/vm/vm_fault.c | 47
-rw-r--r--  sys/vm/vm_glue.c | 2
-rw-r--r--  sys/vm/vm_page.c | 4
-rw-r--r--  sys/vm/vm_page.h | 2
-rw-r--r--  sys/x86/x86/nexus.c | 144
260 files changed, 8684 insertions, 2076 deletions
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index 7f6c50e..4f85e1f 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -333,17 +333,19 @@ amd64_set_ioperm(td, uap)
struct thread *td;
struct i386_ioperm_args *uap;
{
- int i, error;
char *iomap;
struct amd64tss *tssp;
struct system_segment_descriptor *tss_sd;
struct pcb *pcb;
+ u_int i;
+ int error;
if ((error = priv_check(td, PRIV_IO)) != 0)
return (error);
if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
return (error);
- if (uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
+ if (uap->start > uap->start + uap->length ||
+ uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
return (EINVAL);
/*
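The new first comparison catches the case where start + length wraps past UINT_MAX, which the old single test silently accepted. A minimal standalone sketch of the same idiom, using hypothetical names rather than the kernel's:

#include <stdbool.h>

/* Reject [start, start + length) if it overflows or runs past limit. */
static bool
range_ok(unsigned int start, unsigned int length, unsigned int limit)
{
	if (start > start + length)	/* unsigned wrap-around == overflow */
		return (false);
	return (start + length <= limit);
}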
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index ff857bb..fc9e634 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -414,13 +414,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
break;
default:
- if (td->td_proc->p_sysent->sv_errsize) {
- if (error >= td->td_proc->p_sysent->sv_errsize)
- error = -1; /* XXX */
- else
- error = td->td_proc->p_sysent->sv_errtbl[error];
- }
- td->td_frame->tf_rax = error;
+ td->td_frame->tf_rax = SV_ABI_ERRNO(td->td_proc, error);
td->td_frame->tf_rflags |= PSL_C;
break;
}
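SV_ABI_ERRNO(), added to sys/sys/sysent.h elsewhere in this change set, replaces the open-coded errno translation that this block performed. A sketch of the equivalent logic, reconstructed from the removed lines rather than from the macro's actual definition:

/*
 * Map a native errno onto the ABI's errno table, as the deleted block did;
 * values beyond the table collapse to -1.
 */
static int
abi_errno(struct proc *p, int error)
{
	if (p->p_sysent->sv_errsize == 0)
		return (error);
	if (error >= p->p_sysent->sv_errsize)
		return (-1);		/* XXX as in the original */
	return (p->p_sysent->sv_errtbl[error]);
}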
diff --git a/sys/amd64/linux/linux_proto.h b/sys/amd64/linux/linux_proto.h
index 54e11fe..37e67e6 100644
--- a/sys/amd64/linux/linux_proto.h
+++ b/sys/amd64/linux/linux_proto.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/amd64/linux/syscalls.master 293907 2016-01-14 10:13:58Z glebius
+ * created from FreeBSD: head/sys/amd64/linux/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
#ifndef _LINUX_SYSPROTO_H_
@@ -524,16 +524,16 @@ struct linux_getpriority_args {
};
struct linux_sched_setparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_setscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
char policy_l_[PADL_(l_int)]; l_int policy; char policy_r_[PADR_(l_int)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
diff --git a/sys/amd64/linux/linux_syscall.h b/sys/amd64/linux/linux_syscall.h
index cee2855..4f32235 100644
--- a/sys/amd64/linux/linux_syscall.h
+++ b/sys/amd64/linux/linux_syscall.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/amd64/linux/syscalls.master 293907 2016-01-14 10:13:58Z glebius
+ * created from FreeBSD: head/sys/amd64/linux/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
#define LINUX_SYS_read 0
diff --git a/sys/amd64/linux/linux_syscalls.c b/sys/amd64/linux/linux_syscalls.c
index ea001ba..447ba20 100644
--- a/sys/amd64/linux/linux_syscalls.c
+++ b/sys/amd64/linux/linux_syscalls.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/amd64/linux/syscalls.master 293907 2016-01-14 10:13:58Z glebius
+ * created from FreeBSD: head/sys/amd64/linux/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
const char *linux_syscallnames[] = {
diff --git a/sys/amd64/linux/linux_sysent.c b/sys/amd64/linux/linux_sysent.c
index 053f8d1..f2ca9fd 100644
--- a/sys/amd64/linux/linux_sysent.c
+++ b/sys/amd64/linux/linux_sysent.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/amd64/linux/syscalls.master 293907 2016-01-14 10:13:58Z glebius
+ * created from FreeBSD: head/sys/amd64/linux/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
#include <sys/param.h>
diff --git a/sys/amd64/linux/linux_systrace_args.c b/sys/amd64/linux/linux_systrace_args.c
index bc279c9..320dc35 100644
--- a/sys/amd64/linux/linux_systrace_args.c
+++ b/sys/amd64/linux/linux_systrace_args.c
@@ -1178,7 +1178,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 142: {
struct linux_sched_setparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1186,7 +1186,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 143: {
struct linux_sched_getparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1195,7 +1195,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
struct linux_sched_setscheduler_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
iarg[1] = p->policy; /* l_int */
- uarg[2] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[2] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 3;
break;
}
@@ -4209,7 +4209,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -4222,7 +4222,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -4238,7 +4238,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_int";
break;
case 2:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
diff --git a/sys/amd64/linux/syscalls.master b/sys/amd64/linux/syscalls.master
index eb36d5f..5ed098a 100644
--- a/sys/amd64/linux/syscalls.master
+++ b/sys/amd64/linux/syscalls.master
@@ -283,12 +283,12 @@
141 AUE_SETPRIORITY NOPROTO { int setpriority(int which, int who, \
int prio); }
142 AUE_SCHED_SETPARAM STD { int linux_sched_setparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
143 AUE_SCHED_GETPARAM STD { int linux_sched_getparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
144 AUE_SCHED_SETSCHEDULER STD { int linux_sched_setscheduler( \
l_pid_t pid, l_int policy, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
145 AUE_SCHED_GETSCHEDULER STD { int linux_sched_getscheduler( \
l_pid_t pid); }
146 AUE_SCHED_GET_PRIORITY_MAX STD { int linux_sched_get_priority_max( \
diff --git a/sys/amd64/linux32/linux32_proto.h b/sys/amd64/linux32/linux32_proto.h
index fd74495..29f7d3f 100644
--- a/sys/amd64/linux32/linux32_proto.h
+++ b/sys/amd64/linux32/linux32_proto.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/amd64/linux32/syscalls.master 297061 2016-03-20 13:21:20Z dchagin
+ * created from FreeBSD: head/sys/amd64/linux32/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
#ifndef _LINUX32_SYSPROTO_H_
@@ -480,16 +480,16 @@ struct linux_sysctl_args {
};
struct linux_sched_setparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_setscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
char policy_l_[PADL_(l_int)]; l_int policy; char policy_r_[PADR_(l_int)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
diff --git a/sys/amd64/linux32/linux32_syscall.h b/sys/amd64/linux32/linux32_syscall.h
index f213377..d6ab9cd 100644
--- a/sys/amd64/linux32/linux32_syscall.h
+++ b/sys/amd64/linux32/linux32_syscall.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/amd64/linux32/syscalls.master 297061 2016-03-20 13:21:20Z dchagin
+ * created from FreeBSD: head/sys/amd64/linux32/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
#define LINUX32_SYS_linux_exit 1
diff --git a/sys/amd64/linux32/linux32_syscalls.c b/sys/amd64/linux32/linux32_syscalls.c
index beb6efc..3d23c74 100644
--- a/sys/amd64/linux32/linux32_syscalls.c
+++ b/sys/amd64/linux32/linux32_syscalls.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/amd64/linux32/syscalls.master 297061 2016-03-20 13:21:20Z dchagin
+ * created from FreeBSD: head/sys/amd64/linux32/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
const char *linux32_syscallnames[] = {
diff --git a/sys/amd64/linux32/linux32_sysent.c b/sys/amd64/linux32/linux32_sysent.c
index baa9baa..d6f31e2 100644
--- a/sys/amd64/linux32/linux32_sysent.c
+++ b/sys/amd64/linux32/linux32_sysent.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/amd64/linux32/syscalls.master 297061 2016-03-20 13:21:20Z dchagin
+ * created from FreeBSD: head/sys/amd64/linux32/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
#include "opt_compat.h"
diff --git a/sys/amd64/linux32/linux32_systrace_args.c b/sys/amd64/linux32/linux32_systrace_args.c
index fc4c89a..cabfab7 100644
--- a/sys/amd64/linux32/linux32_systrace_args.c
+++ b/sys/amd64/linux32/linux32_systrace_args.c
@@ -1047,7 +1047,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 154: {
struct linux_sched_setparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1055,7 +1055,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 155: {
struct linux_sched_getparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1064,7 +1064,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
struct linux_sched_setscheduler_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
iarg[1] = p->policy; /* l_int */
- uarg[2] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[2] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 3;
break;
}
@@ -3938,7 +3938,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -3951,7 +3951,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -3967,7 +3967,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_int";
break;
case 2:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
diff --git a/sys/amd64/linux32/syscalls.master b/sys/amd64/linux32/syscalls.master
index e40247e..79cd2c8 100644
--- a/sys/amd64/linux32/syscalls.master
+++ b/sys/amd64/linux32/syscalls.master
@@ -268,12 +268,12 @@
152 AUE_MLOCKALL NOPROTO { int mlockall(int how); }
153 AUE_MUNLOCKALL NOPROTO { int munlockall(void); }
154 AUE_SCHED_SETPARAM STD { int linux_sched_setparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
155 AUE_SCHED_GETPARAM STD { int linux_sched_getparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
156 AUE_SCHED_SETSCHEDULER STD { int linux_sched_setscheduler( \
l_pid_t pid, l_int policy, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
157 AUE_SCHED_GETSCHEDULER STD { int linux_sched_getscheduler( \
l_pid_t pid); }
158 AUE_NULL NOPROTO { int sched_yield(void); }
diff --git a/sys/arm/allwinner/axp209.c b/sys/arm/allwinner/axp209.c
index 3a9753f..7ffc8c6 100644
--- a/sys/arm/allwinner/axp209.c
+++ b/sys/arm/allwinner/axp209.c
@@ -69,7 +69,7 @@ __FBSDID("$FreeBSD$");
#define AXP209_TEMPMON_L(a) ((a) & 0xf)
#define AXP209_TEMPMON_MIN 1447 /* -144.7C */
-#define AXP209_0C_TO_K 2732
+#define AXP209_0C_TO_K 2731
struct axp209_softc {
uint32_t addr;
diff --git a/sys/arm/broadcom/bcm2835/bcm2835_cpufreq.c b/sys/arm/broadcom/bcm2835/bcm2835_cpufreq.c
index 2cf682f..46856f2 100644
--- a/sys/arm/broadcom/bcm2835/bcm2835_cpufreq.c
+++ b/sys/arm/broadcom/bcm2835/bcm2835_cpufreq.c
@@ -82,7 +82,7 @@ __FBSDID("$FreeBSD$");
#define MSG_ERROR -999999999
#define MHZSTEP 100
#define HZSTEP (MHZ2HZ(MHZSTEP))
-#define TZ_ZEROC 2732
+#define TZ_ZEROC 2731
#define VC_LOCK(sc) do { \
sema_wait(&vc_sema); \
diff --git a/sys/arm/freescale/imx/imx6_anatop.c b/sys/arm/freescale/imx/imx6_anatop.c
index c0afe57..551531a 100644
--- a/sys/arm/freescale/imx/imx6_anatop.c
+++ b/sys/arm/freescale/imx/imx6_anatop.c
@@ -139,7 +139,7 @@ static struct oppt {
*/
static uint32_t imx6_ocotp_mhz_tab[] = {792, 852, 996, 1200};
-#define TZ_ZEROC 2732 /* deci-Kelvin <-> deci-Celcius offset. */
+#define TZ_ZEROC 2731 /* deci-Kelvin <-> deci-Celcius offset. */
uint32_t
imx6_anatop_read_4(bus_size_t offset)
diff --git a/sys/arm/freescale/imx/imx_gpio.c b/sys/arm/freescale/imx/imx_gpio.c
index c5f7b8c..7f44845 100644
--- a/sys/arm/freescale/imx/imx_gpio.c
+++ b/sys/arm/freescale/imx/imx_gpio.c
@@ -179,14 +179,14 @@ gpio_pic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
*/
if (ncells != 2) {
- device_printf(sc->dev, "Invalid #interrupt-cells");
+ device_printf(sc->dev, "Invalid #interrupt-cells\n");
return (EINVAL);
}
irq = cells[0];
tripol = cells[1];
if (irq >= sc->gpio_npins) {
- device_printf(sc->dev, "Invalid interrupt number %d", irq);
+ device_printf(sc->dev, "Invalid interrupt number %u\n", irq);
return (EINVAL);
}
switch (tripol) {
@@ -207,7 +207,7 @@ gpio_pic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
pol = INTR_POLARITY_LOW;
break;
default:
- device_printf(sc->dev, "unsupported trigger/polarity 0x%2x\n",
+ device_printf(sc->dev, "Unsupported trigger/polarity 0x%2x\n",
tripol);
return (ENOTSUP);
}
diff --git a/sys/arm/include/_align.h b/sys/arm/include/_align.h
index 5a1db62..0791e45 100644
--- a/sys/arm/include/_align.h
+++ b/sys/arm/include/_align.h
@@ -42,11 +42,11 @@
#define _ARM_INCLUDE__ALIGN_H_
/*
- * Round p (pointer or byte index) up to a correctly-aligned value
- * for all data types (int, long, ...). The result is unsigned int
- * and must be cast to any desired pointer type.
+ * Round p (pointer or byte index) up to the hardware-required alignment which
+ * is sufficient for any data type, pointer or numeric. The resulting type
+ * is equivelent to arm's uintptr_t (but is purposely spelled "unsigned" here).
*/
-#define _ALIGNBYTES (sizeof(int) - 1)
+#define _ALIGNBYTES (sizeof(long long) - 1)
#define _ALIGN(p) (((unsigned)(p) + _ALIGNBYTES) & ~_ALIGNBYTES)
#endif /* !_ARM_INCLUDE__ALIGN_H_ */
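With _ALIGNBYTES raised from sizeof(int) - 1 to sizeof(long long) - 1 (7 on arm), _ALIGN() now rounds a pointer or byte index up to the next 8-byte boundary instead of a 4-byte one. A worked example of the arithmetic:

/* _ALIGNBYTES == 7, so ~_ALIGNBYTES clears the low three bits. */
/* _ALIGN(0x1001) == (0x1001 + 7) & ~7 == 0x1008 */
/* _ALIGN(0x1008) == (0x1008 + 7) & ~7 == 0x1008  (already aligned) */
/* Under the old definition the same inputs aligned only to a 4-byte
 * boundary: 0x1004 and 0x1008 respectively. */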
diff --git a/sys/arm/include/param.h b/sys/arm/include/param.h
index 384891d..4b089e2 100644
--- a/sys/arm/include/param.h
+++ b/sys/arm/include/param.h
@@ -59,12 +59,6 @@
#define _V6_SUFFIX ""
#endif
-#ifdef __ARM_PCS_VFP
-#define _HF_SUFFIX "hf"
-#else
-#define _HF_SUFFIX ""
-#endif
-
#ifdef __ARM_BIG_ENDIAN
#define _EB_SUFFIX "eb"
#else
@@ -75,7 +69,7 @@
#define MACHINE "arm"
#endif
#ifndef MACHINE_ARCH
-#define MACHINE_ARCH "arm" _V6_SUFFIX _HF_SUFFIX _EB_SUFFIX
+#define MACHINE_ARCH "arm" _V6_SUFFIX _EB_SUFFIX
#endif
#if defined(SMP) || defined(KLD_MODULE)
diff --git a/sys/arm/mv/mpic.c b/sys/arm/mv/mpic.c
index 1dbcb34..9a9785ce 100644
--- a/sys/arm/mv/mpic.c
+++ b/sys/arm/mv/mpic.c
@@ -368,6 +368,11 @@ mpic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
mpic_enable_intr(dev, isrc);
}
+
+static void
+mpic_post_filter(device_t dev, struct intr_irqsrc *isrc)
+{
+}
#endif
static device_method_t mv_mpic_methods[] = {
@@ -378,6 +383,7 @@ static device_method_t mv_mpic_methods[] = {
DEVMETHOD(pic_disable_intr, mpic_disable_intr),
DEVMETHOD(pic_enable_intr, mpic_enable_intr),
DEVMETHOD(pic_map_intr, mpic_map_intr),
+ DEVMETHOD(pic_post_filter, mpic_post_filter),
DEVMETHOD(pic_post_ithread, mpic_post_ithread),
DEVMETHOD(pic_pre_ithread, mpic_pre_ithread),
#endif
diff --git a/sys/arm/mv/mv_ts.c b/sys/arm/mv/mv_ts.c
index 7606859..35cd062 100644
--- a/sys/arm/mv/mv_ts.c
+++ b/sys/arm/mv/mv_ts.c
@@ -80,7 +80,7 @@ ts_probe(device_t dev)
#define MV_TEMP_SENS_OFFS 10
#define MV_TEMP_SENS_MASK 0x1ff
#define MV_TEMP_SENS_READ_MAX 16
-#define TZ_ZEROC 2732
+#define TZ_ZEROC 2731
#define MV_TEMP_CONVERT(x) ((((322 - x) * 100000) / 13625) + TZ_ZEROC)
/*
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
index 71a18dc..7166ebf 100644
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -955,6 +955,9 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
struct sync_list *sl, *end;
vm_offset_t datavaddr, tempvaddr;
+ if (op == BUS_DMASYNC_POSTWRITE)
+ return;
+
if ((op & BUS_DMASYNC_POSTREAD) != 0) {
/*
* Wait for any DMA operations to complete before the bcopy.
diff --git a/sys/arm64/arm64/gic_v3_reg.h b/sys/arm64/arm64/gic_v3_reg.h
index 2a2072f..f790a41 100644
--- a/sys/arm64/arm64/gic_v3_reg.h
+++ b/sys/arm64/arm64/gic_v3_reg.h
@@ -67,7 +67,7 @@
#define GICD_TYPER (0x0004)
#define GICD_TYPER_IDBITS(n) ((((n) >> 19) & 0x1F) + 1)
-#define GICD_TYPER_I_NUM(n) ((((n) & 0xF1) + 1) * 32)
+#define GICD_TYPER_I_NUM(n) ((((n) & 0x1F) + 1) * 32)
#define GICD_ISENABLER(n) (0x0100 + (((n) >> 5) * 4))
#define GICD_I_PER_ISENABLERn (32)
@@ -142,6 +142,8 @@
#define GICR_PROPBASER_CACHE_NIWAWB 0x5UL
#define GICR_PROPBASER_CACHE_NIRAWAWT 0x6UL
#define GICR_PROPBASER_CACHE_NIRAWAWB 0x7UL
+#define GICR_PROPBASER_CACHE_MASK \
+ (0x7UL << GICR_PROPBASER_CACHE_SHIFT)
/*
* Shareability
@@ -179,6 +181,8 @@
#define GICR_PENDBASER_CACHE_NIWAWB 0x5UL
#define GICR_PENDBASER_CACHE_NIRAWAWT 0x6UL
#define GICR_PENDBASER_CACHE_NIRAWAWB 0x7UL
+#define GICR_PENDBASER_CACHE_MASK \
+ (0x7UL << GICR_PENDBASER_CACHE_SHIFT)
/*
* Shareability
@@ -217,6 +221,26 @@
#define GITS_CTLR (0x0000)
#define GITS_CTLR_EN (1 << 0)
+#define GITS_IIDR (0x0004)
+#define GITS_IIDR_PRODUCT_SHIFT 24
+#define GITS_IIDR_PRODUCT_MASK (0xff << GITS_IIDR_PRODUCT_SHIFT)
+#define GITS_IIDR_VARIANT_SHIFT 16
+#define GITS_IIDR_VARIANT_MASK (0xf << GITS_IIDR_VARIANT_SHIFT)
+#define GITS_IIDR_REVISION_SHIFT 12
+#define GITS_IIDR_REVISION_MASK (0xf << GITS_IIDR_REVISION_SHIFT)
+#define GITS_IIDR_IMPLEMENTOR_SHIFT 0
+#define GITS_IIDR_IMPLEMENTOR_MASK (0xfff << GITS_IIDR_IMPLEMENTOR_SHIFT)
+
+#define GITS_IIDR_RAW(impl, prod, var, rev) \
+ ((prod) << GITS_IIDR_PRODUCT_SHIFT | \
+ (var) << GITS_IIDR_VARIANT_SHIFT | \
+ (rev) << GITS_IIDR_REVISION_SHIFT | \
+ (impl) << GITS_IIDR_IMPLEMENTOR_SHIFT)
+
+#define GITS_IIDR_IMPL_CAVIUM (0x34c)
+#define GITS_IIDR_PROD_THUNDER (0xa1)
+#define GITS_IIDR_VAR_THUNDER_1 (0x0)
+
#define GITS_CBASER (0x0080)
#define GITS_CBASER_VALID (1UL << 63)
/*
@@ -239,7 +263,7 @@
#define GITS_CBASER_CACHE_NIWAWB 0x5UL
#define GITS_CBASER_CACHE_NIRAWAWT 0x6UL
#define GITS_CBASER_CACHE_NIRAWAWB 0x7UL
-#define GITS_CBASER_CACHE_MASK (0x7UL << GITS_CBASER_TYPE_SHIFT)
+#define GITS_CBASER_CACHE_MASK (0x7UL << GITS_CBASER_CACHE_SHIFT)
/*
* Shareability
* 0x0 - Non-shareable
diff --git a/sys/arm64/cavium/thunder_pcie_pem.c b/sys/arm64/cavium/thunder_pcie_pem.c
index 7b4394f..a8f2f43 100644
--- a/sys/arm64/cavium/thunder_pcie_pem.c
+++ b/sys/arm64/cavium/thunder_pcie_pem.c
@@ -313,6 +313,10 @@ thunder_pem_adjust_resource(device_t dev, device_t child, int type,
struct rman *rm;
sc = device_get_softc(dev);
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ if (type == PCI_RES_BUS)
+ return (pci_domain_adjust_bus(sc->id, child, res, start, end));
+#endif
rm = thunder_pem_rman(sc, type);
if (rm == NULL)
@@ -619,6 +623,11 @@ thunder_pem_alloc_resource(device_t dev, device_t child, int type, int *rid,
struct resource *res;
device_t parent_dev;
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ if (type == PCI_RES_BUS)
+ return (pci_domain_alloc_bus(sc->id, child, rid, start, end,
+ count, flags));
+#endif
rm = thunder_pem_rman(sc, type);
if (rm == NULL) {
/* Find parent device. On ThunderX we know an exact path. */
@@ -675,7 +684,12 @@ thunder_pem_release_resource(device_t dev, device_t child, int type, int rid,
struct resource *res)
{
device_t parent_dev;
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ struct thunder_pem_softc *sc = device_get_softc(dev);
+ if (type == PCI_RES_BUS)
+ return (pci_domain_release_bus(sc->id, child, rid, res));
+#endif
/* Find parent device. On ThunderX we know an exact path. */
parent_dev = device_get_parent(device_get_parent(dev));
diff --git a/sys/arm64/conf/DEFAULTS b/sys/arm64/conf/DEFAULTS
index 8f6d58f..4f89768 100644
--- a/sys/arm64/conf/DEFAULTS
+++ b/sys/arm64/conf/DEFAULTS
@@ -12,3 +12,4 @@ device mem # Memory and kernel memory devices
options GEOM_PART_BSD
options GEOM_PART_MBR
+options NEW_PCIB
diff --git a/sys/arm64/include/resource.h b/sys/arm64/include/resource.h
index 723d145..aef4fad 100644
--- a/sys/arm64/include/resource.h
+++ b/sys/arm64/include/resource.h
@@ -42,5 +42,8 @@
#define SYS_RES_MEMORY 3 /* i/o memory */
#define SYS_RES_IOPORT 4 /* i/o ports */
#define SYS_RES_GPIO 5 /* general purpose i/o */
+#ifdef NEW_PCIB
+#define PCI_RES_BUS 6 /* PCI bus numbers */
+#endif
#endif /* !_MACHINE_RESOURCE_H_ */
diff --git a/sys/boot/efi/loader/main.c b/sys/boot/efi/loader/main.c
index 5efe631..1765de9 100644
--- a/sys/boot/efi/loader/main.c
+++ b/sys/boot/efi/loader/main.c
@@ -76,26 +76,29 @@ static void efi_zfs_probe(void);
#endif
/*
- * Need this because EFI uses UTF-16 unicode string constants, but we
- * use UTF-8. We can't use printf due to the possibility of \0 and we
- * don't support support wide characters either.
+ * cpy8to16 copies a traditional C string into a CHAR16 string and
+ * 0 terminates it. len is the size of *dst in bytes.
*/
static void
-print_str16(const CHAR16 *str)
+cpy8to16(const char *src, CHAR16 *dst, size_t len)
{
- int i;
-
- for (i = 0; str[i]; i++)
- printf("%c", (char)str[i]);
+ len <<= 1; /* Assume CHAR16 is 2 bytes */
+ while (len > 0 && *src) {
+ *dst++ = *src++;
+ len--;
+ }
+ *dst++ = (CHAR16)0;
}
static void
-cp16to8(const CHAR16 *src, char *dst, size_t len)
+cpy16to8(const CHAR16 *src, char *dst, size_t len)
{
size_t i;
for (i = 0; i < len && src[i]; i++)
dst[i] = (char)src[i];
+ if (i < len)
+ dst[i] = '\0';
}
static int
@@ -268,14 +271,14 @@ main(int argc, CHAR16 *argv[])
if (i + 1 == argc) {
setenv("comconsole_speed", "115200", 1);
} else {
- cp16to8(&argv[i + 1][0], var,
+ cpy16to8(&argv[i + 1][0], var,
sizeof(var));
setenv("comconsole_speedspeed", var, 1);
}
i++;
break;
} else {
- cp16to8(&argv[i][j + 1], var,
+ cpy16to8(&argv[i][j + 1], var,
sizeof(var));
setenv("comconsole_speed", var, 1);
break;
@@ -330,10 +333,8 @@ main(int argc, CHAR16 *argv[])
BS->HandleProtocol(IH, &imgid, (VOID**)&img);
printf("Command line arguments:");
- for (i = 0; i < argc; i++) {
- printf(" ");
- print_str16(argv[i]);
- }
+ for (i = 0; i < argc; i++)
+ printf(" %S", argv[i]);
printf("\n");
printf("Image base: 0x%lx\n", (u_long)img->ImageBase);
@@ -631,57 +632,6 @@ command_mode(int argc, char *argv[])
return (CMD_OK);
}
-
-/* deprecated */
-COMMAND_SET(nvram, "nvram", "get or set NVRAM variables", command_nvram);
-
-static int
-command_nvram(int argc, char *argv[])
-{
- CHAR16 var[128];
- CHAR16 *data;
- EFI_STATUS status;
- EFI_GUID varguid = { 0,0,0,{0,0,0,0,0,0,0,0} };
- UINTN varsz, datasz, i;
- SIMPLE_TEXT_OUTPUT_INTERFACE *conout;
-
- conout = ST->ConOut;
-
- /* Initiate the search */
- status = RS->GetNextVariableName(&varsz, NULL, NULL);
-
- pager_open();
- for (; status != EFI_NOT_FOUND; ) {
- status = RS->GetNextVariableName(&varsz, var, &varguid);
- //if (EFI_ERROR(status))
- //break;
-
- conout->OutputString(conout, var);
- printf("=");
- datasz = 0;
- status = RS->GetVariable(var, &varguid, NULL, &datasz, NULL);
- /* XXX: check status */
- data = malloc(datasz);
- status = RS->GetVariable(var, &varguid, NULL, &datasz, data);
- if (EFI_ERROR(status))
- printf("<error retrieving variable>");
- else {
- for (i = 0; i < datasz; i++) {
- if (isalnum(data[i]) || isspace(data[i]))
- printf("%c", data[i]);
- else
- printf("\\x%02x", data[i]);
- }
- }
- free(data);
- if (pager_output("\n"))
- break;
- }
- pager_close();
-
- return (CMD_OK);
-}
-
#ifdef EFI_ZFS_BOOT
COMMAND_SET(lszfs, "lszfs", "list child datasets of a zfs dataset",
command_lszfs);
@@ -737,17 +687,18 @@ command_reloadbe(int argc, char *argv[])
}
#endif
-COMMAND_SET(efishow, "efi-show", "print some or all EFI variables", command_efi_printenv);
+COMMAND_SET(efishow, "efi-show", "print some or all EFI variables", command_efi_show);
static int
efi_print_var(CHAR16 *varnamearg, EFI_GUID *matchguid, int lflag)
{
- UINTN datasz;
+ UINTN datasz, i;
EFI_STATUS status;
UINT32 attr;
CHAR16 *data;
char *str;
uint32_t uuid_status;
+ int is_ascii;
datasz = 0;
status = RS->GetVariable(varnamearg, matchguid, &attr,
@@ -764,8 +715,33 @@ efi_print_var(CHAR16 *varnamearg, EFI_GUID *matchguid, int lflag)
return (CMD_ERROR);
}
uuid_to_string((uuid_t *)matchguid, &str, &uuid_status);
- printf("%s %S=%S", str, varnamearg, data);
- free(str);
+ if (lflag) {
+ printf("%s 0x%x %S", str, attr, varnamearg);
+ } else {
+ printf("%s 0x%x %S=", str, attr, varnamearg);
+ is_ascii = 1;
+ free(str);
+ str = (char *)data;
+ for (i = 0; i < datasz - 1; i++) {
+ /* Quick hack to see if this ascii-ish string printable range plus tab, cr and lf */
+ if ((str[i] < 32 || str[i] > 126) && str[i] != 9 && str[i] != 10 && str[i] != 13) {
+ is_ascii = 0;
+ break;
+ }
+ }
+ if (str[datasz - 1] != '\0')
+ is_ascii = 0;
+ if (is_ascii)
+ printf("%s", str);
+ else {
+ for (i = 0; i < datasz / 2; i++) {
+ if (isalnum(data[i]) || isspace(data[i]))
+ printf("%c", data[i]);
+ else
+ printf("\\x%02x", data[i]);
+ }
+ }
+ }
free(data);
if (pager_output("\n"))
return (CMD_WARN);
@@ -773,20 +749,20 @@ efi_print_var(CHAR16 *varnamearg, EFI_GUID *matchguid, int lflag)
}
static int
-command_efi_printenv(int argc, char *argv[])
+command_efi_show(int argc, char *argv[])
{
/*
- * efi-printenv [-a]
+ * efi-show [-a]
* print all the env
- * efi-printenv -u UUID
+ * efi-show -u UUID
* print all the env vars tagged with UUID
- * efi-printenv -v var
+ * efi-show -v var
* search all the env vars and print the ones matching var
- * eif-printenv -u UUID -v var
- * eif-printenv UUID var
+ * eif-show -u UUID -v var
+ * eif-show UUID var
* print all the env vars that match UUID and var
*/
- /* XXX We assume EFI_GUID is the same as uuid_t */
+ /* NB: We assume EFI_GUID is the same as uuid_t */
int aflag = 0, gflag = 0, lflag = 0, vflag = 0;
int ch, rv;
unsigned i;
@@ -825,6 +801,7 @@ command_efi_printenv(int argc, char *argv[])
for (i = 0; i < strlen(optarg); i++)
varnamearg[i] = optarg[i];
varnamearg[i] = 0;
+ break;
default:
printf("Invalid argument %c\n", ch);
return (CMD_ERROR);
@@ -891,24 +868,26 @@ command_efi_printenv(int argc, char *argv[])
*/
varsz = nitems(varname);
varname[0] = 0;
- status = RS->GetNextVariableName(&varsz, varname, &varguid);
- while (status != EFI_NOT_FOUND) {
- status = RS->GetNextVariableName(&varsz, varname,
- &varguid);
+ while ((status = RS->GetNextVariableName(&varsz, varname, &varguid)) !=
+ EFI_NOT_FOUND) {
if (aflag) {
if (efi_print_var(varname, &varguid, lflag) != CMD_OK)
break;
continue;
}
if (vflag) {
- if (wcscmp(varnamearg, varname) == 0)
+ if (wcscmp(varnamearg, varname) == 0) {
if (efi_print_var(varname, &varguid, lflag) != CMD_OK)
break;
+ continue;
+ }
}
if (gflag) {
- if (memcmp(&varguid, &matchguid, sizeof(varguid)) == 0)
+ if (memcmp(&varguid, &matchguid, sizeof(varguid)) == 0) {
if (efi_print_var(varname, &varguid, lflag) != CMD_OK)
break;
+ continue;
+ }
}
}
pager_close();
@@ -921,6 +900,32 @@ COMMAND_SET(efiset, "efi-set", "set EFI variables", command_efi_set);
static int
command_efi_set(int argc, char *argv[])
{
+ char *uuid, *var, *val;
+ CHAR16 wvar[128];
+ EFI_GUID guid;
+ uint32_t status;
+ EFI_STATUS err;
+
+ if (argc != 4) {
+ printf("efi-set uuid var new-value\n");
+ return (CMD_ERROR);
+ }
+ uuid = argv[1];
+ var = argv[2];
+ val = argv[3];
+ uuid_from_string(uuid, (uuid_t *)&guid, &status);
+ if (status != uuid_s_ok) {
+ printf("Invalid uuid %s %d\n", uuid, status);
+ return (CMD_ERROR);
+ }
+ cpy8to16(var, wvar, sizeof(wvar));
+ err = RS->SetVariable(wvar, &guid,
+ EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_RUNTIME_ACCESS | EFI_VARIABLE_BOOTSERVICE_ACCESS,
+ strlen(val) + 1, val);
+ if (EFI_ERROR(err)) {
+ printf("Failed to set variable: error %d\n", EFI_ERROR_CODE(err));
+ return (CMD_ERROR);
+ }
return (CMD_OK);
}
@@ -929,6 +934,29 @@ COMMAND_SET(efiunset, "efi-unset", "delete / unset EFI variables", command_efi_u
static int
command_efi_unset(int argc, char *argv[])
{
+ char *uuid, *var;
+ CHAR16 wvar[128];
+ EFI_GUID guid;
+ uint32_t status;
+ EFI_STATUS err;
+
+ if (argc != 3) {
+ printf("efi-unset uuid var\n");
+ return (CMD_ERROR);
+ }
+ uuid = argv[1];
+ var = argv[2];
+ uuid_from_string(uuid, (uuid_t *)&guid, &status);
+ if (status != uuid_s_ok) {
+ printf("Invalid uuid %s\n", uuid);
+ return (CMD_ERROR);
+ }
+ cpy8to16(var, wvar, sizeof(wvar));
+ err = RS->SetVariable(wvar, &guid, 0, 0, NULL);
+ if (EFI_ERROR(err)) {
+ printf("Failed to unset variable: error %d\n", EFI_ERROR_CODE(err));
+ return (CMD_ERROR);
+ }
return (CMD_OK);
}
diff --git a/sys/boot/fdt/fdt_loader_cmd.c b/sys/boot/fdt/fdt_loader_cmd.c
index 5755851..a8c33d7 100644
--- a/sys/boot/fdt/fdt_loader_cmd.c
+++ b/sys/boot/fdt/fdt_loader_cmd.c
@@ -383,6 +383,8 @@ fdt_apply_overlays()
rv = fdt_open_into(fdtp, new_fdtp, new_fdtp_size);
if (rv != 0) {
printf("failed to open DTB blob for applying overlays\n");
+ free(new_fdtp);
+ free(overlay);
return;
}
diff --git a/sys/boot/i386/libi386/biosdisk.c b/sys/boot/i386/libi386/biosdisk.c
index 9032fe1..209eaed 100644
--- a/sys/boot/i386/libi386/biosdisk.c
+++ b/sys/boot/i386/libi386/biosdisk.c
@@ -749,21 +749,29 @@ bd_read(struct disk_devdesc *dev, daddr_t dblk, int blks, caddr_t dest)
* sectors cannot be decrypted. Round the requested LBA down to
* nearest multiple of DEV_GELIBOOT_BSIZE bytes.
*/
- alignlba = dblk &
- ~(daddr_t)((DEV_GELIBOOT_BSIZE / BIOSDISK_SECSIZE) - 1);
+ alignlba = rounddown2(dblk * BD(dev).bd_sectorsize,
+ DEV_GELIBOOT_BSIZE) / BD(dev).bd_sectorsize;
/*
* Round number of blocks to read up to nearest multiple of
* DEV_GELIBOOT_BSIZE
*/
- alignblks = blks + (dblk - alignlba) +
- ((DEV_GELIBOOT_BSIZE / BIOSDISK_SECSIZE) - 1) &
- ~(int)((DEV_GELIBOOT_BSIZE / BIOSDISK_SECSIZE) - 1);
- diff = (dblk - alignlba) * BIOSDISK_SECSIZE;
+ diff = (dblk - alignlba) * BD(dev).bd_sectorsize;
+ alignblks = roundup2(blks * BD(dev).bd_sectorsize + diff,
+ DEV_GELIBOOT_BSIZE) / BD(dev).bd_sectorsize;
+
/*
- * Use a temporary buffer here because the buffer provided by
- * the caller may be too small.
+ * If the read is rounded up to a larger size, use a temporary
+ * buffer here because the buffer provided by the caller may be
+ * too small.
*/
- tmpbuf = alloca(alignblks * BIOSDISK_SECSIZE);
+ if (diff == 0) {
+ tmpbuf = dest;
+ } else {
+ tmpbuf = malloc(alignblks * BD(dev).bd_sectorsize);
+ if (tmpbuf == NULL) {
+ return (-1);
+ }
+ }
err = bd_io(dev, alignlba, alignblks, tmpbuf, 0);
if (err)
@@ -779,12 +787,15 @@ bd_read(struct disk_devdesc *dev, daddr_t dblk, int blks, caddr_t dest)
/* GELI needs the offset relative to the partition start */
p_off = alignlba - dskp.start;
- err = geli_read(&dskp, p_off * BIOSDISK_SECSIZE, tmpbuf,
- alignblks * BIOSDISK_SECSIZE);
+ err = geli_read(&dskp, p_off * BD(dev).bd_sectorsize, tmpbuf,
+ alignblks * BD(dev).bd_sectorsize);
if (err)
return (err);
- bcopy(tmpbuf + diff, dest, blks * BIOSDISK_SECSIZE);
+ if (tmpbuf != dest) {
+ bcopy(tmpbuf + diff, dest, blks * BD(dev).bd_sectorsize);
+ free(tmpbuf);
+ }
return (0);
}
#endif /* LOADER_GELI_SUPPORT */
@@ -898,10 +909,10 @@ bios_read(void *vdev __unused, struct dsk *priv, off_t off, char *buf, size_t by
dev.d_partition = priv->part;
dev.d_offset = priv->start;
- off = off / BIOSDISK_SECSIZE;
+ off = off / BD(&dev).bd_sectorsize;
/* GELI gives us the offset relative to the partition start */
off += dev.d_offset;
- bytes = bytes / BIOSDISK_SECSIZE;
+ bytes = bytes / BD(&dev).bd_sectorsize;
return (bd_io(&dev, off, bytes, buf, 0));
}
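The GELI path above now derives its alignment from the drive's reported sector size instead of hard-coding 512-byte BIOSDISK_SECSIZE sectors. A standalone restatement of the alignment arithmetic, with the power-of-two rounding helpers spelled out (they mirror roundup2()/rounddown2() from sys/sys/param.h) and sample values chosen only for illustration:

#include <stdint.h>
#include <stdio.h>

#define RDOWN2(x, y)	((x) & ~((y) - 1))		/* y is a power of two */
#define RUP2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	uint64_t dblk = 11, blks = 1;	/* sample request: 1 block at LBA 11 */
	uint64_t secsz = 512;		/* BD(dev).bd_sectorsize */
	uint64_t gelisz = 4096;		/* DEV_GELIBOOT_BSIZE */
	uint64_t alignlba, diff, alignblks;

	alignlba = RDOWN2(dblk * secsz, gelisz) / secsz;
	diff = (dblk - alignlba) * secsz;
	alignblks = RUP2(blks * secsz + diff, gelisz) / secsz;

	/* Prints: alignlba=8 diff=1536 alignblks=8 */
	printf("alignlba=%ju diff=%ju alignblks=%ju\n",
	    (uintmax_t)alignlba, (uintmax_t)diff, (uintmax_t)alignblks);
	return (0);
}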
diff --git a/sys/boot/i386/zfsboot/zfsboot.c b/sys/boot/i386/zfsboot/zfsboot.c
index e0cc740..3b09e55 100644
--- a/sys/boot/i386/zfsboot/zfsboot.c
+++ b/sys/boot/i386/zfsboot/zfsboot.c
@@ -200,7 +200,7 @@ vdev_read(vdev_t *vdev, void *priv, off_t off, void *buf, size_t bytes)
{
char *p;
daddr_t lba, alignlba;
- off_t alignoff, diff;
+ off_t diff;
unsigned int nb, alignnb;
struct dsk *dsk = (struct dsk *) priv;
@@ -210,10 +210,11 @@ vdev_read(vdev_t *vdev, void *priv, off_t off, void *buf, size_t bytes)
p = buf;
lba = off / DEV_BSIZE;
lba += dsk->start;
- /* Align reads to 4k else 4k sector GELIs will not decrypt. */
- alignoff = off & ~ (off_t)(DEV_GELIBOOT_BSIZE - 1);
- /* Round LBA down to nearest multiple of DEV_GELIBOOT_BSIZE bytes. */
- alignlba = alignoff / DEV_BSIZE;
+ /*
+ * Align reads to 4k else 4k sector GELIs will not decrypt.
+ * Round LBA down to nearest multiple of DEV_GELIBOOT_BSIZE bytes.
+ */
+ alignlba = rounddown2(off, DEV_GELIBOOT_BSIZE) / DEV_BSIZE;
/*
* The read must be aligned to DEV_GELIBOOT_BSIZE bytes relative to the
* start of the GELI partition, not the start of the actual disk.
diff --git a/sys/cam/ata/ata_all.c b/sys/cam/ata/ata_all.c
index 51231b7..36c1f35 100644
--- a/sys/cam/ata/ata_all.c
+++ b/sys/cam/ata/ata_all.c
@@ -110,18 +110,45 @@ ata_op_string(struct ata_cmd *cmd)
case 0x3f: return ("WRITE_LOG_EXT");
case 0x40: return ("READ_VERIFY");
case 0x42: return ("READ_VERIFY48");
+ case 0x44: return ("ZERO_EXT");
case 0x45:
switch (cmd->features) {
case 0x55: return ("WRITE_UNCORRECTABLE48 PSEUDO");
case 0xaa: return ("WRITE_UNCORRECTABLE48 FLAGGED");
}
return "WRITE_UNCORRECTABLE48";
+ case 0x47: return ("READ_LOG_DMA_EXT");
+ case 0x4a: return ("ZAC_MANAGEMENT_IN");
case 0x51: return ("CONFIGURE_STREAM");
case 0x60: return ("READ_FPDMA_QUEUED");
case 0x61: return ("WRITE_FPDMA_QUEUED");
- case 0x63: return ("NCQ_NON_DATA");
- case 0x64: return ("SEND_FPDMA_QUEUED");
- case 0x65: return ("RECEIVE_FPDMA_QUEUED");
+ case 0x63:
+ switch (cmd->features & 0xf) {
+ case 0x00: return ("NCQ_NON_DATA ABORT NCQ QUEUE");
+ case 0x01: return ("NCQ_NON_DATA DEADLINE HANDLING");
+ case 0x05: return ("NCQ_NON_DATA SET FEATURES");
+ /*
+ * XXX KDM need common decoding between NCQ and non-NCQ
+ * versions of SET FEATURES.
+ */
+ case 0x06: return ("NCQ_NON_DATA ZERO EXT");
+ case 0x07: return ("NCQ_NON_DATA ZAC MANAGEMENT OUT");
+ }
+ return ("NCQ_NON_DATA");
+ case 0x64:
+ switch (cmd->sector_count_exp & 0xf) {
+ case 0x00: return ("SEND_FPDMA_QUEUED DATA SET MANAGEMENT");
+ case 0x02: return ("SEND_FPDMA_QUEUED WRITE LOG DMA EXT");
+ case 0x03: return ("SEND_FPDMA_QUEUED ZAC MANAGEMENT OUT");
+ case 0x04: return ("SEND_FPDMA_QUEUED DATA SET MANAGEMENT XL");
+ }
+ return ("SEND_FPDMA_QUEUED");
+ case 0x65:
+ switch (cmd->sector_count_exp & 0xf) {
+ case 0x01: return ("RECEIVE_FPDMA_QUEUED READ LOG DMA EXT");
+ case 0x02: return ("RECEIVE_FPDMA_QUEUED ZAC MANAGEMENT IN");
+ }
+ return ("RECEIVE_FPDMA_QUEUED");
case 0x67:
if (cmd->features == 0xec)
return ("SEP_ATTN IDENTIFY");
@@ -136,6 +163,7 @@ ata_op_string(struct ata_cmd *cmd)
case 0x87: return ("CFA_TRANSLATE_SECTOR");
case 0x90: return ("EXECUTE_DEVICE_DIAGNOSTIC");
case 0x92: return ("DOWNLOAD_MICROCODE");
+ case 0x9a: return ("ZAC_MANAGEMENT_OUT");
case 0xa0: return ("PACKET");
case 0xa1: return ("ATAPI_IDENTIFY");
case 0xa2: return ("SERVICE");
@@ -179,23 +207,44 @@ ata_op_string(struct ata_cmd *cmd)
case 0xec: return ("ATA_IDENTIFY");
case 0xed: return ("MEDIA_EJECT");
case 0xef:
+ /*
+ * XXX KDM need common decoding between NCQ and non-NCQ
+ * versions of SET FEATURES.
+ */
switch (cmd->features) {
- case 0x03: return ("SETFEATURES SET TRANSFER MODE");
- case 0x02: return ("SETFEATURES ENABLE WCACHE");
- case 0x82: return ("SETFEATURES DISABLE WCACHE");
- case 0x06: return ("SETFEATURES ENABLE PUIS");
- case 0x86: return ("SETFEATURES DISABLE PUIS");
- case 0x07: return ("SETFEATURES SPIN-UP");
- case 0x10: return ("SETFEATURES ENABLE SATA FEATURE");
- case 0x90: return ("SETFEATURES DISABLE SATA FEATURE");
- case 0xaa: return ("SETFEATURES ENABLE RCACHE");
- case 0x55: return ("SETFEATURES DISABLE RCACHE");
+ case 0x02: return ("SETFEATURES ENABLE WCACHE");
+ case 0x03: return ("SETFEATURES SET TRANSFER MODE");
+ case 0x04: return ("SETFEATURES ENABLE APM");
+ case 0x06: return ("SETFEATURES ENABLE PUIS");
+ case 0x07: return ("SETFEATURES SPIN-UP");
+ case 0x0b: return ("SETFEATURES ENABLE WRITE READ VERIFY");
+ case 0x0c: return ("SETFEATURES ENABLE DEVICE LIFE CONTROL");
+ case 0x10: return ("SETFEATURES ENABLE SATA FEATURE");
+ case 0x41: return ("SETFEATURES ENABLE FREEFALL CONTROL");
+ case 0x43: return ("SETFEATURES SET MAX HOST INT SECT TIMES");
+ case 0x45: return ("SETFEATURES SET RATE BASIS");
+ case 0x4a: return ("SETFEATURES EXTENDED POWER CONDITIONS");
+ case 0x55: return ("SETFEATURES DISABLE RCACHE");
case 0x5d: return ("SETFEATURES ENABLE RELIRQ");
- case 0xdd: return ("SETFEATURES DISABLE RELIRQ");
case 0x5e: return ("SETFEATURES ENABLE SRVIRQ");
+ case 0x62: return ("SETFEATURES LONG PHYS SECT ALIGN ERC");
+ case 0x63: return ("SETFEATURES DSN");
+ case 0x66: return ("SETFEATURES DISABLE DEFAULTS");
+ case 0x82: return ("SETFEATURES DISABLE WCACHE");
+ case 0x85: return ("SETFEATURES DISABLE APM");
+ case 0x86: return ("SETFEATURES DISABLE PUIS");
+ case 0x8b: return ("SETFEATURES DISABLE WRITE READ VERIFY");
+ case 0x8c: return ("SETFEATURES DISABLE DEVICE LIFE CONTROL");
+ case 0x90: return ("SETFEATURES DISABLE SATA FEATURE");
+ case 0xaa: return ("SETFEATURES ENABLE RCACHE");
+ case 0xC1: return ("SETFEATURES DISABLE FREEFALL CONTROL");
+ case 0xC3: return ("SETFEATURES SENSE DATA REPORTING");
+ case 0xC4: return ("SETFEATURES NCQ SENSE DATA RETURN");
+ case 0xCC: return ("SETFEATURES ENABLE DEFAULTS");
+ case 0xdd: return ("SETFEATURES DISABLE RELIRQ");
case 0xde: return ("SETFEATURES DISABLE SRVIRQ");
- }
- return "SETFEATURES";
+ }
+ return "SETFEATURES";
case 0xf1: return ("SECURITY_SET_PASSWORD");
case 0xf2: return ("SECURITY_UNLOCK");
case 0xf3: return ("SECURITY_ERASE_PREPARE");
@@ -463,7 +512,8 @@ ata_48bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint16_t features,
cmd == ATA_WRITE_DMA_QUEUED48 ||
cmd == ATA_WRITE_DMA_QUEUED_FUA48 ||
cmd == ATA_WRITE_STREAM_DMA48 ||
- cmd == ATA_DATA_SET_MANAGEMENT)
+ cmd == ATA_DATA_SET_MANAGEMENT ||
+ cmd == ATA_READ_LOG_DMA_EXT)
ataio->cmd.flags |= CAM_ATAIO_DMA;
ataio->cmd.command = cmd;
ataio->cmd.features = features;
@@ -534,6 +584,36 @@ ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint32_t val)
}
void
+ata_read_log(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint32_t log_address, uint32_t page_number, uint16_t block_count,
+ uint32_t protocol, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint32_t timeout)
+{
+ uint64_t lba;
+
+ cam_fill_ataio(ataio,
+ /*retries*/ 1,
+ /*cbfcnp*/ cbfcnp,
+ /*flags*/ CAM_DIR_IN,
+ /*tag_action*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*timeout*/ timeout);
+
+ lba = (((uint64_t)page_number & 0xff00) << 32) |
+ ((page_number & 0x00ff) << 8) |
+ (log_address & 0xff);
+
+ ata_48bit_cmd(ataio,
+ /*cmd*/ (protocol & CAM_ATAIO_DMA) ? ATA_READ_LOG_DMA_EXT :
+ ATA_READ_LOG_EXT,
+ /*features*/ 0,
+ /*lba*/ lba,
+ /*sector_count*/ block_count);
+}
+
+void
ata_bswap(int8_t *buf, int len)
{
u_int16_t *ptr = (u_int16_t*)(buf + len);
@@ -893,3 +973,148 @@ semb_write_buffer(struct ccb_ataio *ataio,
length > 0 ? data_ptr[0] : 0, 0x80, length / 4);
}
+
+void
+ata_zac_mgmt_out(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ int use_ncq, uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint16_t sector_count, uint8_t *data_ptr,
+ uint32_t dxfer_len, uint32_t timeout)
+{
+ uint8_t command_out, ata_flags;
+ uint16_t features_out, sectors_out;
+ uint32_t auxiliary;
+
+ if (use_ncq == 0) {
+ command_out = ATA_ZAC_MANAGEMENT_OUT;
+ features_out = (zm_action & 0xf) | (zone_flags << 8);
+ if (dxfer_len == 0) {
+ ata_flags = 0;
+ sectors_out = 0;
+ } else {
+ ata_flags = CAM_ATAIO_DMA;
+ /* XXX KDM use sector count? */
+ sectors_out = ((dxfer_len >> 9) & 0xffff);
+ }
+ auxiliary = 0;
+ } else {
+ if (dxfer_len == 0) {
+ command_out = ATA_NCQ_NON_DATA;
+ features_out = ATA_NCQ_ZAC_MGMT_OUT;
+ sectors_out = 0;
+ } else {
+ command_out = ATA_SEND_FPDMA_QUEUED;
+
+ /* Note that we're defaulting to normal priority */
+ sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
+
+ /*
+ * For SEND FPDMA QUEUED, the transfer length is
+ * encoded in the FEATURE register, and 0 means
+ * that 65536 512 byte blocks are to be tranferred.
+ * In practice, it seems unlikely that we'll see
+ * a transfer that large.
+ */
+ if (dxfer_len == (65536 * 512)) {
+ features_out = 0;
+ } else {
+ /*
+ * Yes, the caller can theoretically send a
+ * transfer larger than we can handle.
+ * Anyone using this function needs enough
+ * knowledge to avoid doing that.
+ */
+ features_out = ((dxfer_len >> 9) & 0xffff);
+ }
+ }
+ auxiliary = (zm_action & 0xf) | (zone_flags << 8);
+
+ ata_flags = CAM_ATAIO_FPDMA;
+ }
+
+ cam_fill_ataio(ataio,
+ /*retries*/ retries,
+ /*cbfcnp*/ cbfcnp,
+ /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
+ /*tag_action*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*timeout*/ timeout);
+
+ ata_48bit_cmd(ataio,
+ /*cmd*/ command_out,
+ /*features*/ features_out,
+ /*lba*/ zone_id,
+ /*sector_count*/ sectors_out);
+
+ ataio->cmd.flags |= ata_flags;
+ if (auxiliary != 0) {
+ ataio->ata_flags |= ATA_FLAG_AUX;
+ ataio->aux = auxiliary;
+ }
+}
+
+void
+ata_zac_mgmt_in(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ int use_ncq, uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint32_t timeout)
+{
+ uint8_t command_out, ata_flags;
+ uint16_t features_out, sectors_out;
+ uint32_t auxiliary;
+
+ if (use_ncq == 0) {
+ command_out = ATA_ZAC_MANAGEMENT_IN;
+ /* XXX KDM put a macro here */
+ features_out = (zm_action & 0xf) | (zone_flags << 8);
+ ata_flags = CAM_ATAIO_DMA;
+ sectors_out = ((dxfer_len >> 9) & 0xffff);
+ auxiliary = 0;
+ } else {
+ command_out = ATA_RECV_FPDMA_QUEUED;
+ sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
+ auxiliary = (zm_action & 0xf) | (zone_flags << 8),
+ ata_flags = CAM_ATAIO_FPDMA;
+ /*
+ * For RECEIVE FPDMA QUEUED, the transfer length is
+ * encoded in the FEATURE register, and 0 means
+ * that 65536 512 byte blocks are to be tranferred.
+ * In practice, it is unlikely we will see a transfer that
+ * large.
+ */
+ if (dxfer_len == (65536 * 512)) {
+ features_out = 0;
+ } else {
+ /*
+ * Yes, the caller can theoretically request a
+ * transfer larger than we can handle.
+ * Anyone using this function needs enough
+ * knowledge to avoid doing that.
+ */
+ features_out = ((dxfer_len >> 9) & 0xffff);
+ }
+ }
+
+ cam_fill_ataio(ataio,
+ /*retries*/ retries,
+ /*cbfcnp*/ cbfcnp,
+ /*flags*/ CAM_DIR_IN,
+ /*tag_action*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*timeout*/ timeout);
+
+ ata_48bit_cmd(ataio,
+ /*cmd*/ command_out,
+ /*features*/ features_out,
+ /*lba*/ zone_id,
+ /*sector_count*/ sectors_out);
+
+ ataio->cmd.flags |= ata_flags;
+ if (auxiliary != 0) {
+ ataio->ata_flags |= ATA_FLAG_AUX;
+ ataio->aux = auxiliary;
+ }
+}
diff --git a/sys/cam/ata/ata_all.h b/sys/cam/ata/ata_all.h
index 433c61c..ea902d0 100644
--- a/sys/cam/ata/ata_all.h
+++ b/sys/cam/ata/ata_all.h
@@ -125,6 +125,11 @@ void ata_ncq_cmd(struct ccb_ataio *ataio, uint8_t cmd,
void ata_reset_cmd(struct ccb_ataio *ataio);
void ata_pm_read_cmd(struct ccb_ataio *ataio, int reg, int port);
void ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint32_t val);
+void ata_read_log(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint32_t log_address, uint32_t page_number,
+ uint16_t block_count, uint32_t protocol,
+ uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout);
void ata_bswap(int8_t *buf, int len);
void ata_btrim(int8_t *buf, int len);
@@ -167,4 +172,16 @@ void semb_write_buffer(struct ccb_ataio *ataio,
uint8_t tag_action, uint8_t *data_ptr, uint16_t param_list_length,
uint32_t timeout);
+void ata_zac_mgmt_out(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ int use_ncq __unused, uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint16_t sector_count, uint8_t *data_ptr,
+ uint32_t dxfer_len, uint32_t timeout);
+
+void ata_zac_mgmt_in(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ int use_ncq __unused, uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint32_t timeout);
+
#endif
diff --git a/sys/cam/ata/ata_da.c b/sys/cam/ata/ata_da.c
index 76b87f1..8a6e241 100644
--- a/sys/cam/ata/ata_da.c
+++ b/sys/cam/ata/ata_da.c
@@ -43,9 +43,11 @@ __FBSDID("$FreeBSD$");
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
+#include <sys/endian.h>
#include <sys/cons.h>
#include <sys/proc.h>
#include <sys/reboot.h>
+#include <sys/sbuf.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */
@@ -58,6 +60,8 @@ __FBSDID("$FreeBSD$");
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
#include <cam/cam_sim.h>
#include <cam/cam_iosched.h>
@@ -74,25 +78,37 @@ extern int iosched_debug;
typedef enum {
ADA_STATE_RAHEAD,
ADA_STATE_WCACHE,
+ ADA_STATE_LOGDIR,
+ ADA_STATE_IDDIR,
+ ADA_STATE_SUP_CAP,
+ ADA_STATE_ZONE,
ADA_STATE_NORMAL
} ada_state;
typedef enum {
- ADA_FLAG_CAN_48BIT = 0x0002,
- ADA_FLAG_CAN_FLUSHCACHE = 0x0004,
- ADA_FLAG_CAN_NCQ = 0x0008,
- ADA_FLAG_CAN_DMA = 0x0010,
- ADA_FLAG_NEED_OTAG = 0x0020,
- ADA_FLAG_WAS_OTAG = 0x0040,
- ADA_FLAG_CAN_TRIM = 0x0080,
- ADA_FLAG_OPEN = 0x0100,
- ADA_FLAG_SCTX_INIT = 0x0200,
- ADA_FLAG_CAN_CFA = 0x0400,
- ADA_FLAG_CAN_POWERMGT = 0x0800,
- ADA_FLAG_CAN_DMA48 = 0x1000,
- ADA_FLAG_DIRTY = 0x2000,
- ADA_FLAG_CAN_NCQ_TRIM = 0x4000, /* CAN_TRIM also set */
- ADA_FLAG_PIM_CAN_NCQ_TRIM = 0x8000
+ ADA_FLAG_CAN_48BIT = 0x00000002,
+ ADA_FLAG_CAN_FLUSHCACHE = 0x00000004,
+ ADA_FLAG_CAN_NCQ = 0x00000008,
+ ADA_FLAG_CAN_DMA = 0x00000010,
+ ADA_FLAG_NEED_OTAG = 0x00000020,
+ ADA_FLAG_WAS_OTAG = 0x00000040,
+ ADA_FLAG_CAN_TRIM = 0x00000080,
+ ADA_FLAG_OPEN = 0x00000100,
+ ADA_FLAG_SCTX_INIT = 0x00000200,
+ ADA_FLAG_CAN_CFA = 0x00000400,
+ ADA_FLAG_CAN_POWERMGT = 0x00000800,
+ ADA_FLAG_CAN_DMA48 = 0x00001000,
+ ADA_FLAG_CAN_LOG = 0x00002000,
+ ADA_FLAG_CAN_IDLOG = 0x00004000,
+ ADA_FLAG_CAN_SUPCAP = 0x00008000,
+ ADA_FLAG_CAN_ZONE = 0x00010000,
+ ADA_FLAG_CAN_WCACHE = 0x00020000,
+ ADA_FLAG_CAN_RAHEAD = 0x00040000,
+ ADA_FLAG_PROBED = 0x00080000,
+ ADA_FLAG_ANNOUNCED = 0x00100000,
+ ADA_FLAG_DIRTY = 0x00200000,
+ ADA_FLAG_CAN_NCQ_TRIM = 0x00400000, /* CAN_TRIM also set */
+ ADA_FLAG_PIM_ATA_EXT = 0x00800000
} ada_flags;
typedef enum {
@@ -112,9 +128,52 @@ typedef enum {
ADA_CCB_BUFFER_IO = 0x03,
ADA_CCB_DUMP = 0x05,
ADA_CCB_TRIM = 0x06,
+ ADA_CCB_LOGDIR = 0x07,
+ ADA_CCB_IDDIR = 0x08,
+ ADA_CCB_SUP_CAP = 0x09,
+ ADA_CCB_ZONE = 0x0a,
ADA_CCB_TYPE_MASK = 0x0F,
} ada_ccb_state;
+typedef enum {
+ ADA_ZONE_NONE = 0x00,
+ ADA_ZONE_DRIVE_MANAGED = 0x01,
+ ADA_ZONE_HOST_AWARE = 0x02,
+ ADA_ZONE_HOST_MANAGED = 0x03
+} ada_zone_mode;
+
+typedef enum {
+ ADA_ZONE_FLAG_RZ_SUP = 0x0001,
+ ADA_ZONE_FLAG_OPEN_SUP = 0x0002,
+ ADA_ZONE_FLAG_CLOSE_SUP = 0x0004,
+ ADA_ZONE_FLAG_FINISH_SUP = 0x0008,
+ ADA_ZONE_FLAG_RWP_SUP = 0x0010,
+ ADA_ZONE_FLAG_SUP_MASK = (ADA_ZONE_FLAG_RZ_SUP |
+ ADA_ZONE_FLAG_OPEN_SUP |
+ ADA_ZONE_FLAG_CLOSE_SUP |
+ ADA_ZONE_FLAG_FINISH_SUP |
+ ADA_ZONE_FLAG_RWP_SUP),
+ ADA_ZONE_FLAG_URSWRZ = 0x0020,
+ ADA_ZONE_FLAG_OPT_SEQ_SET = 0x0040,
+ ADA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080,
+ ADA_ZONE_FLAG_MAX_SEQ_SET = 0x0100,
+ ADA_ZONE_FLAG_SET_MASK = (ADA_ZONE_FLAG_OPT_SEQ_SET |
+ ADA_ZONE_FLAG_OPT_NONSEQ_SET |
+ ADA_ZONE_FLAG_MAX_SEQ_SET)
+} ada_zone_flags;
+
+static struct ada_zone_desc {
+ ada_zone_flags value;
+ const char *desc;
+} ada_zone_desc_table[] = {
+ {ADA_ZONE_FLAG_RZ_SUP, "Report Zones" },
+ {ADA_ZONE_FLAG_OPEN_SUP, "Open" },
+ {ADA_ZONE_FLAG_CLOSE_SUP, "Close" },
+ {ADA_ZONE_FLAG_FINISH_SUP, "Finish" },
+ {ADA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
+};
+
+
/* Offsets into our private area for storing information */
#define ccb_state ppriv_field0
#define ccb_bp ppriv_ptr1
@@ -157,6 +216,15 @@ struct ada_softc {
int refcount; /* Active xpt_action() calls */
ada_state state;
ada_flags flags;
+ ada_zone_mode zone_mode;
+ ada_zone_flags zone_flags;
+ struct ata_gp_log_dir ata_logdir;
+ int valid_logdir_len;
+ struct ata_identify_log_pages ata_iddir;
+ int valid_iddir_len;
+ uint64_t optimal_seq_zones;
+ uint64_t optimal_nonseq_zones;
+ uint64_t max_seq_zones;
ada_quirks quirks;
ada_delete_methods delete_method;
int trim_max_ranges;
@@ -624,13 +692,28 @@ static struct ada_quirk_entry ada_quirk_table[] =
static disk_strategy_t adastrategy;
static dumper_t adadump;
static periph_init_t adainit;
+static void adadiskgonecb(struct disk *dp);
+static periph_oninv_t adaoninvalidate;
+static periph_dtor_t adacleanup;
static void adaasync(void *callback_arg, u_int32_t code,
struct cam_path *path, void *arg);
+static int adazonemodesysctl(SYSCTL_HANDLER_ARGS);
+static int adazonesupsysctl(SYSCTL_HANDLER_ARGS);
static void adasysctlinit(void *context, int pending);
+static int adagetattr(struct bio *bp);
+static void adasetflags(struct ada_softc *softc,
+ struct ccb_getdev *cgd);
static periph_ctor_t adaregister;
-static periph_dtor_t adacleanup;
+static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp,
+ struct ccb_ataio *ataio);
+static void ada_cfaerase(struct ada_softc *softc, struct bio *bp,
+ struct ccb_ataio *ataio);
+static int ada_zone_bio_to_ata(int disk_zone_cmd);
+static int ada_zone_cmd(struct cam_periph *periph, union ccb *ccb,
+ struct bio *bp, int *queue_ccb);
static periph_start_t adastart;
-static periph_oninv_t adaoninvalidate;
+static void adaprobedone(struct cam_periph *periph, union ccb *ccb);
+static void adazonedone(struct cam_periph *periph, union ccb *ccb);
static void adadone(struct cam_periph *periph,
union ccb *done_ccb);
static int adaerror(union ccb *ccb, u_int32_t cam_flags,
@@ -738,6 +821,8 @@ static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
PERIPHDRIVER_DECLARE(ada, adadriver);
+static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
+
static int
adaopen(struct disk *dp)
{
@@ -860,6 +945,14 @@ adastrategy(struct bio *bp)
biofinish(bp, NULL, ENXIO);
return;
}
+
+ /*
+ * Zone commands must be ordered, because they can depend on the
+ * effects of previously issued commands, and they may affect
+ * commands after them.
+ */
+ if (bp->bio_cmd == BIO_ZONE)
+ bp->bio_flags |= BIO_ORDERED;
/*
* Place it in the queue of disk activities for this disk
@@ -1129,45 +1222,10 @@ adaasync(void *callback_arg, u_int32_t code,
cgd.ccb_h.func_code = XPT_GDEV_TYPE;
xpt_action((union ccb *)&cgd);
- if ((cgd.ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
- (cgd.inq_flags & SID_DMA))
- softc->flags |= ADA_FLAG_CAN_DMA;
- else
- softc->flags &= ~ADA_FLAG_CAN_DMA;
- if (cgd.ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
- softc->flags |= ADA_FLAG_CAN_48BIT;
- if (cgd.inq_flags & SID_DMA48)
- softc->flags |= ADA_FLAG_CAN_DMA48;
- else
- softc->flags &= ~ADA_FLAG_CAN_DMA48;
- } else
- softc->flags &= ~(ADA_FLAG_CAN_48BIT |
- ADA_FLAG_CAN_DMA48);
- if ((cgd.ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
- (cgd.inq_flags & SID_DMA) && (cgd.inq_flags & SID_CmdQue))
- softc->flags |= ADA_FLAG_CAN_NCQ;
- else
- softc->flags &= ~ADA_FLAG_CAN_NCQ;
-
- if ((cgd.ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
- (cgd.inq_flags & SID_DMA)) {
- softc->flags |= ADA_FLAG_CAN_TRIM;
- /*
- * If we can do RCVSND_FPDMA_QUEUED commands, we may be able to do
- * NCQ trims, if we support trims at all. We also need support from
- * the sim do do things properly. Perhaps we should look at log 13
- * dword 0 bit 0 and dword 1 bit 0 are set too...
- */
- if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
- (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 &&
- (cgd.ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
- (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
- softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
- else
- softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
- } else
- softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM);
- adasetdeletemethod(softc);
+ /*
+ * Set/clear support flags based on the new Identify data.
+ */
+ adasetflags(softc, &cgd);
cam_periph_async(periph, code, path, arg);
break;
@@ -1196,12 +1254,12 @@ adaasync(void *callback_arg, u_int32_t code,
xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
cgd.ccb_h.func_code = XPT_GDEV_TYPE;
xpt_action((union ccb *)&cgd);
- if (ADA_RA >= 0 &&
- cgd.ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
+ if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD)
softc->state = ADA_STATE_RAHEAD;
- else if (ADA_WC >= 0 &&
- cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
+		else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE)
softc->state = ADA_STATE_WCACHE;
+ else if (softc->flags & ADA_FLAG_CAN_LOG)
+ softc->state = ADA_STATE_LOGDIR;
else
break;
if (cam_periph_acquire(periph) != CAM_REQ_CMP)
@@ -1215,6 +1273,73 @@ adaasync(void *callback_arg, u_int32_t code,
}
}
+static int
+adazonemodesysctl(SYSCTL_HANDLER_ARGS)
+{
+ char tmpbuf[40];
+ struct ada_softc *softc;
+ int error;
+
+ softc = (struct ada_softc *)arg1;
+
+ switch (softc->zone_mode) {
+ case ADA_ZONE_DRIVE_MANAGED:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
+ break;
+ case ADA_ZONE_HOST_AWARE:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
+ break;
+ case ADA_ZONE_HOST_MANAGED:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
+ break;
+ case ADA_ZONE_NONE:
+ default:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
+ break;
+ }
+
+ error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
+
+ return (error);
+}
+
+static int
+adazonesupsysctl(SYSCTL_HANDLER_ARGS)
+{
+ char tmpbuf[180];
+ struct ada_softc *softc;
+ struct sbuf sb;
+ int error, first;
+ unsigned int i;
+
+ softc = (struct ada_softc *)arg1;
+
+ error = 0;
+ first = 1;
+ sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
+
+ for (i = 0; i < sizeof(ada_zone_desc_table) /
+ sizeof(ada_zone_desc_table[0]); i++) {
+ if (softc->zone_flags & ada_zone_desc_table[i].value) {
+ if (first == 0)
+ sbuf_printf(&sb, ", ");
+ else
+ first = 0;
+ sbuf_cat(&sb, ada_zone_desc_table[i].desc);
+ }
+ }
+
+ if (first == 1)
+ sbuf_printf(&sb, "None");
+
+ sbuf_finish(&sb);
+
+ error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
+
+ return (error);
+}
+
+
static void
adasysctlinit(void *context, int pending)
{
@@ -1231,7 +1356,7 @@ adasysctlinit(void *context, int pending)
}
softc = (struct ada_softc *)periph->softc;
- snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
+ snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d",periph->unit_number);
snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
sysctl_ctx_init(&softc->sysctl_ctx);
@@ -1261,6 +1386,29 @@ adasysctlinit(void *context, int pending)
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE,
&softc->rotating, 0, "Rotating media");
+ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
+ softc, 0, adazonemodesysctl, "A",
+ "Zone Mode");
+ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
+ softc, 0, adazonesupsysctl, "A",
+ "Zone Support");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
+ "Optimal Number of Open Sequential Write Preferred Zones");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "optimal_nonseq_zones", CTLFLAG_RD,
+ &softc->optimal_nonseq_zones,
+ "Optimal Number of Non-Sequentially Written Sequential Write "
+ "Preferred Zones");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
+ "Maximum Number of Open Sequential Write Required Zones");
+
#ifdef ADA_TEST_FAILURE
/*
* Add a 'door bell' sysctl which allows one to set it from userland
@@ -1361,6 +1509,103 @@ adadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
+static void
+adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd)
+{
+ if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
+ (cgd->inq_flags & SID_DMA))
+ softc->flags |= ADA_FLAG_CAN_DMA;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_DMA;
+
+ if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
+ softc->flags |= ADA_FLAG_CAN_48BIT;
+ if (cgd->inq_flags & SID_DMA48)
+ softc->flags |= ADA_FLAG_CAN_DMA48;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_DMA48;
+ } else
+ softc->flags &= ~(ADA_FLAG_CAN_48BIT | ADA_FLAG_CAN_DMA48);
+
+ if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
+ softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_FLUSHCACHE;
+
+ if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
+ softc->flags |= ADA_FLAG_CAN_POWERMGT;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_POWERMGT;
+
+ if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
+ (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
+ softc->flags |= ADA_FLAG_CAN_NCQ;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_NCQ;
+
+ if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
+ (cgd->inq_flags & SID_DMA)) {
+ softc->flags |= ADA_FLAG_CAN_TRIM;
+ softc->trim_max_ranges = TRIM_MAX_RANGES;
+ if (cgd->ident_data.max_dsm_blocks != 0) {
+ softc->trim_max_ranges =
+ min(cgd->ident_data.max_dsm_blocks *
+ ATA_DSM_BLK_RANGES, softc->trim_max_ranges);
+ }
+ /*
+ * If we can do RCVSND_FPDMA_QUEUED commands, we may be able
+ * to do NCQ trims, if we support trims at all. We also need
+ * support from the SIM to do things properly. Perhaps we
+ * should look at log 13 dword 0 bit 0 and dword 1 bit 0 are
+ * set too...
+ */
+ if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
+ (softc->flags & ADA_FLAG_PIM_ATA_EXT) != 0 &&
+ (cgd->ident_data.satacapabilities2 &
+ ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
+ (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
+ softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
+ } else
+ softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM);
+
+ if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
+ softc->flags |= ADA_FLAG_CAN_CFA;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_CFA;
+
+ /*
+ * Now that we've set the appropriate flags, setup the delete
+ * method.
+ */
+ adasetdeletemethod(softc);
+
+ if (cgd->ident_data.support.extension & ATA_SUPPORT_GENLOG)
+ softc->flags |= ADA_FLAG_CAN_LOG;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_LOG;
+
+ if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) ==
+ ATA_SUPPORT_ZONE_HOST_AWARE)
+ softc->zone_mode = ADA_ZONE_HOST_AWARE;
+ else if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) ==
+ ATA_SUPPORT_ZONE_DEV_MANAGED)
+ softc->zone_mode = ADA_ZONE_DRIVE_MANAGED;
+ else
+ softc->zone_mode = ADA_ZONE_NONE;
+
+ if (cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
+ softc->flags |= ADA_FLAG_CAN_RAHEAD;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_RAHEAD;
+
+ if (cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
+ softc->flags |= ADA_FLAG_CAN_WCACHE;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_WCACHE;
+}
+
static cam_status
adaregister(struct cam_periph *periph, void *arg)
{
@@ -1394,35 +1639,10 @@ adaregister(struct cam_periph *periph, void *arg)
return(CAM_REQ_CMP_ERR);
}
- if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
- (cgd->inq_flags & SID_DMA))
- softc->flags |= ADA_FLAG_CAN_DMA;
- if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
- softc->flags |= ADA_FLAG_CAN_48BIT;
- if (cgd->inq_flags & SID_DMA48)
- softc->flags |= ADA_FLAG_CAN_DMA48;
- }
- if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
- softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
- if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
- softc->flags |= ADA_FLAG_CAN_POWERMGT;
- if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
- (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
- softc->flags |= ADA_FLAG_CAN_NCQ;
- if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
- (cgd->inq_flags & SID_DMA)) {
- softc->flags |= ADA_FLAG_CAN_TRIM;
- softc->trim_max_ranges = TRIM_MAX_RANGES;
- if (cgd->ident_data.max_dsm_blocks != 0) {
- softc->trim_max_ranges =
- min(cgd->ident_data.max_dsm_blocks *
- ATA_DSM_BLK_RANGES, softc->trim_max_ranges);
- }
- }
- if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
- softc->flags |= ADA_FLAG_CAN_CFA;
-
- adasetdeletemethod(softc);
+ /*
+ * Set support flags based on the Identify data.
+ */
+ adasetflags(softc, cgd);
periph->softc = softc;
@@ -1498,7 +1718,7 @@ adaregister(struct cam_periph *periph, void *arg)
maxio = min(maxio, 256 * softc->params.secsize);
softc->disk->d_maxsize = maxio;
softc->disk->d_unit = periph->unit_number;
- softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION;
+ softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
if (softc->flags & ADA_FLAG_CAN_TRIM) {
@@ -1516,19 +1736,6 @@ adaregister(struct cam_periph *periph, void *arg)
softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
softc->unmappedio = 1;
}
- /*
- * If we can do RCVSND_FPDMA_QUEUED commands, we may be able to do
- * NCQ trims, if we support trims at all. We also need support from
- * the sim do do things properly. Perhaps we should look at log 13
- * dword 0 bit 0 and dword 1 bit 0 are set too...
- */
- if (cpi.hba_misc & PIM_ATA_EXT)
- softc->flags |= ADA_FLAG_PIM_CAN_NCQ_TRIM;
- if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
- (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 &&
- (cgd->ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
- (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
- softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
strlcpy(softc->disk->d_descr, cgd->ident_data.model,
MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
strlcpy(softc->disk->d_ident, cgd->ident_data.serial,
@@ -1555,7 +1762,6 @@ adaregister(struct cam_periph *periph, void *arg)
softc->disk->d_fwsectors = softc->params.secs_per_track;
softc->disk->d_fwheads = softc->params.heads;
ata_disk_firmware_geom_adjust(softc->disk);
- adasetdeletemethod(softc);
/*
* Acquire a reference to the periph before we register with GEOM.
@@ -1570,7 +1776,6 @@ adaregister(struct cam_periph *periph, void *arg)
}
disk_create(softc->disk, DISK_VERSION);
cam_periph_lock(periph);
- cam_periph_unhold(periph);
dp = &softc->params;
snprintf(announce_buf, sizeof(announce_buf),
@@ -1608,20 +1813,23 @@ adaregister(struct cam_periph *periph, void *arg)
(ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
adasendorderedtag, softc);
- if (ADA_RA >= 0 &&
- cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) {
+ if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) {
softc->state = ADA_STATE_RAHEAD;
- } else if (ADA_WC >= 0 &&
- cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
+ } else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) {
softc->state = ADA_STATE_WCACHE;
+ } else if (softc->flags & ADA_FLAG_CAN_LOG) {
+ softc->state = ADA_STATE_LOGDIR;
} else {
- softc->state = ADA_STATE_NORMAL;
+ /*
+ * Nothing to probe, so we can just transition to the
+ * normal state.
+ */
+ adaprobedone(periph, NULL);
return(CAM_REQ_CMP);
}
- if (cam_periph_acquire(periph) != CAM_REQ_CMP)
- softc->state = ADA_STATE_NORMAL;
- else
- xpt_schedule(periph, CAM_PRIORITY_DEV);
+
+ xpt_schedule(periph, CAM_PRIORITY_DEV);
+
return(CAM_REQ_CMP);
}
@@ -1754,6 +1962,209 @@ ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
}
+static int
+ada_zone_bio_to_ata(int disk_zone_cmd)
+{
+ switch (disk_zone_cmd) {
+ case DISK_ZONE_OPEN:
+ return ATA_ZM_OPEN_ZONE;
+ case DISK_ZONE_CLOSE:
+ return ATA_ZM_CLOSE_ZONE;
+ case DISK_ZONE_FINISH:
+ return ATA_ZM_FINISH_ZONE;
+ case DISK_ZONE_RWP:
+ return ATA_ZM_RWP;
+ }
+
+ return -1;
+}
+
+static int
+ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
+ int *queue_ccb)
+{
+ struct ada_softc *softc;
+ int error;
+
+ error = 0;
+
+ if (bp->bio_cmd != BIO_ZONE) {
+ error = EINVAL;
+ goto bailout;
+ }
+
+ softc = periph->softc;
+
+ switch (bp->bio_zone.zone_cmd) {
+ case DISK_ZONE_OPEN:
+ case DISK_ZONE_CLOSE:
+ case DISK_ZONE_FINISH:
+ case DISK_ZONE_RWP: {
+ int zone_flags;
+ int zone_sa;
+ uint64_t lba;
+
+ zone_sa = ada_zone_bio_to_ata(bp->bio_zone.zone_cmd);
+ if (zone_sa == -1) {
+ xpt_print(periph->path, "Cannot translate zone "
+ "cmd %#x to ATA\n", bp->bio_zone.zone_cmd);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ zone_flags = 0;
+ lba = bp->bio_zone.zone_params.rwp.id;
+
+ if (bp->bio_zone.zone_params.rwp.flags &
+ DISK_ZONE_RWP_FLAG_ALL)
+ zone_flags |= ZBC_OUT_ALL;
+
+ ata_zac_mgmt_out(&ccb->ataio,
+ /*retries*/ ada_retry_count,
+ /*cbfcnp*/ adadone,
+ /*use_ncq*/ (softc->flags &
+ ADA_FLAG_PIM_ATA_EXT) ? 1 : 0,
+ /*zm_action*/ zone_sa,
+ /*zone_id*/ lba,
+ /*zone_flags*/ zone_flags,
+ /*sector_count*/ 0,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ /*timeout*/ ada_default_timeout * 1000);
+ *queue_ccb = 1;
+
+ break;
+ }
+ case DISK_ZONE_REPORT_ZONES: {
+ uint8_t *rz_ptr;
+ uint32_t num_entries, alloc_size;
+ struct disk_zone_report *rep;
+
+ rep = &bp->bio_zone.zone_params.report;
+
+ num_entries = rep->entries_allocated;
+ if (num_entries == 0) {
+ xpt_print(periph->path, "No entries allocated for "
+ "Report Zones request\n");
+ error = EINVAL;
+ goto bailout;
+ }
+ alloc_size = sizeof(struct scsi_report_zones_hdr) +
+ (sizeof(struct scsi_report_zones_desc) * num_entries);
+ alloc_size = min(alloc_size, softc->disk->d_maxsize);
+ rz_ptr = malloc(alloc_size, M_ATADA, M_NOWAIT | M_ZERO);
+ if (rz_ptr == NULL) {
+ xpt_print(periph->path, "Unable to allocate memory "
+ "for Report Zones request\n");
+ error = ENOMEM;
+ goto bailout;
+ }
+
+ ata_zac_mgmt_in(&ccb->ataio,
+ /*retries*/ ada_retry_count,
+		    /*cbfcnp*/ adadone,
+ /*use_ncq*/ (softc->flags &
+ ADA_FLAG_PIM_ATA_EXT) ? 1 : 0,
+ /*zm_action*/ ATA_ZM_REPORT_ZONES,
+ /*zone_id*/ rep->starting_id,
+ /*zone_flags*/ rep->rep_options,
+ /*data_ptr*/ rz_ptr,
+ /*dxfer_len*/ alloc_size,
+ /*timeout*/ ada_default_timeout * 1000);
+
+ /*
+ * For BIO_ZONE, this isn't normally needed. However, it
+ * is used by devstat_end_transaction_bio() to determine
+ * how much data was transferred.
+ */
+ /*
+ * XXX KDM we have a problem. But I'm not sure how to fix
+ * it. devstat uses bio_bcount - bio_resid to calculate
+ * the amount of data transferred. The GEOM disk code
+ * uses bio_length - bio_resid to calculate the amount of
+ * data in bio_completed. We have different structure
+ * sizes above and below the ada(4) driver. So, if we
+ * use the sizes above, the amount transferred won't be
+ * quite accurate for devstat. If we use different sizes
+ * for bio_bcount and bio_length (above and below
+ * respectively), then the residual needs to match one or
+ * the other. Everything is calculated after the bio
+ * leaves the driver, so changing the values around isn't
+ * really an option. For now, just set the count to the
+ * passed in length. This means that the calculations
+ * above (e.g. bio_completed) will be correct, but the
+ * amount of data reported to devstat will be slightly
+ * under or overstated.
+ */
+ bp->bio_bcount = bp->bio_length;
+
+ *queue_ccb = 1;
+
+ break;
+ }
+ case DISK_ZONE_GET_PARAMS: {
+ struct disk_zone_disk_params *params;
+
+ params = &bp->bio_zone.zone_params.disk_params;
+ bzero(params, sizeof(*params));
+
+ switch (softc->zone_mode) {
+ case ADA_ZONE_DRIVE_MANAGED:
+ params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
+ break;
+ case ADA_ZONE_HOST_AWARE:
+ params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
+ break;
+ case ADA_ZONE_HOST_MANAGED:
+ params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
+ break;
+ default:
+ case ADA_ZONE_NONE:
+ params->zone_mode = DISK_ZONE_MODE_NONE;
+ break;
+ }
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_URSWRZ)
+ params->flags |= DISK_ZONE_DISK_URSWRZ;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_OPT_SEQ_SET) {
+ params->optimal_seq_zones = softc->optimal_seq_zones;
+ params->flags |= DISK_ZONE_OPT_SEQ_SET;
+ }
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_OPT_NONSEQ_SET) {
+ params->optimal_nonseq_zones =
+ softc->optimal_nonseq_zones;
+ params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
+ }
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_MAX_SEQ_SET) {
+ params->max_seq_zones = softc->max_seq_zones;
+ params->flags |= DISK_ZONE_MAX_SEQ_SET;
+ }
+ if (softc->zone_flags & ADA_ZONE_FLAG_RZ_SUP)
+ params->flags |= DISK_ZONE_RZ_SUP;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_OPEN_SUP)
+ params->flags |= DISK_ZONE_OPEN_SUP;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_CLOSE_SUP)
+ params->flags |= DISK_ZONE_CLOSE_SUP;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_FINISH_SUP)
+ params->flags |= DISK_ZONE_FINISH_SUP;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_RWP_SUP)
+ params->flags |= DISK_ZONE_RWP_SUP;
+ break;
+ }
+ default:
+ break;
+ }
+bailout:
+ return (error);
+}
+
static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
@@ -1941,6 +2352,20 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
else
ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
break;
+ case BIO_ZONE: {
+ int error, queue_ccb;
+
+ queue_ccb = 0;
+
+ error = ada_zone_cmd(periph, start_ccb, bp, &queue_ccb);
+ if ((error != 0)
+ || (queue_ccb == 0)) {
+ biofinish(bp, NULL, error);
+ xpt_release_ccb(start_ccb);
+ return;
+ }
+ break;
+ }
}
start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
start_ccb->ccb_h.flags |= CAM_UNLOCKED;
@@ -1982,21 +2407,306 @@ out:
xpt_action(start_ccb);
break;
}
+ case ADA_STATE_LOGDIR:
+ {
+ struct ata_gp_log_dir *log_dir;
+
+ if ((softc->flags & ADA_FLAG_CAN_LOG) == 0) {
+ adaprobedone(periph, start_ccb);
+ break;
+ }
+
+ log_dir = malloc(sizeof(*log_dir), M_ATADA, M_NOWAIT|M_ZERO);
+ if (log_dir == NULL) {
+ xpt_print(periph->path, "Couldn't malloc log_dir "
+ "data\n");
+ softc->state = ADA_STATE_NORMAL;
+ xpt_release_ccb(start_ccb);
+ break;
+ }
+
+
+ ata_read_log(ataio,
+ /*retries*/1,
+ /*cbfcnp*/adadone,
+ /*log_address*/ ATA_LOG_DIRECTORY,
+ /*page_number*/ 0,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
+ CAM_ATAIO_DMA : 0,
+ /*data_ptr*/ (uint8_t *)log_dir,
+ /*dxfer_len*/sizeof(*log_dir),
+ /*timeout*/ada_default_timeout*1000);
+
+ start_ccb->ccb_h.ccb_state = ADA_CCB_LOGDIR;
+ xpt_action(start_ccb);
+ break;
+ }
+ case ADA_STATE_IDDIR:
+ {
+ struct ata_identify_log_pages *id_dir;
+
+ id_dir = malloc(sizeof(*id_dir), M_ATADA, M_NOWAIT | M_ZERO);
+ if (id_dir == NULL) {
+ xpt_print(periph->path, "Couldn't malloc id_dir "
+ "data\n");
+ adaprobedone(periph, start_ccb);
+ break;
+ }
+
+ ata_read_log(ataio,
+ /*retries*/1,
+ /*cbfcnp*/adadone,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_PAGE_LIST,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
+ CAM_ATAIO_DMA : 0,
+ /*data_ptr*/ (uint8_t *)id_dir,
+ /*dxfer_len*/ sizeof(*id_dir),
+ /*timeout*/ada_default_timeout*1000);
+
+ start_ccb->ccb_h.ccb_state = ADA_CCB_IDDIR;
+ xpt_action(start_ccb);
+ break;
+ }
+ case ADA_STATE_SUP_CAP:
+ {
+ struct ata_identify_log_sup_cap *sup_cap;
+
+ sup_cap = malloc(sizeof(*sup_cap), M_ATADA, M_NOWAIT|M_ZERO);
+ if (sup_cap == NULL) {
+ xpt_print(periph->path, "Couldn't malloc sup_cap "
+ "data\n");
+ adaprobedone(periph, start_ccb);
+ break;
+ }
+
+ ata_read_log(ataio,
+ /*retries*/1,
+ /*cbfcnp*/adadone,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_SUP_CAP,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
+ CAM_ATAIO_DMA : 0,
+ /*data_ptr*/ (uint8_t *)sup_cap,
+ /*dxfer_len*/ sizeof(*sup_cap),
+ /*timeout*/ada_default_timeout*1000);
+
+ start_ccb->ccb_h.ccb_state = ADA_CCB_SUP_CAP;
+ xpt_action(start_ccb);
+ break;
+ }
+ case ADA_STATE_ZONE:
+ {
+ struct ata_zoned_info_log *ata_zone;
+
+ ata_zone = malloc(sizeof(*ata_zone), M_ATADA, M_NOWAIT|M_ZERO);
+ if (ata_zone == NULL) {
+ xpt_print(periph->path, "Couldn't malloc ata_zone "
+ "data\n");
+ adaprobedone(periph, start_ccb);
+ break;
+ }
+
+ ata_read_log(ataio,
+ /*retries*/1,
+ /*cbfcnp*/adadone,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_ZDI,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
+ CAM_ATAIO_DMA : 0,
+ /*data_ptr*/ (uint8_t *)ata_zone,
+ /*dxfer_len*/ sizeof(*ata_zone),
+ /*timeout*/ada_default_timeout*1000);
+
+ start_ccb->ccb_h.ccb_state = ADA_CCB_ZONE;
+ xpt_action(start_ccb);
+ break;
+ }
+ }
+}
+
+static void
+adaprobedone(struct cam_periph *periph, union ccb *ccb)
+{
+ struct ada_softc *softc;
+
+ softc = (struct ada_softc *)periph->softc;
+
+ if (ccb != NULL)
+ xpt_release_ccb(ccb);
+
+ softc->state = ADA_STATE_NORMAL;
+ softc->flags |= ADA_FLAG_PROBED;
+ adaschedule(periph);
+ if ((softc->flags & ADA_FLAG_ANNOUNCED) == 0) {
+ softc->flags |= ADA_FLAG_ANNOUNCED;
+ cam_periph_unhold(periph);
+ } else {
+ cam_periph_release_locked(periph);
+ }
+}
+
+static void
+adazonedone(struct cam_periph *periph, union ccb *ccb)
+{
+ struct ada_softc *softc;
+ struct bio *bp;
+
+ softc = periph->softc;
+ bp = (struct bio *)ccb->ccb_h.ccb_bp;
+
+ switch (bp->bio_zone.zone_cmd) {
+ case DISK_ZONE_OPEN:
+ case DISK_ZONE_CLOSE:
+ case DISK_ZONE_FINISH:
+ case DISK_ZONE_RWP:
+ break;
+ case DISK_ZONE_REPORT_ZONES: {
+ uint32_t avail_len;
+ struct disk_zone_report *rep;
+ struct scsi_report_zones_hdr *hdr;
+ struct scsi_report_zones_desc *desc;
+ struct disk_zone_rep_entry *entry;
+ uint32_t num_alloced, hdr_len, num_avail;
+ uint32_t num_to_fill, i;
+
+ rep = &bp->bio_zone.zone_params.report;
+ avail_len = ccb->ataio.dxfer_len - ccb->ataio.resid;
+ /*
+ * Note that bio_resid isn't normally used for zone
+ * commands, but it is used by devstat_end_transaction_bio()
+ * to determine how much data was transferred. Because
+ * the size of the SCSI/ATA data structures is different
+ * than the size of the BIO interface structures, the
+ * amount of data actually transferred from the drive will
+ * be different than the amount of data transferred to
+ * the user.
+ */
+ num_alloced = rep->entries_allocated;
+ hdr = (struct scsi_report_zones_hdr *)ccb->ataio.data_ptr;
+ if (avail_len < sizeof(*hdr)) {
+ /*
+ * Is there a better error than EIO here? We asked
+ * for at least the header, and we got less than
+ * that.
+ */
+ bp->bio_error = EIO;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ hdr_len = le32dec(hdr->length);
+ if (hdr_len > 0)
+ rep->entries_available = hdr_len / sizeof(*desc);
+ else
+ rep->entries_available = 0;
+ /*
+ * NOTE: using the same values for the BIO version of the
+ * same field as the SCSI/ATA values. This means we could
+ * get some additional values that aren't defined in bio.h
+ * if more values of the same field are defined later.
+ */
+ rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
+ rep->header.maximum_lba = le64dec(hdr->maximum_lba);
+ /*
+ * If the drive reports no entries that match the query,
+ * we're done.
+ */
+ if (hdr_len == 0) {
+ rep->entries_filled = 0;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
+ hdr_len / sizeof(*desc));
+ /*
+ * If the drive didn't return any data, then we're done.
+ */
+ if (num_avail == 0) {
+ rep->entries_filled = 0;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ num_to_fill = min(num_avail, rep->entries_allocated);
+ /*
+ * If the user didn't allocate any entries for us to fill,
+ * we're done.
+ */
+ if (num_to_fill == 0) {
+ rep->entries_filled = 0;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
+ i < num_to_fill; i++, desc++, entry++) {
+ /*
+ * NOTE: we're mapping the values here directly
+ * from the SCSI/ATA bit definitions to the bio.h
+ * definitions. There is also a warning in
+ * disk_zone.h, but the impact is that if
+ * additional values are added in the SCSI/ATA
+ * specs these will be visible to consumers of
+ * this interface.
+ */
+ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
+ entry->zone_condition =
+ (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
+ SRZ_ZONE_COND_SHIFT;
+ entry->zone_flags |= desc->zone_flags &
+ (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
+ entry->zone_length = le64dec(desc->zone_length);
+ entry->zone_start_lba = le64dec(desc->zone_start_lba);
+ entry->write_pointer_lba =
+ le64dec(desc->write_pointer_lba);
+ }
+ rep->entries_filled = num_to_fill;
+ /*
+ * Note that this residual is accurate from the user's
+ * standpoint, but the amount transferred isn't accurate
+ * from the standpoint of what actually came back from the
+ * drive.
+ */
+ bp->bio_resid = bp->bio_bcount - (num_to_fill * sizeof(*entry));
+ break;
+ }
+ case DISK_ZONE_GET_PARAMS:
+ default:
+ /*
+ * In theory we should not get a GET_PARAMS bio, since it
+ * should be handled without queueing the command to the
+ * drive.
+ */
+ panic("%s: Invalid zone command %d", __func__,
+ bp->bio_zone.zone_cmd);
+ break;
}
+
+ if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
+ free(ccb->ataio.data_ptr, M_ATADA);
}
+
static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
struct ada_softc *softc;
struct ccb_ataio *ataio;
- struct ccb_getdev *cgd;
struct cam_path *path;
+ uint32_t priority;
int state;
softc = (struct ada_softc *)periph->softc;
ataio = &done_ccb->ataio;
path = done_ccb->ccb_h.path;
+ priority = done_ccb->ccb_h.pinfo.priority;
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n"));
@@ -2040,6 +2750,7 @@ adadone(struct cam_periph *periph, union ccb *done_ccb)
} else {
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
panic("REQ_CMP with QFRZN");
+
error = 0;
}
bp->bio_error = error;
@@ -2047,11 +2758,15 @@ adadone(struct cam_periph *periph, union ccb *done_ccb)
bp->bio_resid = bp->bio_bcount;
bp->bio_flags |= BIO_ERROR;
} else {
- if (state == ADA_CCB_TRIM)
+ if (bp->bio_cmd == BIO_ZONE)
+ adazonedone(periph, done_ccb);
+ else if (state == ADA_CCB_TRIM)
bp->bio_resid = 0;
else
bp->bio_resid = ataio->resid;
- if (bp->bio_resid > 0)
+
+ if ((bp->bio_resid > 0)
+ && (bp->bio_cmd != BIO_ZONE))
bp->bio_flags |= BIO_ERROR;
}
softc->outstanding_cmds--;
@@ -2100,7 +2815,6 @@ adadone(struct cam_periph *periph, union ccb *done_ccb)
{
if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
if (adaerror(done_ccb, 0, 0) == ERESTART) {
-out:
/* Drop freeze taken due to CAM_DEV_QFREEZE */
cam_release_devq(path, 0, 0, 0, FALSE);
return;
@@ -2121,30 +2835,21 @@ out:
* is removed, and we need it around for the CCB release
* operation.
*/
- cgd = (struct ccb_getdev *)done_ccb;
- xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
- cgd->ccb_h.func_code = XPT_GDEV_TYPE;
- xpt_action((union ccb *)cgd);
- if (ADA_WC >= 0 &&
- cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
- softc->state = ADA_STATE_WCACHE;
- xpt_release_ccb(done_ccb);
- xpt_schedule(periph, CAM_PRIORITY_DEV);
- goto out;
- }
- softc->state = ADA_STATE_NORMAL;
+
xpt_release_ccb(done_ccb);
+ softc->state = ADA_STATE_WCACHE;
+ xpt_schedule(periph, priority);
/* Drop freeze taken due to CAM_DEV_QFREEZE */
cam_release_devq(path, 0, 0, 0, FALSE);
- adaschedule(periph);
- cam_periph_release_locked(periph);
return;
}
case ADA_CCB_WCACHE:
{
if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
if (adaerror(done_ccb, 0, 0) == ERESTART) {
- goto out;
+ /* Drop freeze taken due to CAM_DEV_QFREEZE */
+ cam_release_devq(path, 0, 0, 0, FALSE);
+ return;
} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
cam_release_devq(path,
/*relsim_flags*/0,
@@ -2154,20 +2859,365 @@ out:
}
}
- softc->state = ADA_STATE_NORMAL;
- /*
- * Since our peripheral may be invalidated by an error
- * above or an external event, we must release our CCB
- * before releasing the reference on the peripheral.
- * The peripheral will only go away once the last reference
- * is removed, and we need it around for the CCB release
- * operation.
- */
- xpt_release_ccb(done_ccb);
/* Drop freeze taken due to CAM_DEV_QFREEZE */
cam_release_devq(path, 0, 0, 0, FALSE);
- adaschedule(periph);
- cam_periph_release_locked(periph);
+
+ if (softc->flags & ADA_FLAG_CAN_LOG) {
+ xpt_release_ccb(done_ccb);
+ softc->state = ADA_STATE_LOGDIR;
+ xpt_schedule(periph, priority);
+ } else {
+ adaprobedone(periph, done_ccb);
+ }
+ return;
+ }
+ case ADA_CCB_LOGDIR:
+ {
+ int error;
+
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ error = 0;
+ softc->valid_logdir_len = 0;
+ bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
+ softc->valid_logdir_len =
+ ataio->dxfer_len - ataio->resid;
+ if (softc->valid_logdir_len > 0)
+ bcopy(ataio->data_ptr, &softc->ata_logdir,
+ min(softc->valid_logdir_len,
+ sizeof(softc->ata_logdir)));
+ /*
+ * Figure out whether the Identify Device log is
+ * supported. The General Purpose log directory
+ * has a header, and lists the number of pages
+ * available for each GP log identified by the
+ * offset into the list.
+ */
+ if ((softc->valid_logdir_len >=
+ ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
+ && (le16dec(softc->ata_logdir.header) ==
+ ATA_GP_LOG_DIR_VERSION)
+ && (le16dec(&softc->ata_logdir.num_pages[
+ (ATA_IDENTIFY_DATA_LOG *
+ sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
+ softc->flags |= ADA_FLAG_CAN_IDLOG;
+ } else {
+ softc->flags &= ~ADA_FLAG_CAN_IDLOG;
+ }
+ } else {
+ error = adaerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA log directory,
+ * then ATA logs are effectively not
+ * supported even if the bit is set in the
+ * identify data.
+ */
+ softc->flags &= ~(ADA_FLAG_CAN_LOG |
+ ADA_FLAG_CAN_IDLOG);
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+
+
+ }
+
+ free(ataio->data_ptr, M_ATADA);
+
+ if ((error == 0)
+ && (softc->flags & ADA_FLAG_CAN_IDLOG)) {
+ softc->state = ADA_STATE_IDDIR;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ } else
+ adaprobedone(periph, done_ccb);
+
+ return;
+ }
+ case ADA_CCB_IDDIR: {
+ int error;
+
+ if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ off_t entries_offset, max_entries;
+ error = 0;
+
+ softc->valid_iddir_len = 0;
+ bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
+ softc->flags &= ~(ADA_FLAG_CAN_SUPCAP |
+ ADA_FLAG_CAN_ZONE);
+ softc->valid_iddir_len =
+ ataio->dxfer_len - ataio->resid;
+ if (softc->valid_iddir_len > 0)
+ bcopy(ataio->data_ptr, &softc->ata_iddir,
+ min(softc->valid_iddir_len,
+ sizeof(softc->ata_iddir)));
+
+ entries_offset =
+ __offsetof(struct ata_identify_log_pages,entries);
+ max_entries = softc->valid_iddir_len - entries_offset;
+ if ((softc->valid_iddir_len > (entries_offset + 1))
+ && (le64dec(softc->ata_iddir.header) ==
+ ATA_IDLOG_REVISION)
+ && (softc->ata_iddir.entry_count > 0)) {
+ int num_entries, i;
+
+ num_entries = softc->ata_iddir.entry_count;
+ num_entries = min(num_entries,
+ softc->valid_iddir_len - entries_offset);
+ for (i = 0; i < num_entries &&
+ i < max_entries; i++) {
+ if (softc->ata_iddir.entries[i] ==
+ ATA_IDL_SUP_CAP)
+ softc->flags |=
+ ADA_FLAG_CAN_SUPCAP;
+ else if (softc->ata_iddir.entries[i]==
+ ATA_IDL_ZDI)
+ softc->flags |=
+ ADA_FLAG_CAN_ZONE;
+
+ if ((softc->flags &
+ ADA_FLAG_CAN_SUPCAP)
+ && (softc->flags &
+ ADA_FLAG_CAN_ZONE))
+ break;
+ }
+ }
+ } else {
+ error = adaerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA Identify Data log
+ * directory, then it effectively isn't
+ * supported even if the ATA Log directory
+ * a non-zero number of pages present for
+				 * has a non-zero number of pages present for
+ */
+ softc->flags &= ~ADA_FLAG_CAN_IDLOG;
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(ataio->data_ptr, M_ATADA);
+
+ if ((error == 0)
+ && (softc->flags & ADA_FLAG_CAN_SUPCAP)) {
+ softc->state = ADA_STATE_SUP_CAP;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ } else
+ adaprobedone(periph, done_ccb);
+ return;
+ }
+ case ADA_CCB_SUP_CAP: {
+ int error;
+
+ if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ uint32_t valid_len;
+ size_t needed_size;
+ struct ata_identify_log_sup_cap *sup_cap;
+ error = 0;
+
+ sup_cap = (struct ata_identify_log_sup_cap *)
+ ataio->data_ptr;
+ valid_len = ataio->dxfer_len - ataio->resid;
+ needed_size =
+ __offsetof(struct ata_identify_log_sup_cap,
+ sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
+ if (valid_len >= needed_size) {
+ uint64_t zoned, zac_cap;
+
+ zoned = le64dec(sup_cap->zoned_cap);
+ if (zoned & ATA_ZONED_VALID) {
+ /*
+ * This should have already been
+ * set, because this is also in the
+ * ATA identify data.
+ */
+ if ((zoned & ATA_ZONED_MASK) ==
+ ATA_SUPPORT_ZONE_HOST_AWARE)
+ softc->zone_mode =
+ ADA_ZONE_HOST_AWARE;
+ else if ((zoned & ATA_ZONED_MASK) ==
+ ATA_SUPPORT_ZONE_DEV_MANAGED)
+ softc->zone_mode =
+ ADA_ZONE_DRIVE_MANAGED;
+ }
+
+ zac_cap = le64dec(sup_cap->sup_zac_cap);
+ if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
+ if (zac_cap & ATA_REPORT_ZONES_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_RZ_SUP;
+ if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_OPEN_SUP;
+ if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_CLOSE_SUP;
+ if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_FINISH_SUP;
+ if (zac_cap & ATA_ND_RWP_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_RWP_SUP;
+ } else {
+ /*
+ * This field was introduced in
+ * ACS-4, r08 on April 28th, 2015.
+ * If the drive firmware was written
+ * to an earlier spec, it won't have
+ * the field. So, assume all
+ * commands are supported.
+ */
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_SUP_MASK;
+ }
+
+ }
+ } else {
+ error = adaerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA Identify Data
+ * Supported Capabilities page, clear the
+ * flag...
+ */
+ softc->flags &= ~ADA_FLAG_CAN_SUPCAP;
+ /*
+ * And clear zone capabilities.
+ */
+ softc->zone_flags &= ~ADA_ZONE_FLAG_SUP_MASK;
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(ataio->data_ptr, M_ATADA);
+
+ if ((error == 0)
+ && (softc->flags & ADA_FLAG_CAN_ZONE)) {
+ softc->state = ADA_STATE_ZONE;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ } else
+ adaprobedone(periph, done_ccb);
+ return;
+ }
+ case ADA_CCB_ZONE: {
+ int error;
+
+ if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ struct ata_zoned_info_log *zi_log;
+ uint32_t valid_len;
+ size_t needed_size;
+
+ zi_log = (struct ata_zoned_info_log *)ataio->data_ptr;
+
+ valid_len = ataio->dxfer_len - ataio->resid;
+ needed_size = __offsetof(struct ata_zoned_info_log,
+ version_info) + 1 + sizeof(zi_log->version_info);
+ if (valid_len >= needed_size) {
+ uint64_t tmpvar;
+
+ tmpvar = le64dec(zi_log->zoned_cap);
+ if (tmpvar & ATA_ZDI_CAP_VALID) {
+ if (tmpvar & ATA_ZDI_CAP_URSWRZ)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_URSWRZ;
+ else
+ softc->zone_flags &=
+ ~ADA_ZONE_FLAG_URSWRZ;
+ }
+ tmpvar = le64dec(zi_log->optimal_seq_zones);
+ if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_seq_zones = (tmpvar &
+ ATA_ZDI_OPT_SEQ_MASK);
+ } else {
+ softc->zone_flags &=
+ ~ADA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_seq_zones = 0;
+ }
+
+				tmpvar = le64dec(zi_log->optimal_nonseq_zones);
+ if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->optimal_nonseq_zones =
+ (tmpvar & ATA_ZDI_OPT_NS_MASK);
+ } else {
+ softc->zone_flags &=
+ ~ADA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->optimal_nonseq_zones = 0;
+ }
+
+ tmpvar = le64dec(zi_log->max_seq_req_zones);
+ if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_MAX_SEQ_SET;
+ softc->max_seq_zones =
+ (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
+ } else {
+ softc->zone_flags &=
+ ~ADA_ZONE_FLAG_MAX_SEQ_SET;
+ softc->max_seq_zones = 0;
+ }
+ }
+ } else {
+ error = adaerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ softc->flags &= ~ADA_FLAG_CAN_ZONE;
+				softc->zone_flags &= ~ADA_ZONE_FLAG_SET_MASK;
+
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+
+ }
+ free(ataio->data_ptr, M_ATADA);
+
+ adaprobedone(periph, done_ccb);
return;
}
case ADA_CCB_DUMP:
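
As a side note on adazonedone() above: the REPORT ZONES accounting clamps the descriptor count three ways, by what the drive reports (the header LENGTH field), by what actually arrived (dxfer_len - resid), and by what the caller allocated. A standalone sketch of that arithmetic follows, with illustrative numbers and the 64-byte header/descriptor sizes used by ZAC; it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t avail_len = 4096;		/* dxfer_len - resid, illustrative */
	uint32_t hdr_len = 100 * 64;		/* header LENGTH field, illustrative */
	const uint32_t hdr_size = 64, desc_size = 64;
	uint32_t entries_allocated = 32;	/* caller-supplied limit */
	uint32_t num_avail, num_to_fill;

	/* Descriptors present in the buffer vs. descriptors the drive reported. */
	num_avail = (avail_len - hdr_size) / desc_size;
	if (num_avail > hdr_len / desc_size)
		num_avail = hdr_len / desc_size;

	/* Never fill more entries than the caller allocated. */
	num_to_fill = num_avail;
	if (num_to_fill > entries_allocated)
		num_to_fill = entries_allocated;

	printf("available=%u to_fill=%u\n", num_avail, num_to_fill);
	return (0);
}
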
diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h
index 6086ed2..914333d 100644
--- a/sys/cam/cam_ccb.h
+++ b/sys/cam/cam_ccb.h
@@ -1087,7 +1087,17 @@ struct ccb_notify_acknowledge {
u_int tag_id; /* Tag for immediate notify */
u_int seq_id; /* Tar for target of notify */
u_int initiator_id; /* Initiator Identifier */
- u_int arg; /* Function specific */
+ u_int arg; /* Response information */
+ /*
+ * Lower byte of arg is one of RESPONSE CODE values defined below
+ * (subset of response codes from SPL-4 and FCP-4 specifications),
+	 * upper 3 bytes are code-specific ADDITIONAL RESPONSE INFORMATION.
+ */
+#define CAM_RSP_TMF_COMPLETE 0x00
+#define CAM_RSP_TMF_REJECTED 0x04
+#define CAM_RSP_TMF_FAILED 0x05
+#define CAM_RSP_TMF_SUCCEEDED 0x08
+#define CAM_RSP_TMF_INCORRECT_LUN 0x09
};
/* HBA engine structures. */
diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c
index 98966e8..f403391 100644
--- a/sys/cam/ctl/scsi_ctl.c
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -1552,6 +1552,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
/*
* Queue this back down to the SIM as an immediate notify.
*/
+ done_ccb->ccb_h.status = CAM_REQ_INPROG;
done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
xpt_action(done_ccb);
break;
@@ -2041,6 +2042,28 @@ ctlfe_done(union ctl_io *io)
*/
ccb->ccb_h.status = CAM_REQ_INPROG;
ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
+ switch (io->taskio.task_status) {
+ case CTL_TASK_FUNCTION_COMPLETE:
+ ccb->cna2.arg = CAM_RSP_TMF_COMPLETE;
+ break;
+ case CTL_TASK_FUNCTION_SUCCEEDED:
+ ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED;
+ ccb->ccb_h.flags |= CAM_SEND_STATUS;
+ break;
+ case CTL_TASK_FUNCTION_REJECTED:
+ ccb->cna2.arg = CAM_RSP_TMF_REJECTED;
+ ccb->ccb_h.flags |= CAM_SEND_STATUS;
+ break;
+ case CTL_TASK_LUN_DOES_NOT_EXIST:
+ ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN;
+ ccb->ccb_h.flags |= CAM_SEND_STATUS;
+ break;
+ case CTL_TASK_FUNCTION_NOT_SUPPORTED:
+ ccb->cna2.arg = CAM_RSP_TMF_FAILED;
+ ccb->ccb_h.flags |= CAM_SEND_STATUS;
+ break;
+ }
+ ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8;
xpt_action(ccb);
} else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) {
if (softc->flags & CTLFE_LUN_WILDCARD) {
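
The scsi_ctl.c change above fills in the response-information encoding documented in cam_ccb.h: the low byte of the notify-acknowledge arg carries the task management response code, and the three bytes above it carry the code-specific additional response information. A minimal packing/unpacking sketch follows; the extra-info value is illustrative and CAM_RSP_TMF_SUCCEEDED is taken from the defines added above.

#include <stdint.h>
#include <stdio.h>

#define CAM_RSP_TMF_SUCCEEDED	0x08	/* from the cam_ccb.h defines above */

int
main(void)
{
	uint32_t add_info = 0x000102;	/* illustrative task_resp bytes */
	uint32_t arg;

	/* Pack: response code in the low byte, extra info above it. */
	arg = CAM_RSP_TMF_SUCCEEDED | (add_info << 8);

	/* Unpack on the consumer side. */
	printf("code=%#x info=%#x\n", arg & 0xff, arg >> 8);
	return (0);
}
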
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index 3b23ae0..722b44f 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -111,6 +111,7 @@ static void fetchtableentries(int sense_key, int asc, int ascq,
struct scsi_inquiry_data *,
const struct sense_key_table_entry **,
const struct asc_table_entry **);
+
#ifdef _KERNEL
static void init_scsi_delay(void);
static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS);
@@ -502,9 +503,9 @@ static struct op_table_entry scsi_op_codes[] = {
/* 93 M ERASE(16) */
{ 0x93, T, "ERASE(16)" },
/* 94 O ZBC OUT */
- { 0x94, D, "ZBC OUT" },
- /* 95 O ZBC OUT */
- { 0x95, D, "ZBC OUT" },
+ { 0x94, ALL, "ZBC OUT" },
+ /* 95 O ZBC IN */
+ { 0x95, ALL, "ZBC IN" },
/* 96 */
/* 97 */
/* 98 */
@@ -520,7 +521,6 @@ static struct op_table_entry scsi_op_codes[] = {
/* XXX KDM ALL for this? op-num.txt defines it for none.. */
/* 9E SERVICE ACTION IN(16) */
{ 0x9E, ALL, "SERVICE ACTION IN(16)" },
- /* XXX KDM ALL for this? op-num.txt defines it for ADC.. */
/* 9F M SERVICE ACTION OUT(16) */
{ 0x9F, ALL, "SERVICE ACTION OUT(16)" },
/* A0 MMOOO OMMM OMO REPORT LUNS */
@@ -673,6 +673,12 @@ scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data)
if (pd_type == T_RBC)
pd_type = T_DIRECT;
+ /*
+ * Host managed drives are direct access for the most part.
+ */
+ if (pd_type == T_ZBC_HM)
+ pd_type = T_DIRECT;
+
/* Map NODEVICE to Direct Access Device to handle REPORT LUNS, etc. */
if (pd_type == T_NODEVICE)
pd_type = T_DIRECT;
@@ -4259,6 +4265,7 @@ scsi_get_block_info(struct scsi_sense_data *sense_data, u_int sense_len,
switch (SID_TYPE(inq_data)) {
case T_DIRECT:
case T_RBC:
+ case T_ZBC_HM:
break;
default:
goto bailout;
@@ -5408,6 +5415,9 @@ scsi_print_inquiry(struct scsi_inquiry_data *inq_data)
case T_ADC:
dtype = "Automation/Drive Interface";
break;
+ case T_ZBC_HM:
+ dtype = "Host Managed Zoned Block";
+ break;
case T_NODEVICE:
dtype = "Uninstalled";
break;
@@ -8135,23 +8145,30 @@ scsi_ata_identify(struct ccb_scsiio *csio, u_int32_t retries,
u_int16_t dxfer_len, u_int8_t sense_len,
u_int32_t timeout)
{
- scsi_ata_pass_16(csio,
- retries,
- cbfcnp,
- /*flags*/CAM_DIR_IN,
- tag_action,
- /*protocol*/AP_PROTO_PIO_IN,
- /*ata_flags*/AP_FLAG_TDIR_FROM_DEV|
- AP_FLAG_BYT_BLOK_BYTES|AP_FLAG_TLEN_SECT_CNT,
- /*features*/0,
- /*sector_count*/dxfer_len,
- /*lba*/0,
- /*command*/ATA_ATA_IDENTIFY,
- /*control*/0,
- data_ptr,
- dxfer_len,
- sense_len,
- timeout);
+ scsi_ata_pass(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*protocol*/AP_PROTO_PIO_IN,
+ /*ata_flags*/AP_FLAG_TDIR_FROM_DEV |
+ AP_FLAG_BYT_BLOK_BYTES |
+ AP_FLAG_TLEN_SECT_CNT,
+ /*features*/0,
+ /*sector_count*/dxfer_len,
+ /*lba*/0,
+ /*command*/ATA_ATA_IDENTIFY,
+ /*device*/ 0,
+ /*icc*/ 0,
+ /*auxiliary*/ 0,
+ /*control*/0,
+ data_ptr,
+ dxfer_len,
+ /*cdb_storage*/ NULL,
+ /*cdb_storage_len*/ 0,
+ /*minimum_cmd_size*/ 0,
+ sense_len,
+ timeout);
}
void
@@ -8179,6 +8196,248 @@ scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries,
timeout);
}
+int
+scsi_ata_read_log(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint32_t log_address,
+ uint32_t page_number, uint16_t block_count,
+ uint8_t protocol, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ uint8_t command, protocol_out;
+ uint16_t count_out;
+ uint64_t lba;
+ int retval;
+
+ retval = 0;
+
+ switch (protocol) {
+ case AP_PROTO_DMA:
+ count_out = block_count;
+ command = ATA_READ_LOG_DMA_EXT;
+ protocol_out = AP_PROTO_DMA;
+ break;
+ case AP_PROTO_PIO_IN:
+ default:
+ count_out = block_count;
+ command = ATA_READ_LOG_EXT;
+ protocol_out = AP_PROTO_PIO_IN;
+ break;
+ }
+
+ lba = (((uint64_t)page_number & 0xff00) << 32) |
+ ((page_number & 0x00ff) << 8) |
+ (log_address & 0xff);
+
+ protocol_out |= AP_EXTEND;
+
+ retval = scsi_ata_pass(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*protocol*/ protocol_out,
+ /*ata_flags*/AP_FLAG_TLEN_SECT_CNT |
+ AP_FLAG_BYT_BLOK_BLOCKS |
+ AP_FLAG_TDIR_FROM_DEV,
+ /*feature*/ 0,
+ /*sector_count*/ count_out,
+ /*lba*/ lba,
+ /*command*/ command,
+ /*device*/ 0,
+ /*icc*/ 0,
+ /*auxiliary*/ 0,
+ /*control*/0,
+ data_ptr,
+ dxfer_len,
+ /*cdb_storage*/ NULL,
+ /*cdb_storage_len*/ 0,
+ /*minimum_cmd_size*/ 0,
+ sense_len,
+ timeout);
+
+ return (retval);
+}
+
+/*
+ * Note! This is an unusual CDB building function because it can return
+ * an error in the event that the command in question requires a variable
+ * length CDB, but the caller has not provided storage space for one, or has
+ * not provided enough.  If there is enough space available in the
+ * standard SCSI CCB CDB bytes, we'll prefer that over passed in storage.
+ */
+int
+scsi_ata_pass(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint32_t flags, uint8_t tag_action,
+ uint8_t protocol, uint8_t ata_flags, uint16_t features,
+ uint16_t sector_count, uint64_t lba, uint8_t command,
+ uint8_t device, uint8_t icc, uint32_t auxiliary,
+ uint8_t control, u_int8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t *cdb_storage, size_t cdb_storage_len,
+ int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout)
+{
+ uint32_t cam_flags;
+ uint8_t *cdb_ptr;
+ int cmd_size;
+ int retval;
+ uint8_t cdb_len;
+
+ retval = 0;
+ cam_flags = flags;
+
+ /*
+ * Round the user's request to the nearest command size that is at
+	 * least as big as what the caller requested.
+ */
+ if (minimum_cmd_size <= 12)
+ cmd_size = 12;
+ else if (minimum_cmd_size > 16)
+ cmd_size = 32;
+ else
+ cmd_size = 16;
+
+ /*
+ * If we have parameters that require a 48-bit ATA command, we have to
+ * use the 16 byte ATA PASS-THROUGH command at least.
+ */
+ if (((lba > ATA_MAX_28BIT_LBA)
+ || (sector_count > 255)
+ || (features > 255)
+ || (protocol & AP_EXTEND))
+ && ((cmd_size < 16)
+ || ((protocol & AP_EXTEND) == 0))) {
+ if (cmd_size < 16)
+ cmd_size = 16;
+ protocol |= AP_EXTEND;
+ }
+
+ /*
+ * The icc and auxiliary ATA registers are only supported in the
+ * 32-byte version of the ATA PASS-THROUGH command.
+ */
+ if ((icc != 0)
+ || (auxiliary != 0)) {
+ cmd_size = 32;
+ protocol |= AP_EXTEND;
+ }
+
+
+ if ((cmd_size > sizeof(csio->cdb_io.cdb_bytes))
+ && ((cdb_storage == NULL)
+ || (cdb_storage_len < cmd_size))) {
+ retval = 1;
+ goto bailout;
+ }
+
+ /*
+ * At this point we know we have enough space to store the command
+	 * in one place or another.  We prefer the built-in array, but use
+	 * the passed-in storage if necessary.
+ */
+ if (cmd_size <= sizeof(csio->cdb_io.cdb_bytes))
+ cdb_ptr = csio->cdb_io.cdb_bytes;
+ else {
+ cdb_ptr = cdb_storage;
+ cam_flags |= CAM_CDB_POINTER;
+ }
+
+ if (cmd_size <= 12) {
+ struct ata_pass_12 *cdb;
+
+ cdb = (struct ata_pass_12 *)cdb_ptr;
+ cdb_len = sizeof(*cdb);
+ bzero(cdb, cdb_len);
+
+ cdb->opcode = ATA_PASS_12;
+ cdb->protocol = protocol;
+ cdb->flags = ata_flags;
+ cdb->features = features;
+ cdb->sector_count = sector_count;
+ cdb->lba_low = lba & 0xff;
+ cdb->lba_mid = (lba >> 8) & 0xff;
+ cdb->lba_high = (lba >> 16) & 0xff;
+ cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA;
+ cdb->command = command;
+ cdb->control = control;
+ } else if (cmd_size <= 16) {
+ struct ata_pass_16 *cdb;
+
+ cdb = (struct ata_pass_16 *)cdb_ptr;
+ cdb_len = sizeof(*cdb);
+ bzero(cdb, cdb_len);
+
+ cdb->opcode = ATA_PASS_16;
+ cdb->protocol = protocol;
+ cdb->flags = ata_flags;
+ cdb->features = features & 0xff;
+ cdb->sector_count = sector_count & 0xff;
+ cdb->lba_low = lba & 0xff;
+ cdb->lba_mid = (lba >> 8) & 0xff;
+ cdb->lba_high = (lba >> 16) & 0xff;
+ /*
+ * If AP_EXTEND is set, we're sending a 48-bit command.
+ * Otherwise it's a 28-bit command.
+ */
+ if (protocol & AP_EXTEND) {
+ cdb->lba_low_ext = (lba >> 24) & 0xff;
+ cdb->lba_mid_ext = (lba >> 32) & 0xff;
+ cdb->lba_high_ext = (lba >> 40) & 0xff;
+ cdb->features_ext = (features >> 8) & 0xff;
+ cdb->sector_count_ext = (sector_count >> 8) & 0xff;
+ cdb->device = device | ATA_DEV_LBA;
+ } else {
+ cdb->lba_low_ext = (lba >> 24) & 0xf;
+ cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA;
+ }
+ cdb->command = command;
+ cdb->control = control;
+ } else {
+ struct ata_pass_32 *cdb;
+ uint8_t tmp_lba[8];
+
+ cdb = (struct ata_pass_32 *)cdb_ptr;
+ cdb_len = sizeof(*cdb);
+ bzero(cdb, cdb_len);
+ cdb->opcode = VARIABLE_LEN_CDB;
+ cdb->control = control;
+ cdb->length = sizeof(*cdb) - __offsetof(struct ata_pass_32,
+ service_action);
+ scsi_ulto2b(ATA_PASS_32_SA, cdb->service_action);
+ cdb->protocol = protocol;
+ cdb->flags = ata_flags;
+
+ if ((protocol & AP_EXTEND) == 0) {
+ lba &= 0x0fffffff;
+ cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA;
+ features &= 0xff;
+ sector_count &= 0xff;
+ } else {
+ cdb->device = device | ATA_DEV_LBA;
+ }
+ scsi_u64to8b(lba, tmp_lba);
+ bcopy(&tmp_lba[2], cdb->lba, sizeof(cdb->lba));
+ scsi_ulto2b(features, cdb->features);
+ scsi_ulto2b(sector_count, cdb->count);
+ cdb->command = command;
+ cdb->icc = icc;
+ scsi_ulto4b(auxiliary, cdb->auxiliary);
+ }
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ cam_flags,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ cmd_size,
+ timeout);
+bailout:
+ return (retval);
+}
+
void
scsi_ata_pass_16(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
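
For readers following the scsi_ata_pass() additions above: the routine first rounds minimum_cmd_size up to 12, 16 or 32 bytes, then promotes the CDB when 48-bit parameters or the ICC/AUXILIARY registers force a larger one. A standalone sketch of just that selection logic is below; the input values are illustrative and 0x0fffffff stands in for ATA_MAX_28BIT_LBA.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int minimum_cmd_size = 0;		/* caller's preference */
	uint64_t lba = 0x123456789aULL;		/* > 28 bits, illustrative */
	uint16_t sector_count = 1, features = 0;
	uint32_t auxiliary = 0;
	uint8_t icc = 0;
	int cmd_size;

	/* Round up to the nearest supported ATA PASS-THROUGH CDB size. */
	if (minimum_cmd_size <= 12)
		cmd_size = 12;
	else if (minimum_cmd_size > 16)
		cmd_size = 32;
	else
		cmd_size = 16;

	/* 48-bit parameters need at least the 16-byte CDB. */
	if ((lba > 0x0fffffffULL || sector_count > 255 || features > 255) &&
	    cmd_size < 16)
		cmd_size = 16;

	/* ICC and AUXILIARY exist only in the 32-byte CDB. */
	if (icc != 0 || auxiliary != 0)
		cmd_size = 32;

	printf("cmd_size=%d\n", cmd_size);
	return (0);
}
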
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index 075629b..1fd4540 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -1414,6 +1414,7 @@ struct ata_pass_12 {
#define AP_PROTO_UDMA_OUT (0x0b << 1)
#define AP_PROTO_FPDMA (0x0c << 1)
#define AP_PROTO_RESP_INFO (0x0f << 1)
+#define AP_PROTO_MASK 0x1e
#define AP_MULTI 0xe0
u_int8_t flags;
#define AP_T_LEN 0x03
@@ -1955,6 +1956,27 @@ struct ata_pass_16 {
u_int8_t control;
};
+struct ata_pass_32 {
+ uint8_t opcode;
+ uint8_t control;
+ uint8_t reserved1[5];
+ uint8_t length;
+ uint8_t service_action[2];
+#define ATA_PASS_32_SA 0x1ff0
+ uint8_t protocol;
+ uint8_t flags;
+ uint8_t reserved2[2];
+ uint8_t lba[6];
+ uint8_t features[2];
+ uint8_t count[2];
+ uint8_t device;
+ uint8_t command;
+ uint8_t reserved3;
+ uint8_t icc;
+ uint8_t auxiliary[4];
+};
+
#define SC_SCSI_1 0x01
#define SC_SCSI_2 0x03
@@ -1997,6 +2019,8 @@ struct ata_pass_16 {
#define MODE_SENSE_10 0x5A
#define PERSISTENT_RES_IN 0x5E
#define PERSISTENT_RES_OUT 0x5F
+#define EXTENDED_CDB 0x7E
+#define VARIABLE_LEN_CDB 0x7F
#define EXTENDED_COPY 0x83
#define RECEIVE_COPY_STATUS 0x84
#define ATA_PASS_16 0x85
@@ -2064,6 +2088,7 @@ struct ata_pass_16 {
#define T_OCRW 0x0f
#define T_OSD 0x11
#define T_ADC 0x12
+#define T_ZBC_HM 0x14
#define T_NODEVICE 0x1f
#define T_ANY 0xff /* Used in Quirk table matches */
@@ -2712,10 +2737,17 @@ struct scsi_vpd_block_device_characteristics
uint8_t flags;
#define SVPD_VBULS 0x01
#define SVPD_FUAB 0x02
-#define SVPD_HAW_ZBC 0x10
+#define SVPD_ZBC_NR 0x00 /* Not Reported */
+#define SVPD_HAW_ZBC 0x10 /* Host Aware */
+#define SVPD_DM_ZBC 0x20 /* Drive Managed */
+#define SVPD_ZBC_MASK 0x30 /* Zoned mask */
uint8_t reserved[55];
};
+#define SBDC_IS_PRESENT(bdc, length, field) \
+ ((length >= offsetof(struct scsi_vpd_block_device_characteristics, \
+ field) + sizeof(bdc->field)) ? 1 : 0)
+
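The macro above guards against short INQUIRY returns; a usage sketch (mirroring how the da(4) changes later in this diff consume it, with bdc and csio supplied by the caller):

	valid_len = csio->dxfer_len - csio->resid;
	if (SBDC_IS_PRESENT(bdc, valid_len, medium_rotation_rate))
		rotation_rate = scsi_2btoul(bdc->medium_rotation_rate);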
/*
* Logical Block Provisioning VPD Page based on
* T10/1799-D Revision 31
@@ -2774,6 +2806,28 @@ struct scsi_vpd_block_limits
u_int8_t max_atomic_boundary_size[4];
};
+/*
+ * Zoned Block Device Characteristics VPD page.
+ * From ZBC-r04, dated August 12, 2015.
+ */
+struct scsi_vpd_zoned_bdc {
+ uint8_t device;
+ uint8_t page_code;
+#define SVPD_ZONED_BDC 0xB6
+ uint8_t page_length[2];
+#define SVPD_ZBDC_PL 0x3C
+ uint8_t flags;
+#define SVPD_ZBDC_URSWRZ 0x01
+ uint8_t reserved1[3];
+ uint8_t optimal_seq_zones[4];
+#define SVPD_ZBDC_OPT_SEQ_NR 0xffffffff
+ uint8_t optimal_nonseq_zones[4];
+#define SVPD_ZBDC_OPT_NONSEQ_NR 0xffffffff
+ uint8_t max_seq_req_zones[4];
+#define SVPD_ZBDC_MAX_SEQ_UNLIMITED 0xffffffff
+ uint8_t reserved2[44];
+};
+
struct scsi_read_capacity
{
u_int8_t opcode;
@@ -3345,6 +3399,29 @@ struct scsi_sense_osd_attr_id
};
/*
+ * ATA Return descriptor, used for the SCSI ATA PASS-THROUGH(12), (16) and
+ * (32) commands. Described in SAT-4r05.
+ */
+struct scsi_sense_ata_ret_desc
+{
+ uint8_t desc_type;
+#define SSD_DESC_ATA 0x09
+ uint8_t length;
+ uint8_t flags;
+#define SSD_DESC_ATA_FLAG_EXTEND 0x01
+ uint8_t error;
+ uint8_t count_15_8;
+ uint8_t count_7_0;
+ uint8_t lba_31_24;
+ uint8_t lba_7_0;
+ uint8_t lba_39_32;
+ uint8_t lba_15_8;
+ uint8_t lba_47_40;
+ uint8_t lba_23_16;
+ uint8_t device;
+ uint8_t status;
+};
+/*
* Used with Sense keys No Sense (0x00) and Not Ready (0x02).
*
* Maximum descriptors allowed: 32 (as of SPC-4)
@@ -3960,6 +4037,23 @@ void scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries,
u_int8_t *data_ptr, u_int16_t dxfer_len,
u_int8_t sense_len, u_int32_t timeout);
+int scsi_ata_read_log(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint32_t log_address,
+ uint32_t page_number, uint16_t block_count,
+ uint8_t protocol, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t sense_len, uint32_t timeout);
+
+int scsi_ata_pass(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint32_t flags, uint8_t tag_action,
+ uint8_t protocol, uint8_t ata_flags, uint16_t features,
+ uint16_t sector_count, uint64_t lba, uint8_t command,
+ uint8_t device, uint8_t icc, uint32_t auxiliary,
+ uint8_t control, u_int8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t *cdb_storage, size_t cdb_storage_len,
+ int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout);
+
void scsi_ata_pass_16(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
u_int32_t flags, u_int8_t tag_action,
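As an illustration of the scsi_ata_read_log() prototype above (the surrounding CCB setup and the my_done callback are hypothetical; the da(4) probe code later in this diff issues essentially this request), reading one block of the ATA General Purpose Log Directory through a SAT layer looks like:

	struct ata_gp_log_dir *log_dir;	/* zeroed allocation provided by the caller */

	retval = scsi_ata_read_log(&ccb->csio,
	    /*retries*/ 1,
	    /*cbfcnp*/ my_done,
	    /*tag_action*/ MSG_SIMPLE_Q_TAG,
	    /*log_address*/ ATA_LOG_DIRECTORY,
	    /*page_number*/ 0,
	    /*block_count*/ 1,
	    /*protocol*/ AP_PROTO_DMA,	/* or AP_PROTO_PIO_IN if DMA is unsupported */
	    /*data_ptr*/ (uint8_t *)log_dir,
	    /*dxfer_len*/ sizeof(*log_dir),
	    /*sense_len*/ SSD_FULL_SIZE,
	    /*timeout*/ 30 * 1000);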
diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c
index 40bdeef..aa05b70 100644
--- a/sys/cam/scsi/scsi_da.c
+++ b/sys/cam/scsi/scsi_da.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/cons.h>
#include <sys/endian.h>
#include <sys/proc.h>
+#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */
@@ -63,12 +64,20 @@ __FBSDID("$FreeBSD$");
#include <cam/cam_iosched.h>
#include <cam/scsi/scsi_message.h>
-
-#ifndef _KERNEL
#include <cam/scsi/scsi_da.h>
-#endif /* !_KERNEL */
#ifdef _KERNEL
+/*
+ * Note that there are probe ordering dependencies here. The order isn't
+ * controlled by this enumeration, but by explicit state transitions in
+ * dastart() and dadone(). Here are some of the dependencies:
+ *
+ * 1. RC should come first, before RC16, unless there is evidence that RC16
+ * is supported.
+ * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
+ * 3. The ATA probes should go in this order:
+ * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
+ */
typedef enum {
DA_STATE_PROBE_RC,
DA_STATE_PROBE_RC16,
@@ -76,23 +85,33 @@ typedef enum {
DA_STATE_PROBE_BLK_LIMITS,
DA_STATE_PROBE_BDC,
DA_STATE_PROBE_ATA,
+ DA_STATE_PROBE_ATA_LOGDIR,
+ DA_STATE_PROBE_ATA_IDDIR,
+ DA_STATE_PROBE_ATA_SUP,
+ DA_STATE_PROBE_ATA_ZONE,
+ DA_STATE_PROBE_ZONE,
DA_STATE_NORMAL
} da_state;
typedef enum {
- DA_FLAG_PACK_INVALID = 0x001,
- DA_FLAG_NEW_PACK = 0x002,
- DA_FLAG_PACK_LOCKED = 0x004,
- DA_FLAG_PACK_REMOVABLE = 0x008,
- DA_FLAG_NEED_OTAG = 0x020,
- DA_FLAG_WAS_OTAG = 0x040,
- DA_FLAG_RETRY_UA = 0x080,
- DA_FLAG_OPEN = 0x100,
- DA_FLAG_SCTX_INIT = 0x200,
- DA_FLAG_CAN_RC16 = 0x400,
- DA_FLAG_PROBED = 0x800,
- DA_FLAG_DIRTY = 0x1000,
- DA_FLAG_ANNOUNCED = 0x2000
+ DA_FLAG_PACK_INVALID = 0x000001,
+ DA_FLAG_NEW_PACK = 0x000002,
+ DA_FLAG_PACK_LOCKED = 0x000004,
+ DA_FLAG_PACK_REMOVABLE = 0x000008,
+ DA_FLAG_NEED_OTAG = 0x000020,
+ DA_FLAG_WAS_OTAG = 0x000040,
+ DA_FLAG_RETRY_UA = 0x000080,
+ DA_FLAG_OPEN = 0x000100,
+ DA_FLAG_SCTX_INIT = 0x000200,
+ DA_FLAG_CAN_RC16 = 0x000400,
+ DA_FLAG_PROBED = 0x000800,
+ DA_FLAG_DIRTY = 0x001000,
+ DA_FLAG_ANNOUNCED = 0x002000,
+ DA_FLAG_CAN_ATA_DMA = 0x004000,
+ DA_FLAG_CAN_ATA_LOG = 0x008000,
+ DA_FLAG_CAN_ATA_IDLOG = 0x010000,
+ DA_FLAG_CAN_ATA_SUPCAP = 0x020000,
+ DA_FLAG_CAN_ATA_ZONE = 0x040000
} da_flags;
typedef enum {
@@ -103,7 +122,8 @@ typedef enum {
DA_Q_4K = 0x08,
DA_Q_NO_RC16 = 0x10,
DA_Q_NO_UNMAP = 0x20,
- DA_Q_RETRY_BUSY = 0x40
+ DA_Q_RETRY_BUSY = 0x40,
+ DA_Q_SMR_DM = 0x80
} da_quirks;
#define DA_Q_BIT_STRING \
@@ -114,7 +134,8 @@ typedef enum {
"\0044K" \
"\005NO_RC16" \
"\006NO_UNMAP" \
- "\007RETRY_BUSY"
+ "\007RETRY_BUSY" \
+	"\010SMR_DM"
typedef enum {
DA_CCB_PROBE_RC = 0x01,
@@ -127,8 +148,13 @@ typedef enum {
DA_CCB_DUMP = 0x0A,
DA_CCB_DELETE = 0x0B,
DA_CCB_TUR = 0x0C,
- DA_CCB_TYPE_MASK = 0x0F,
- DA_CCB_RETRY_UA = 0x10
+ DA_CCB_PROBE_ZONE = 0x0D,
+ DA_CCB_PROBE_ATA_LOGDIR = 0x0E,
+ DA_CCB_PROBE_ATA_IDDIR = 0x0F,
+ DA_CCB_PROBE_ATA_SUP = 0x10,
+ DA_CCB_PROBE_ATA_ZONE = 0x11,
+ DA_CCB_TYPE_MASK = 0x1F,
+ DA_CCB_RETRY_UA = 0x20
} da_ccb_state;
/*
@@ -152,6 +178,63 @@ typedef enum {
DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;
+/*
+ * For SCSI, host managed drives show up as a separate device type. For
+ * ATA, host managed drives also have a different device signature.
+ * XXX KDM figure out the ATA host managed signature.
+ */
+typedef enum {
+ DA_ZONE_NONE = 0x00,
+ DA_ZONE_DRIVE_MANAGED = 0x01,
+ DA_ZONE_HOST_AWARE = 0x02,
+ DA_ZONE_HOST_MANAGED = 0x03
+} da_zone_mode;
+
+/*
+ * We distinguish between these interface cases in addition to the drive type:
+ * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
+ * o ATA drive behind a SCSI translation layer that does not know about
+ * ZBC/ZAC, and so needs to be managed via ATA passthrough. In this
+ * case, we would need to share the ATA code with the ada(4) driver.
+ * o SCSI drive.
+ */
+typedef enum {
+ DA_ZONE_IF_SCSI,
+ DA_ZONE_IF_ATA_PASS,
+ DA_ZONE_IF_ATA_SAT,
+} da_zone_interface;
+
+typedef enum {
+ DA_ZONE_FLAG_RZ_SUP = 0x0001,
+ DA_ZONE_FLAG_OPEN_SUP = 0x0002,
+ DA_ZONE_FLAG_CLOSE_SUP = 0x0004,
+ DA_ZONE_FLAG_FINISH_SUP = 0x0008,
+ DA_ZONE_FLAG_RWP_SUP = 0x0010,
+ DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP |
+ DA_ZONE_FLAG_OPEN_SUP |
+ DA_ZONE_FLAG_CLOSE_SUP |
+ DA_ZONE_FLAG_FINISH_SUP |
+ DA_ZONE_FLAG_RWP_SUP),
+ DA_ZONE_FLAG_URSWRZ = 0x0020,
+ DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040,
+ DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080,
+ DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100,
+ DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET |
+ DA_ZONE_FLAG_OPT_NONSEQ_SET |
+ DA_ZONE_FLAG_MAX_SEQ_SET)
+} da_zone_flags;
+
+static struct da_zone_desc {
+ da_zone_flags value;
+ const char *desc;
+} da_zone_desc_table[] = {
+ {DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
+ {DA_ZONE_FLAG_OPEN_SUP, "Open" },
+ {DA_ZONE_FLAG_CLOSE_SUP, "Close" },
+ {DA_ZONE_FLAG_FINISH_SUP, "Finish" },
+ {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
+};
+
typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
struct bio *bp);
static da_delete_func_t da_delete_trim;
@@ -214,7 +297,17 @@ struct da_softc {
int error_inject;
int trim_max_ranges;
int delete_available; /* Delete methods possibly available */
- u_int maxio;
+ da_zone_mode zone_mode;
+ da_zone_interface zone_interface;
+ da_zone_flags zone_flags;
+ struct ata_gp_log_dir ata_logdir;
+ int valid_logdir_len;
+ struct ata_identify_log_pages ata_iddir;
+ int valid_iddir_len;
+ uint64_t optimal_seq_zones;
+ uint64_t optimal_nonseq_zones;
+ uint64_t max_seq_zones;
+ u_int maxio;
uint32_t unmap_max_ranges;
uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */
uint64_t ws_max_blks;
@@ -1188,6 +1281,15 @@ static struct da_quirk_entry da_quirk_table[] =
},
{
/*
+ * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
+ * Drive Managed SATA hard drive. This drive doesn't report
+ * in firmware that it is a drive managed SMR drive.
+ */
+ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS0002*", "*" },
+ /*quirks*/DA_Q_SMR_DM
+ },
+ {
+ /*
* MX-ES USB Drive by Mach Xtreme
*/
{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
@@ -1204,6 +1306,8 @@ static void dasysctlinit(void *context, int pending);
static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
+static int dazonemodesysctl(SYSCTL_HANDLER_ARGS);
+static int dazonesupsysctl(SYSCTL_HANDLER_ARGS);
static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
static void dadeletemethodset(struct da_softc *softc,
da_delete_methods delete_method);
@@ -1217,6 +1321,7 @@ static periph_ctor_t daregister;
static periph_dtor_t dacleanup;
static periph_start_t dastart;
static periph_oninv_t daoninvalidate;
+static void dazonedone(struct cam_periph *periph, union ccb *ccb);
static void dadone(struct cam_periph *periph,
union ccb *done_ccb);
static int daerror(union ccb *ccb, u_int32_t cam_flags,
@@ -1447,6 +1552,14 @@ dastrategy(struct bio *bp)
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
/*
+ * Zone commands must be ordered, because they can depend on the
+ * effects of previously issued commands, and they may affect
+ * commands after them.
+ */
+ if (bp->bio_cmd == BIO_ZONE)
+ bp->bio_flags |= BIO_ORDERED;
+
+ /*
* Place it in the queue of disk activities for this disk
*/
cam_iosched_queue_work(softc->cam_iosched, bp);
@@ -1678,7 +1791,8 @@ daasync(void *callback_arg, u_int32_t code,
break;
if (SID_TYPE(&cgd->inq_data) != T_DIRECT
&& SID_TYPE(&cgd->inq_data) != T_RBC
- && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
+ && SID_TYPE(&cgd->inq_data) != T_OPTICAL
+ && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
break;
/*
@@ -1829,6 +1943,29 @@ dasysctlinit(void *context, int pending)
&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
"Minimum CDB size");
+ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
+ softc, 0, dazonemodesysctl, "A",
+ "Zone Mode");
+ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
+ softc, 0, dazonesupsysctl, "A",
+ "Zone Support");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
+ "Optimal Number of Open Sequential Write Preferred Zones");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "optimal_nonseq_zones", CTLFLAG_RD,
+ &softc->optimal_nonseq_zones,
+ "Optimal Number of Non-Sequentially Written Sequential Write "
+ "Preferred Zones");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
+ "Maximum Number of Open Sequential Write Required Zones");
+
SYSCTL_ADD_INT(&softc->sysctl_ctx,
SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO,
@@ -2147,6 +2284,72 @@ dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
return (0);
}
+static int
+dazonemodesysctl(SYSCTL_HANDLER_ARGS)
+{
+ char tmpbuf[40];
+ struct da_softc *softc;
+ int error;
+
+ softc = (struct da_softc *)arg1;
+
+ switch (softc->zone_mode) {
+ case DA_ZONE_DRIVE_MANAGED:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
+ break;
+ case DA_ZONE_HOST_AWARE:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
+ break;
+ case DA_ZONE_HOST_MANAGED:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
+ break;
+ case DA_ZONE_NONE:
+ default:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
+ break;
+ }
+
+ error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
+
+ return (error);
+}
+
+static int
+dazonesupsysctl(SYSCTL_HANDLER_ARGS)
+{
+ char tmpbuf[180];
+ struct da_softc *softc;
+ struct sbuf sb;
+ int error, first;
+ unsigned int i;
+
+ softc = (struct da_softc *)arg1;
+
+ error = 0;
+ first = 1;
+ sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
+
+ for (i = 0; i < sizeof(da_zone_desc_table) /
+ sizeof(da_zone_desc_table[0]); i++) {
+ if (softc->zone_flags & da_zone_desc_table[i].value) {
+ if (first == 0)
+ sbuf_printf(&sb, ", ");
+ else
+ first = 0;
+ sbuf_cat(&sb, da_zone_desc_table[i].desc);
+ }
+ }
+
+ if (first == 1)
+ sbuf_printf(&sb, "None");
+
+ sbuf_finish(&sb);
+
+ error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
+
+ return (error);
+}
+
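With the handlers above in place, a zoned drive exports read-only nodes under the periph's sysctl tree; illustrative (not captured) output for a host aware disk might look like:

	kern.cam.da.0.zone_mode: Host Aware
	kern.cam.da.0.zone_support: Report Zones, Open, Close, Finish, Reset Write Pointer
	kern.cam.da.0.optimal_seq_zones: 128
	kern.cam.da.0.optimal_nonseq_zones: 8
	kern.cam.da.0.max_seq_zones: 0

The numeric values are hypothetical; on real hardware they come from the Zoned Block Device Characteristics VPD page or the ATA Zoned Device Information page probed below.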
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
@@ -2211,6 +2414,23 @@ daregister(struct cam_periph *periph, void *arg)
if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
softc->quirks |= DA_Q_NO_6_BYTE;
+ if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
+ softc->zone_mode = DA_ZONE_HOST_MANAGED;
+ else if (softc->quirks & DA_Q_SMR_DM)
+ softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
+ else
+ softc->zone_mode = DA_ZONE_NONE;
+
+ if (softc->zone_mode != DA_ZONE_NONE) {
+ if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
+ if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
+ softc->zone_interface = DA_ZONE_IF_ATA_SAT;
+ else
+ softc->zone_interface = DA_ZONE_IF_ATA_PASS;
+ } else
+ softc->zone_interface = DA_ZONE_IF_SCSI;
+ }
+
TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
/*
@@ -2292,7 +2512,7 @@ daregister(struct cam_periph *periph, void *arg)
softc->maxio = cpi.maxio;
softc->disk->d_maxsize = softc->maxio;
softc->disk->d_unit = periph->unit_number;
- softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION;
+ softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
@@ -2360,6 +2580,300 @@ daregister(struct cam_periph *periph, void *arg)
return(CAM_REQ_CMP);
}
+static int
+da_zone_bio_to_scsi(int disk_zone_cmd)
+{
+ switch (disk_zone_cmd) {
+ case DISK_ZONE_OPEN:
+ return ZBC_OUT_SA_OPEN;
+ case DISK_ZONE_CLOSE:
+ return ZBC_OUT_SA_CLOSE;
+ case DISK_ZONE_FINISH:
+ return ZBC_OUT_SA_FINISH;
+ case DISK_ZONE_RWP:
+ return ZBC_OUT_SA_RWP;
+ }
+
+ return -1;
+}
+
+static int
+da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
+ int *queue_ccb)
+{
+ struct da_softc *softc;
+ int error;
+
+ error = 0;
+
+ if (bp->bio_cmd != BIO_ZONE) {
+ error = EINVAL;
+ goto bailout;
+ }
+
+ softc = periph->softc;
+
+ switch (bp->bio_zone.zone_cmd) {
+ case DISK_ZONE_OPEN:
+ case DISK_ZONE_CLOSE:
+ case DISK_ZONE_FINISH:
+ case DISK_ZONE_RWP: {
+ int zone_flags;
+ int zone_sa;
+ uint64_t lba;
+
+ zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
+ if (zone_sa == -1) {
+ xpt_print(periph->path, "Cannot translate zone "
+ "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ zone_flags = 0;
+ lba = bp->bio_zone.zone_params.rwp.id;
+
+ if (bp->bio_zone.zone_params.rwp.flags &
+ DISK_ZONE_RWP_FLAG_ALL)
+ zone_flags |= ZBC_OUT_ALL;
+
+ if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
+ scsi_zbc_out(&ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*service_action*/ zone_sa,
+ /*zone_id*/ lba,
+ /*zone_flags*/ zone_flags,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+ } else {
+ /*
+ * Note that in this case, even though we can
+ * technically use NCQ, we don't bother for several
+ * reasons:
+ * 1. It hasn't been tested on a SAT layer that
+ * supports it. This is new as of SAT-4.
+ * 2. Even when there is a SAT layer that supports
+ * it, that SAT layer will also probably support
+ * ZBC -> ZAC translation, since they are both
+ * in the SAT-4 spec.
+ * 3. Translation will likely be preferable to ATA
+ * passthrough. LSI / Avago at least single
+ * steps ATA passthrough commands in the HBA,
+ * regardless of protocol, so unless that
+ * changes, there is a performance penalty for
+ * doing ATA passthrough no matter whether
+ * you're using NCQ/FPDMA, DMA or PIO.
+ * 4. It requires a 32-byte CDB, which at least at
+ * this point in CAM requires a CDB pointer, which
+ * would require us to allocate an additional bit
+ * of storage separate from the CCB.
+ */
+ error = scsi_ata_zac_mgmt_out(&ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*use_ncq*/ 0,
+ /*zm_action*/ zone_sa,
+ /*zone_id*/ lba,
+ /*zone_flags*/ zone_flags,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ /*cdb_storage*/ NULL,
+ /*cdb_storage_len*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+ if (error != 0) {
+ error = EINVAL;
+ xpt_print(periph->path,
+ "scsi_ata_zac_mgmt_out() returned an "
+ "error!");
+ goto bailout;
+ }
+ }
+ *queue_ccb = 1;
+
+ break;
+ }
+ case DISK_ZONE_REPORT_ZONES: {
+ uint8_t *rz_ptr;
+ uint32_t num_entries, alloc_size;
+ struct disk_zone_report *rep;
+
+ rep = &bp->bio_zone.zone_params.report;
+
+ num_entries = rep->entries_allocated;
+ if (num_entries == 0) {
+ xpt_print(periph->path, "No entries allocated for "
+ "Report Zones request\n");
+ error = EINVAL;
+ goto bailout;
+ }
+ alloc_size = sizeof(struct scsi_report_zones_hdr) +
+ (sizeof(struct scsi_report_zones_desc) * num_entries);
+ alloc_size = min(alloc_size, softc->disk->d_maxsize);
+ rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
+ if (rz_ptr == NULL) {
+ xpt_print(periph->path, "Unable to allocate memory "
+ "for Report Zones request\n");
+ error = ENOMEM;
+ goto bailout;
+ }
+
+ if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
+ scsi_zbc_in(&ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbcfnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
+ /*zone_start_lba*/ rep->starting_id,
+ /*zone_options*/ rep->rep_options,
+ /*data_ptr*/ rz_ptr,
+ /*dxfer_len*/ alloc_size,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+ } else {
+ /*
+ * Note that in this case, even though we can
+ * technically use NCQ, we don't bother for several
+ * reasons:
+ * 1. It hasn't been tested on a SAT layer that
+ * supports it. This is new as of SAT-4.
+ * 2. Even when there is a SAT layer that supports
+ * it, that SAT layer will also probably support
+ * ZBC -> ZAC translation, since they are both
+ * in the SAT-4 spec.
+ * 3. Translation will likely be preferable to ATA
+ * passthrough. LSI / Avago at least single
+ * steps ATA passthrough commands in the HBA,
+ * regardless of protocol, so unless that
+ * changes, there is a performance penalty for
+ * doing ATA passthrough no matter whether
+ * you're using NCQ/FPDMA, DMA or PIO.
+ * 4. It requires a 32-byte CDB, which at least at
+ * this point in CAM requires a CDB pointer, which
+ * would require us to allocate an additional bit
+ * of storage separate from the CCB.
+ */
+ error = scsi_ata_zac_mgmt_in(&ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbcfnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*use_ncq*/ 0,
+ /*zm_action*/ ATA_ZM_REPORT_ZONES,
+ /*zone_id*/ rep->starting_id,
+ /*zone_flags*/ rep->rep_options,
+ /*data_ptr*/ rz_ptr,
+ /*dxfer_len*/ alloc_size,
+ /*cdb_storage*/ NULL,
+ /*cdb_storage_len*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+ if (error != 0) {
+ error = EINVAL;
+ xpt_print(periph->path,
+ "scsi_ata_zac_mgmt_in() returned an "
+ "error!");
+ goto bailout;
+ }
+ }
+
+ /*
+ * For BIO_ZONE, this isn't normally needed. However, it
+ * is used by devstat_end_transaction_bio() to determine
+ * how much data was transferred.
+ */
+ /*
+ * XXX KDM we have a problem. But I'm not sure how to fix
+ * it. devstat uses bio_bcount - bio_resid to calculate
+ * the amount of data transferred. The GEOM disk code
+ * uses bio_length - bio_resid to calculate the amount of
+ * data in bio_completed. We have different structure
+ * sizes above and below the da(4) driver. So, if we
+ * use the sizes above, the amount transferred won't be
+ * quite accurate for devstat. If we use different sizes
+ * for bio_bcount and bio_length (above and below
+ * respectively), then the residual needs to match one or
+ * the other. Everything is calculated after the bio
+ * leaves the driver, so changing the values around isn't
+ * really an option. For now, just set the count to the
+ * passed in length. This means that the calculations
+ * above (e.g. bio_completed) will be correct, but the
+ * amount of data reported to devstat will be slightly
+ * under or overstated.
+ */
+ bp->bio_bcount = bp->bio_length;
+
+ *queue_ccb = 1;
+
+ break;
+ }
+ case DISK_ZONE_GET_PARAMS: {
+ struct disk_zone_disk_params *params;
+
+ params = &bp->bio_zone.zone_params.disk_params;
+ bzero(params, sizeof(*params));
+
+ switch (softc->zone_mode) {
+ case DA_ZONE_DRIVE_MANAGED:
+ params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
+ break;
+ case DA_ZONE_HOST_AWARE:
+ params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
+ break;
+ case DA_ZONE_HOST_MANAGED:
+ params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
+ break;
+ default:
+ case DA_ZONE_NONE:
+ params->zone_mode = DISK_ZONE_MODE_NONE;
+ break;
+ }
+
+ if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
+ params->flags |= DISK_ZONE_DISK_URSWRZ;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
+ params->optimal_seq_zones = softc->optimal_seq_zones;
+ params->flags |= DISK_ZONE_OPT_SEQ_SET;
+ }
+
+ if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
+ params->optimal_nonseq_zones =
+ softc->optimal_nonseq_zones;
+ params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
+ }
+
+ if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
+ params->max_seq_zones = softc->max_seq_zones;
+ params->flags |= DISK_ZONE_MAX_SEQ_SET;
+ }
+ if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
+ params->flags |= DISK_ZONE_RZ_SUP;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
+ params->flags |= DISK_ZONE_OPEN_SUP;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
+ params->flags |= DISK_ZONE_CLOSE_SUP;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
+ params->flags |= DISK_ZONE_FINISH_SUP;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
+ params->flags |= DISK_ZONE_RWP_SUP;
+ break;
+ }
+ default:
+ break;
+ }
+bailout:
+ return (error);
+}
+
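For reference, a kernel-side consumer reaches da_zone_cmd() by queueing a BIO_ZONE bio at the disk. A minimal sketch of a REPORT ZONES request (editor's illustration; entry_buf is an assumed caller-allocated array of struct disk_zone_rep_entry, the entries member is assumed to point at caller storage as dazonedone() below implies, and the completion plumbing is omitted):

	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_ZONE;
	bp->bio_zone.zone_cmd = DISK_ZONE_REPORT_ZONES;
	bp->bio_zone.zone_params.report.starting_id = 0;
	bp->bio_zone.zone_params.report.rep_options = 0;
	bp->bio_zone.zone_params.report.entries_allocated = nitems(entry_buf);
	bp->bio_zone.zone_params.report.entries = entry_buf;
	/* ...set bio_done/bio_length and hand the bio to the da(4) provider... */

On completion, entries_filled says how many descriptors dazonedone() copied out and entries_available how many the drive reported in total.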
static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
@@ -2473,6 +2987,20 @@ more:
SSD_FULL_SIZE,
da_default_timeout*1000);
break;
+ case BIO_ZONE: {
+ int error, queue_ccb;
+
+ queue_ccb = 0;
+
+ error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb);
+ if ((error != 0)
+ || (queue_ccb == 0)) {
+ biofinish(bp, NULL, error);
+ xpt_release_ccb(start_ccb);
+ return;
+ }
+ break;
+ }
}
start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
start_ccb->ccb_h.flags |= CAM_UNLOCKED;
@@ -2663,15 +3191,28 @@ out:
struct ata_params *ata_params;
if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
+ if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
+ || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
+ /*
+ * Note that if the ATA VPD page isn't
+ * supported, we aren't talking to an ATA
+ * device anyway. Support for that VPD
+ * page is mandatory for SCSI to ATA (SAT)
+ * translation layers.
+ */
+ softc->state = DA_STATE_PROBE_ZONE;
+ goto skipstate;
+ }
daprobedone(periph, start_ccb);
break;
}
ata_params = (struct ata_params*)
- malloc(sizeof(*ata_params), M_SCSIDA, M_NOWAIT|M_ZERO);
+ malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO);
if (ata_params == NULL) {
- printf("dastart: Couldn't malloc ata_params data\n");
+ xpt_print(periph->path, "Couldn't malloc ata_params "
+ "data\n");
/* da_free_periph??? */
break;
}
@@ -2689,6 +3230,252 @@ out:
xpt_action(start_ccb);
break;
}
+ case DA_STATE_PROBE_ATA_LOGDIR:
+ {
+ struct ata_gp_log_dir *log_dir;
+ int retval;
+
+ retval = 0;
+
+ if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
+ /*
+ * If we don't have log support, not much point in
+ * trying to probe zone support.
+ */
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ /*
+ * If we have an ATA device (the SCSI ATA Information VPD
+ * page should be present and the ATA identify should have
+ * succeeded) and it supports logs, ask for the log directory.
+ */
+
+ log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
+ if (log_dir == NULL) {
+ xpt_print(periph->path, "Couldn't malloc log_dir "
+ "data\n");
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ retval = scsi_ata_read_log(&start_ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*log_address*/ ATA_LOG_DIRECTORY,
+ /*page_number*/ 0,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
+ AP_PROTO_DMA : AP_PROTO_PIO_IN,
+ /*data_ptr*/ (uint8_t *)log_dir,
+ /*dxfer_len*/ sizeof(*log_dir),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+
+ if (retval != 0) {
+ xpt_print(periph->path, "scsi_ata_read_log() failed!");
+ free(log_dir, M_SCSIDA);
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
+ xpt_action(start_ccb);
+ break;
+ }
+ case DA_STATE_PROBE_ATA_IDDIR:
+ {
+ struct ata_identify_log_pages *id_dir;
+ int retval;
+
+ retval = 0;
+
+ /*
+ * Check here to see whether the Identify Device log is
+ * supported in the directory of logs. If so, continue
+ * with requesting the log of identify device pages.
+ */
+ if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
+ if (id_dir == NULL) {
+ xpt_print(periph->path, "Couldn't malloc id_dir "
+ "data\n");
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ retval = scsi_ata_read_log(&start_ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_PAGE_LIST,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
+ AP_PROTO_DMA : AP_PROTO_PIO_IN,
+ /*data_ptr*/ (uint8_t *)id_dir,
+ /*dxfer_len*/ sizeof(*id_dir),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+
+ if (retval != 0) {
+ xpt_print(periph->path, "scsi_ata_read_log() failed!");
+ free(id_dir, M_SCSIDA);
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
+ xpt_action(start_ccb);
+ break;
+ }
+ case DA_STATE_PROBE_ATA_SUP:
+ {
+ struct ata_identify_log_sup_cap *sup_cap;
+ int retval;
+
+ retval = 0;
+
+ /*
+ * Check here to see whether the Supported Capabilities log
+ * is in the list of Identify Device logs.
+ */
+ if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
+ if (sup_cap == NULL) {
+ xpt_print(periph->path, "Couldn't malloc sup_cap "
+ "data\n");
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ retval = scsi_ata_read_log(&start_ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_SUP_CAP,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
+ AP_PROTO_DMA : AP_PROTO_PIO_IN,
+ /*data_ptr*/ (uint8_t *)sup_cap,
+ /*dxfer_len*/ sizeof(*sup_cap),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+
+ if (retval != 0) {
+ xpt_print(periph->path, "scsi_ata_read_log() failed!");
+ free(sup_cap, M_SCSIDA);
+ daprobedone(periph, start_ccb);
+ break;
+
+ }
+
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
+ xpt_action(start_ccb);
+ break;
+ }
+ case DA_STATE_PROBE_ATA_ZONE:
+ {
+ struct ata_zoned_info_log *ata_zone;
+ int retval;
+
+ retval = 0;
+
+ /*
+ * Check here to see whether the zoned device information
+ * page is supported. If so, continue on to request it.
+ * If not, we are done probing.
+ */
+ if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
+ M_NOWAIT|M_ZERO);
+ if (ata_zone == NULL) {
+ xpt_print(periph->path, "Couldn't malloc ata_zone "
+ "data\n");
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ retval = scsi_ata_read_log(&start_ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_ZDI,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
+ AP_PROTO_DMA : AP_PROTO_PIO_IN,
+ /*data_ptr*/ (uint8_t *)ata_zone,
+ /*dxfer_len*/ sizeof(*ata_zone),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+
+ if (retval != 0) {
+ xpt_print(periph->path, "scsi_ata_read_log() failed!");
+ free(ata_zone, M_SCSIDA);
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
+ xpt_action(start_ccb);
+
+ break;
+ }
+ case DA_STATE_PROBE_ZONE:
+ {
+ struct scsi_vpd_zoned_bdc *bdc;
+
+ /*
+ * Note that this page will be supported for SCSI protocol
+ * devices that support ZBC (SMR devices), as well as ATA
+ * protocol devices that are behind a SAT (SCSI to ATA
+ * Translation) layer that supports converting ZBC commands
+ * to their ZAC equivalents.
+ */
+ if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ bdc = (struct scsi_vpd_zoned_bdc *)
+ malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
+
+ if (bdc == NULL) {
+ xpt_release_ccb(start_ccb);
+ xpt_print(periph->path, "Couldn't malloc zone VPD "
+ "data\n");
+ break;
+ }
+ scsi_inquiry(&start_ccb->csio,
+ /*retries*/da_retry_count,
+ /*cbfcnp*/dadone,
+ /*tag_action*/MSG_SIMPLE_Q_TAG,
+ /*inq_buf*/(u_int8_t *)bdc,
+ /*inq_len*/sizeof(*bdc),
+ /*evpd*/TRUE,
+ /*page_code*/SVPD_ZONED_BDC,
+ /*sense_len*/SSD_FULL_SIZE,
+ /*timeout*/da_default_timeout * 1000);
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
+ xpt_action(start_ccb);
+ break;
+ }
}
}
@@ -3053,6 +3840,153 @@ cmd6workaround(union ccb *ccb)
}
static void
+dazonedone(struct cam_periph *periph, union ccb *ccb)
+{
+ struct da_softc *softc;
+ struct bio *bp;
+
+ softc = periph->softc;
+ bp = (struct bio *)ccb->ccb_h.ccb_bp;
+
+ switch (bp->bio_zone.zone_cmd) {
+ case DISK_ZONE_OPEN:
+ case DISK_ZONE_CLOSE:
+ case DISK_ZONE_FINISH:
+ case DISK_ZONE_RWP:
+ break;
+ case DISK_ZONE_REPORT_ZONES: {
+ uint32_t avail_len;
+ struct disk_zone_report *rep;
+ struct scsi_report_zones_hdr *hdr;
+ struct scsi_report_zones_desc *desc;
+ struct disk_zone_rep_entry *entry;
+ uint32_t num_alloced, hdr_len, num_avail;
+ uint32_t num_to_fill, i;
+ int ata;
+
+ rep = &bp->bio_zone.zone_params.report;
+ avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
+ /*
+ * Note that bio_resid isn't normally used for zone
+ * commands, but it is used by devstat_end_transaction_bio()
+ * to determine how much data was transferred. Because
+ * the size of the SCSI/ATA data structures is different
+ * than the size of the BIO interface structures, the
+ * amount of data actually transferred from the drive will
+ * be different than the amount of data transferred to
+ * the user.
+ */
+ bp->bio_resid = ccb->csio.resid;
+ num_alloced = rep->entries_allocated;
+ hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
+ if (avail_len < sizeof(*hdr)) {
+ /*
+ * Is there a better error than EIO here? We asked
+ * for at least the header, and we got less than
+ * that.
+ */
+ bp->bio_error = EIO;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
+ ata = 1;
+ else
+ ata = 0;
+
+ hdr_len = ata ? le32dec(hdr->length) :
+ scsi_4btoul(hdr->length);
+ if (hdr_len > 0)
+ rep->entries_available = hdr_len / sizeof(*desc);
+ else
+ rep->entries_available = 0;
+ /*
+ * NOTE: the BIO version of this field uses the same values
+ * as the SCSI/ATA field. This means consumers could see
+ * additional values that aren't defined in bio.h if more
+ * values of the field are defined later.
+ */
+ rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
+ rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) :
+ scsi_8btou64(hdr->maximum_lba);
+ /*
+ * If the drive reports no entries that match the query,
+ * we're done.
+ */
+ if (hdr_len == 0) {
+ rep->entries_filled = 0;
+ break;
+ }
+
+ num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
+ hdr_len / sizeof(*desc));
+ /*
+ * If the drive didn't return any data, then we're done.
+ */
+ if (num_avail == 0) {
+ rep->entries_filled = 0;
+ break;
+ }
+
+ num_to_fill = min(num_avail, rep->entries_allocated);
+ /*
+ * If the user didn't allocate any entries for us to fill,
+ * we're done.
+ */
+ if (num_to_fill == 0) {
+ rep->entries_filled = 0;
+ break;
+ }
+
+ for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
+ i < num_to_fill; i++, desc++, entry++) {
+ /*
+ * NOTE: we're mapping the values here directly
+ * from the SCSI/ATA bit definitions to the bio.h
+ * definitions. There is also a warning in
+ * disk_zone.h, but the impact is that if
+ * additional values are added in the SCSI/ATA
+ * specs, these will be visible to consumers of
+ * this interface.
+ */
+ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
+ entry->zone_condition =
+ (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
+ SRZ_ZONE_COND_SHIFT;
+ entry->zone_flags |= desc->zone_flags &
+ (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
+ entry->zone_length =
+ ata ? le64dec(desc->zone_length) :
+ scsi_8btou64(desc->zone_length);
+ entry->zone_start_lba =
+ ata ? le64dec(desc->zone_start_lba) :
+ scsi_8btou64(desc->zone_start_lba);
+ entry->write_pointer_lba =
+ ata ? le64dec(desc->write_pointer_lba) :
+ scsi_8btou64(desc->write_pointer_lba);
+ }
+ rep->entries_filled = num_to_fill;
+ break;
+ }
+ case DISK_ZONE_GET_PARAMS:
+ default:
+ /*
+ * In theory we should not get a GET_PARAMS bio, since it
+ * should be handled without queueing the command to the
+ * drive.
+ */
+ panic("%s: Invalid zone command %d", __func__,
+ bp->bio_zone.zone_cmd);
+ break;
+ }
+
+ if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
+ free(ccb->csio.data_ptr, M_SCSIDA);
+}
+
+static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
struct da_softc *softc;
@@ -3147,11 +4081,14 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
} else if (bp != NULL) {
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
panic("REQ_CMP with QFRZN");
- if (state == DA_CCB_DELETE)
+ if (bp->bio_cmd == BIO_ZONE)
+ dazonedone(periph, done_ccb);
+ else if (state == DA_CCB_DELETE)
bp->bio_resid = 0;
else
bp->bio_resid = csio->resid;
- if (csio->resid > 0)
+ if ((csio->resid > 0)
+ && (bp->bio_cmd != BIO_ZONE))
bp->bio_flags |= BIO_ERROR;
if (softc->error_inject != 0) {
bp->bio_error = softc->error_inject;
@@ -3569,27 +4506,69 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
}
case DA_CCB_PROBE_BDC:
{
- struct scsi_vpd_block_characteristics *bdc;
+ struct scsi_vpd_block_device_characteristics *bdc;
- bdc = (struct scsi_vpd_block_characteristics *)csio->data_ptr;
+ bdc = (struct scsi_vpd_block_device_characteristics *)
+ csio->data_ptr;
if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ uint32_t valid_len;
+
/*
* Disable queue sorting for non-rotational media
* by default.
*/
u_int16_t old_rate = softc->disk->d_rotation_rate;
- softc->disk->d_rotation_rate =
- scsi_2btoul(bdc->medium_rotation_rate);
- if (softc->disk->d_rotation_rate ==
- SVPD_BDC_RATE_NON_ROTATING) {
- cam_iosched_set_sort_queue(softc->cam_iosched, 0);
- softc->rotating = 0;
+ valid_len = csio->dxfer_len - csio->resid;
+ if (SBDC_IS_PRESENT(bdc, valid_len,
+ medium_rotation_rate)) {
+ softc->disk->d_rotation_rate =
+ scsi_2btoul(bdc->medium_rotation_rate);
+ if (softc->disk->d_rotation_rate ==
+ SVPD_BDC_RATE_NON_ROTATING) {
+ cam_iosched_set_sort_queue(
+ softc->cam_iosched, 0);
+ softc->rotating = 0;
+ }
+ if (softc->disk->d_rotation_rate != old_rate) {
+ disk_attr_changed(softc->disk,
+ "GEOM::rotation_rate", M_NOWAIT);
+ }
}
- if (softc->disk->d_rotation_rate != old_rate) {
- disk_attr_changed(softc->disk,
- "GEOM::rotation_rate", M_NOWAIT);
+ if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
+ && (softc->zone_mode == DA_ZONE_NONE)) {
+ int ata_proto;
+
+ if (scsi_vpd_supported_page(periph,
+ SVPD_ATA_INFORMATION))
+ ata_proto = 1;
+ else
+ ata_proto = 0;
+
+ /*
+ * The Zoned field will only be set for
+ * Drive Managed and Host Aware drives. If
+ * they are Host Managed, the device type
+ * in the standard INQUIRY data should be
+ * set to T_ZBC_HM (0x14).
+ */
+ if ((bdc->flags & SVPD_ZBC_MASK) ==
+ SVPD_HAW_ZBC) {
+ softc->zone_mode = DA_ZONE_HOST_AWARE;
+ softc->zone_interface = (ata_proto) ?
+ DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
+ } else if ((bdc->flags & SVPD_ZBC_MASK) ==
+ SVPD_DM_ZBC) {
+ softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
+ softc->zone_interface = (ata_proto) ?
+ DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
+ } else if ((bdc->flags & SVPD_ZBC_MASK) !=
+ SVPD_ZBC_NR) {
+ xpt_print(periph->path, "Unknown zoned "
+ "type %#x",
+ bdc->flags & SVPD_ZBC_MASK);
+ }
}
} else {
int error;
@@ -3619,10 +4598,14 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
{
int i;
struct ata_params *ata_params;
+ int continue_probe;
+ int error;
int16_t *ptr;
ata_params = (struct ata_params *)csio->data_ptr;
ptr = (uint16_t *)ata_params;
+ continue_probe = 0;
+ error = 0;
if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
uint16_t old_rate;
@@ -3654,14 +4637,59 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
disk_attr_changed(softc->disk,
"GEOM::rotation_rate", M_NOWAIT);
}
+
+ if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
+ softc->flags |= DA_FLAG_CAN_ATA_DMA;
+
+ if (ata_params->support.extension &
+ ATA_SUPPORT_GENLOG)
+ softc->flags |= DA_FLAG_CAN_ATA_LOG;
+
+ /*
+ * At this point, if we have a SATA host aware drive,
+ * we communicate via ATA passthrough unless the
+ * SAT layer supports ZBC -> ZAC translation. In
+ * that case, we use the translated ZBC interface instead.
+ */
+ /*
+ * XXX KDM figure out how to detect a host managed
+ * SATA drive.
+ */
+ if (softc->zone_mode == DA_ZONE_NONE) {
+ /*
+ * Note that we don't override the zone
+ * mode or interface if it has already been
+ * set. This is because it has either been
+ * set as a quirk, or when we probed the
+ * SCSI Block Device Characteristics page,
+ * the zoned field was set. The latter
+ * means that the SAT layer supports ZBC to
+ * ZAC translation, and we would prefer to
+ * use that if it is available.
+ */
+ if ((ata_params->support3 &
+ ATA_SUPPORT_ZONE_MASK) ==
+ ATA_SUPPORT_ZONE_HOST_AWARE) {
+ softc->zone_mode = DA_ZONE_HOST_AWARE;
+ softc->zone_interface =
+ DA_ZONE_IF_ATA_PASS;
+ } else if ((ata_params->support3 &
+ ATA_SUPPORT_ZONE_MASK) ==
+ ATA_SUPPORT_ZONE_DEV_MANAGED) {
+ softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
+ softc->zone_interface =
+ DA_ZONE_IF_ATA_PASS;
+ }
+ }
+
} else {
- int error;
error = daerror(done_ccb, CAM_RETRY_SELTO,
SF_RETRY_UA|SF_NO_PRINT);
if (error == ERESTART)
return;
else if (error != 0) {
- if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
/* Don't wedge this device's queue */
cam_release_devq(done_ccb->ccb_h.path,
/*relsim_flags*/0,
@@ -3673,6 +4701,454 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
}
free(ata_params, M_SCSIDA);
+ if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
+ || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
+ /*
+ * If the ATA IDENTIFY failed, we could be talking
+ * to a SCSI drive, although that seems unlikely,
+ * since the drive did report that it supported the
+ * ATA Information VPD page. If the ATA IDENTIFY
+ * succeeded, and the SAT layer doesn't support
+ * ZBC -> ZAC translation, continue on to get the
+ * directory of ATA logs, and complete the rest of
+ * the ZAC probe. If the SAT layer does support
+ * ZBC -> ZAC translation, we want to use that,
+ * and we'll probe the SCSI Zoned Block Device
+ * Characteristics VPD page next.
+ */
+ if ((error == 0)
+ && (softc->flags & DA_FLAG_CAN_ATA_LOG)
+ && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
+ softc->state = DA_STATE_PROBE_ATA_LOGDIR;
+ else
+ softc->state = DA_STATE_PROBE_ZONE;
+ continue_probe = 1;
+ }
+ if (continue_probe != 0) {
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ATA_LOGDIR:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ error = 0;
+ softc->valid_logdir_len = 0;
+ bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
+ softc->valid_logdir_len =
+ csio->dxfer_len - csio->resid;
+ if (softc->valid_logdir_len > 0)
+ bcopy(csio->data_ptr, &softc->ata_logdir,
+ min(softc->valid_logdir_len,
+ sizeof(softc->ata_logdir)));
+ /*
+ * Figure out whether the Identify Device log is
+ * supported. The General Purpose log directory
+ * has a header, and lists the number of pages
+ * available for each GP log identified by the
+ * offset into the list.
+ */
+ if ((softc->valid_logdir_len >=
+ ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
+ && (le16dec(softc->ata_logdir.header) ==
+ ATA_GP_LOG_DIR_VERSION)
+ && (le16dec(&softc->ata_logdir.num_pages[
+ (ATA_IDENTIFY_DATA_LOG *
+ sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
+ softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
+ } else {
+ softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
+ }
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA log directory,
+ * then ATA logs are effectively not
+ * supported even if the bit is set in the
+ * identify data.
+ */
+ softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
+ DA_FLAG_CAN_ATA_IDLOG);
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(csio->data_ptr, M_SCSIDA);
+
+ if ((error == 0)
+ && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
+ softc->state = DA_STATE_PROBE_ATA_IDDIR;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ATA_IDDIR:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ off_t entries_offset, max_entries;
+ error = 0;
+
+ softc->valid_iddir_len = 0;
+ bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
+ softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
+ DA_FLAG_CAN_ATA_ZONE);
+ softc->valid_iddir_len =
+ csio->dxfer_len - csio->resid;
+ if (softc->valid_iddir_len > 0)
+ bcopy(csio->data_ptr, &softc->ata_iddir,
+ min(softc->valid_iddir_len,
+ sizeof(softc->ata_iddir)));
+
+ entries_offset =
+ __offsetof(struct ata_identify_log_pages,entries);
+ max_entries = softc->valid_iddir_len - entries_offset;
+ if ((softc->valid_iddir_len > (entries_offset + 1))
+ && (le64dec(softc->ata_iddir.header) ==
+ ATA_IDLOG_REVISION)
+ && (softc->ata_iddir.entry_count > 0)) {
+ int num_entries, i;
+
+ num_entries = softc->ata_iddir.entry_count;
+ num_entries = min(num_entries,
+ softc->valid_iddir_len - entries_offset);
+ for (i = 0; i < num_entries &&
+ i < max_entries; i++) {
+ if (softc->ata_iddir.entries[i] ==
+ ATA_IDL_SUP_CAP)
+ softc->flags |=
+ DA_FLAG_CAN_ATA_SUPCAP;
+ else if (softc->ata_iddir.entries[i]==
+ ATA_IDL_ZDI)
+ softc->flags |=
+ DA_FLAG_CAN_ATA_ZONE;
+
+ if ((softc->flags &
+ DA_FLAG_CAN_ATA_SUPCAP)
+ && (softc->flags &
+ DA_FLAG_CAN_ATA_ZONE))
+ break;
+ }
+ }
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA Identify Data log
+ * directory, then it effectively isn't
+ * supported even if the ATA Log directory
+ * lists a non-zero number of pages for
+ * this log.
+ */
+ softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(csio->data_ptr, M_SCSIDA);
+
+ if ((error == 0)
+ && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
+ softc->state = DA_STATE_PROBE_ATA_SUP;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ATA_SUP:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ uint32_t valid_len;
+ size_t needed_size;
+ struct ata_identify_log_sup_cap *sup_cap;
+ error = 0;
+
+ sup_cap = (struct ata_identify_log_sup_cap *)
+ csio->data_ptr;
+ valid_len = csio->dxfer_len - csio->resid;
+ needed_size =
+ __offsetof(struct ata_identify_log_sup_cap,
+ sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
+ if (valid_len >= needed_size) {
+ uint64_t zoned, zac_cap;
+
+ zoned = le64dec(sup_cap->zoned_cap);
+ if (zoned & ATA_ZONED_VALID) {
+ /*
+ * This should have already been
+ * set, because this is also in the
+ * ATA identify data.
+ */
+ if ((zoned & ATA_ZONED_MASK) ==
+ ATA_SUPPORT_ZONE_HOST_AWARE)
+ softc->zone_mode =
+ DA_ZONE_HOST_AWARE;
+ else if ((zoned & ATA_ZONED_MASK) ==
+ ATA_SUPPORT_ZONE_DEV_MANAGED)
+ softc->zone_mode =
+ DA_ZONE_DRIVE_MANAGED;
+ }
+
+ zac_cap = le64dec(sup_cap->sup_zac_cap);
+ if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
+ if (zac_cap & ATA_REPORT_ZONES_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_RZ_SUP;
+ if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_OPEN_SUP;
+ if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_CLOSE_SUP;
+ if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_FINISH_SUP;
+ if (zac_cap & ATA_ND_RWP_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_RWP_SUP;
+ } else {
+ /*
+ * This field was introduced in
+ * ACS-4, r08 on April 28th, 2015.
+ * If the drive firmware was written
+ * to an earlier spec, it won't have
+ * the field. So, assume all
+ * commands are supported.
+ */
+ softc->zone_flags |=
+ DA_ZONE_FLAG_SUP_MASK;
+ }
+
+ }
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA Identify Data
+ * Supported Capabilities page, clear the
+ * flag...
+ */
+ softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
+ /*
+ * And clear zone capabilities.
+ */
+ softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(csio->data_ptr, M_SCSIDA);
+
+ if ((error == 0)
+ && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
+ softc->state = DA_STATE_PROBE_ATA_ZONE;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ATA_ZONE:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ struct ata_zoned_info_log *zi_log;
+ uint32_t valid_len;
+ size_t needed_size;
+
+ zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
+
+ valid_len = csio->dxfer_len - csio->resid;
+ needed_size = __offsetof(struct ata_zoned_info_log,
+ version_info) + 1 + sizeof(zi_log->version_info);
+ if (valid_len >= needed_size) {
+ uint64_t tmpvar;
+
+ tmpvar = le64dec(zi_log->zoned_cap);
+ if (tmpvar & ATA_ZDI_CAP_VALID) {
+ if (tmpvar & ATA_ZDI_CAP_URSWRZ)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_URSWRZ;
+ else
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_URSWRZ;
+ }
+ tmpvar = le64dec(zi_log->optimal_seq_zones);
+ if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
+ softc->zone_flags |=
+ DA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_seq_zones = (tmpvar &
+ ATA_ZDI_OPT_SEQ_MASK);
+ } else {
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_seq_zones = 0;
+ }
+
+ tmpvar =le64dec(zi_log->optimal_nonseq_zones);
+ if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
+ softc->zone_flags |=
+ DA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->optimal_nonseq_zones =
+ (tmpvar & ATA_ZDI_OPT_NS_MASK);
+ } else {
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->optimal_nonseq_zones = 0;
+ }
+
+ tmpvar = le64dec(zi_log->max_seq_req_zones);
+ if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
+ softc->zone_flags |=
+ DA_ZONE_FLAG_MAX_SEQ_SET;
+ softc->max_seq_zones =
+ (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
+ } else {
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_MAX_SEQ_SET;
+ softc->max_seq_zones = 0;
+ }
+ }
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
+ softc->flags &= ~DA_ZONE_FLAG_SET_MASK;
+
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+
+ }
+ free(csio->data_ptr, M_SCSIDA);
+
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ZONE:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ uint32_t valid_len;
+ size_t needed_len;
+ struct scsi_vpd_zoned_bdc *zoned_bdc;
+
+ error = 0;
+ zoned_bdc = (struct scsi_vpd_zoned_bdc *)
+ csio->data_ptr;
+ valid_len = csio->dxfer_len - csio->resid;
+ needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
+ max_seq_req_zones) + 1 +
+ sizeof(zoned_bdc->max_seq_req_zones);
+ if ((valid_len >= needed_len)
+ && (scsi_2btoul(zoned_bdc->page_length) >=
+ SVPD_ZBDC_PL)) {
+ if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_URSWRZ;
+ else
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_URSWRZ;
+ softc->optimal_seq_zones =
+ scsi_4btoul(zoned_bdc->optimal_seq_zones);
+ softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_nonseq_zones = scsi_4btoul(
+ zoned_bdc->optimal_nonseq_zones);
+ softc->zone_flags |=
+ DA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->max_seq_zones =
+ scsi_4btoul(zoned_bdc->max_seq_req_zones);
+ softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
+ }
+ /*
+ * All of the zone commands are mandatory for SCSI
+ * devices.
+ *
+ * XXX KDM this is valid as of September 2015.
+ * Re-check this assumption once the SAT spec is
+ * updated to support SCSI ZBC to ATA ZAC mapping.
+ * Since ATA allows zone commands to be reported
+ * as supported or not, this may not necessarily
+ * be true for an ATA device behind a SAT (SCSI to
+ * ATA Translation) layer.
+ */
+ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
daprobedone(periph, done_ccb);
return;
}
@@ -4167,3 +5643,253 @@ scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
}
#endif /* _KERNEL */
+
+void
+scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
+ uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ struct scsi_zbc_out *scsi_cmd;
+
+ scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = ZBC_OUT;
+ scsi_cmd->service_action = service_action;
+ scsi_u64to8b(zone_id, scsi_cmd->zone_id);
+ scsi_cmd->zone_flags = zone_flags;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
+ uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ struct scsi_zbc_in *scsi_cmd;
+
+ scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = ZBC_IN;
+ scsi_cmd->service_action = service_action;
+ scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
+ scsi_cmd->zone_options = zone_options;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+}
+
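A note on sizing the REPORT ZONES data buffer handed to scsi_zbc_in() (this mirrors the calculation da_zone_cmd() performs above): the response is a fixed header followed by one descriptor per zone, so a request for a hypothetical 64 zones needs

	dxfer_len = sizeof(struct scsi_report_zones_hdr) +
	    64 * sizeof(struct scsi_report_zones_desc);

with the same value passed as dxfer_len; the da(4) code additionally clamps it to the disk's maximum I/O size.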
+int
+scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int use_ncq,
+ uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
+ uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t *cdb_storage, size_t cdb_storage_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ uint8_t command_out, protocol, ata_flags;
+ uint16_t features_out;
+ uint32_t sectors_out, auxiliary;
+ int retval;
+
+ retval = 0;
+
+ if (use_ncq == 0) {
+ command_out = ATA_ZAC_MANAGEMENT_OUT;
+ features_out = (zm_action & 0xf) | (zone_flags << 8);
+ ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
+ if (dxfer_len == 0) {
+ protocol = AP_PROTO_NON_DATA;
+ ata_flags |= AP_FLAG_TLEN_NO_DATA;
+ sectors_out = 0;
+ } else {
+ protocol = AP_PROTO_DMA;
+ ata_flags |= AP_FLAG_TLEN_SECT_CNT |
+ AP_FLAG_TDIR_TO_DEV;
+ sectors_out = ((dxfer_len >> 9) & 0xffff);
+ }
+ auxiliary = 0;
+ } else {
+ ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
+ if (dxfer_len == 0) {
+ command_out = ATA_NCQ_NON_DATA;
+ features_out = ATA_NCQ_ZAC_MGMT_OUT;
+ /*
+ * We're assuming the SCSI to ATA translation layer
+ * will set the NCQ tag number in the tag field.
+ * That isn't clear from the SAT-4 spec (as of rev 05).
+ */
+ sectors_out = 0;
+ ata_flags |= AP_FLAG_TLEN_NO_DATA;
+ } else {
+ command_out = ATA_SEND_FPDMA_QUEUED;
+ /*
+ * Note that we're defaulting to normal priority,
+ * and assuming that the SCSI to ATA translation
+ * layer will insert the NCQ tag number in the tag
+ * field. That isn't clear in the SAT-4 spec (as
+ * of rev 05).
+ */
+ sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
+
+ ata_flags |= AP_FLAG_TLEN_FEAT |
+ AP_FLAG_TDIR_TO_DEV;
+
+ /*
+ * For SEND FPDMA QUEUED, the transfer length is
+ * encoded in the FEATURE register, and 0 means
+ * that 65536 512-byte blocks are to be transferred.
+ * In practice, it seems unlikely that we'll see
+ * a transfer that large, and it may confuse
+ * the SAT layer, because generally that means that
+ * 0 bytes should be transferred.
+ */
+ if (dxfer_len == (65536 * 512)) {
+ features_out = 0;
+ } else if (dxfer_len <= (65535 * 512)) {
+ features_out = ((dxfer_len >> 9) & 0xffff);
+ } else {
+ /* The transfer is too big. */
+ retval = 1;
+ goto bailout;
+ }
+
+ }
+
+ auxiliary = (zm_action & 0xf) | (zone_flags << 8);
+ protocol = AP_PROTO_FPDMA;
+ }
+
+ protocol |= AP_EXTEND;
+
+ retval = scsi_ata_pass(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
+ tag_action,
+ /*protocol*/ protocol,
+ /*ata_flags*/ ata_flags,
+ /*features*/ features_out,
+ /*sector_count*/ sectors_out,
+ /*lba*/ zone_id,
+ /*command*/ command_out,
+ /*device*/ 0,
+ /*icc*/ 0,
+ /*auxiliary*/ auxiliary,
+ /*control*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*cdb_storage*/ cdb_storage,
+ /*cdb_storage_len*/ cdb_storage_len,
+ /*minimum_cmd_size*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ timeout);
+
+bailout:
+
+ return (retval);
+}
+
+int
+scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int use_ncq,
+ uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
+ uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t *cdb_storage, size_t cdb_storage_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ uint8_t command_out, protocol;
+ uint16_t features_out, sectors_out;
+ uint32_t auxiliary;
+ int ata_flags;
+ int retval;
+
+ retval = 0;
+ ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
+
+ if (use_ncq == 0) {
+ command_out = ATA_ZAC_MANAGEMENT_IN;
+ /* XXX KDM put a macro here */
+ features_out = (zm_action & 0xf) | (zone_flags << 8);
+ sectors_out = dxfer_len >> 9; /* XXX KDM macro */
+ protocol = AP_PROTO_DMA;
+ ata_flags |= AP_FLAG_TLEN_SECT_CNT;
+ auxiliary = 0;
+ } else {
+ ata_flags |= AP_FLAG_TLEN_FEAT;
+
+ command_out = ATA_RECV_FPDMA_QUEUED;
+ sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
+
+ /*
+ * For RECEIVE FPDMA QUEUED, the transfer length is
+ * encoded in the FEATURE register, and 0 means
+ * that 65536 512-byte blocks are to be transferred.
+ * In practice, it seems unlikely that we'll see
+ * a transfer that large, and it may confuse the
+ * SAT layer, because generally that means that
+ * 0 bytes should be transferred.
+ */
+ if (dxfer_len == (65536 * 512)) {
+ features_out = 0;
+ } else if (dxfer_len <= (65535 * 512)) {
+ features_out = ((dxfer_len >> 9) & 0xffff);
+ } else {
+ /* The transfer is too big. */
+ retval = 1;
+ goto bailout;
+ }
+ auxiliary = (zm_action & 0xf) | (zone_flags << 8);
+ protocol = AP_PROTO_FPDMA;
+ }
+
+ protocol |= AP_EXTEND;
+
+ retval = scsi_ata_pass(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_IN,
+ tag_action,
+ /*protocol*/ protocol,
+ /*ata_flags*/ ata_flags,
+ /*features*/ features_out,
+ /*sector_count*/ sectors_out,
+ /*lba*/ zone_id,
+ /*command*/ command_out,
+ /*device*/ 0,
+ /*icc*/ 0,
+ /*auxiliary*/ auxiliary,
+ /*control*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
+ /*cdb_storage*/ cdb_storage,
+ /*cdb_storage_len*/ cdb_storage_len,
+ /*minimum_cmd_size*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ timeout);
+
+bailout:
+ return (retval);
+}
diff --git a/sys/cam/scsi/scsi_da.h b/sys/cam/scsi/scsi_da.h
index ad4d0db..e6eb95f 100644
--- a/sys/cam/scsi/scsi_da.h
+++ b/sys/cam/scsi/scsi_da.h
@@ -153,6 +153,84 @@ struct scsi_read_defect_data_12
uint8_t control;
};
+struct scsi_zbc_out
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define ZBC_OUT_SA_CLOSE 0x01
+#define ZBC_OUT_SA_FINISH 0x02
+#define ZBC_OUT_SA_OPEN 0x03
+#define ZBC_OUT_SA_RWP 0x04
+ uint8_t zone_id[8];
+ uint8_t reserved[4];
+ uint8_t zone_flags;
+#define ZBC_OUT_ALL 0x01
+ uint8_t control;
+};
+
+struct scsi_zbc_in
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define ZBC_IN_SA_REPORT_ZONES 0x00
+ uint8_t zone_start_lba[8];
+ uint8_t length[4];
+ uint8_t zone_options;
+#define ZBC_IN_PARTIAL 0x80
+#define ZBC_IN_REP_ALL_ZONES 0x00
+#define ZBC_IN_REP_EMPTY 0x01
+#define ZBC_IN_REP_IMP_OPEN 0x02
+#define ZBC_IN_REP_EXP_OPEN 0x03
+#define ZBC_IN_REP_CLOSED 0x04
+#define ZBC_IN_REP_FULL 0x05
+#define ZBC_IN_REP_READONLY 0x06
+#define ZBC_IN_REP_OFFLINE 0x07
+#define ZBC_IN_REP_RESET 0x10
+#define ZBC_IN_REP_NON_SEQ 0x11
+#define ZBC_IN_REP_NON_WP 0x3f
+#define ZBC_IN_REP_MASK 0x3f
+ uint8_t control;
+};
+
+struct scsi_report_zones_desc {
+ uint8_t zone_type;
+#define SRZ_TYPE_CONVENTIONAL 0x01
+#define SRZ_TYPE_SEQ_REQUIRED 0x02
+#define SRZ_TYPE_SEQ_PREFERRED 0x03
+#define SRZ_TYPE_MASK 0x0f
+ uint8_t zone_flags;
+#define SRZ_ZONE_COND_SHIFT 4
+#define SRZ_ZONE_COND_MASK 0xf0
+#define SRZ_ZONE_COND_NWP 0x00
+#define SRZ_ZONE_COND_EMPTY 0x10
+#define SRZ_ZONE_COND_IMP_OPEN 0x20
+#define SRZ_ZONE_COND_EXP_OPEN 0x30
+#define SRZ_ZONE_COND_CLOSED 0x40
+#define SRZ_ZONE_COND_READONLY 0xd0
+#define SRZ_ZONE_COND_FULL 0xe0
+#define SRZ_ZONE_COND_OFFLINE 0xf0
+#define SRZ_ZONE_NON_SEQ 0x02
+#define SRZ_ZONE_RESET 0x01
+ uint8_t reserved[6];
+ uint8_t zone_length[8];
+ uint8_t zone_start_lba[8];
+ uint8_t write_pointer_lba[8];
+ uint8_t reserved2[32];
+};
+
+struct scsi_report_zones_hdr {
+ uint8_t length[4];
+ uint8_t byte4;
+#define SRZ_SAME_ALL_DIFFERENT 0x00 /* Lengths and types vary */
+#define SRZ_SAME_ALL_SAME 0x01 /* Lengths and types the same */
+#define SRZ_SAME_LAST_DIFFERENT 0x02 /* Types same, last length varies */
+#define SRZ_SAME_TYPES_DIFFERENT 0x03 /* Types vary, length the same */
+#define SRZ_SAME_MASK 0x0f
+ uint8_t reserved[3];
+ uint8_t maximum_lba[8];
+ uint8_t reserved2[48];
+ struct scsi_report_zones_desc desc_list[];
+};
/*
* Opcodes
@@ -167,6 +245,8 @@ struct scsi_read_defect_data_12
#define VERIFY 0x2f
#define READ_DEFECT_DATA_10 0x37
#define SANITIZE 0x48
+#define ZBC_OUT 0x94
+#define ZBC_IN 0x95
#define READ_DEFECT_DATA_12 0xb7
struct format_defect_list_header
@@ -581,6 +661,38 @@ void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
u_int32_t timeout);
#endif /* !_KERNEL */
+
+void scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
+ uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t sense_len, uint32_t timeout);
+
+void scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint8_t service_action,
+ uint64_t zone_start_lba, uint8_t zone_options,
+ uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len,
+ uint32_t timeout);
+
+int scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int use_ncq,
+ uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint8_t *data_ptr,
+ uint32_t dxfer_len, uint8_t *cdb_storage,
+ size_t cdb_storage_len, uint8_t sense_len,
+ uint32_t timeout);
+
+int scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int use_ncq,
+ uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint8_t *data_ptr,
+ uint32_t dxfer_len, uint8_t *cdb_storage,
+ size_t cdb_storage_len, uint8_t sense_len,
+ uint32_t timeout);
+
__END_DECLS
#endif /* _SCSI_SCSI_DA_H */
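As a rough, non-authoritative sketch of how the new scsi_zbc_in() call and the REPORT ZONES structures above might be consumed (the helper name and the truncation check are invented for illustration; scsi_4btoul() and scsi_8btou64() are the existing byte-order helpers from scsi_all.h):

#include <sys/param.h>
#include <sys/systm.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>

/* Hypothetical example: dump the zone descriptors in a REPORT ZONES buffer. */
static void
report_zones_dump(struct scsi_report_zones_hdr *hdr, uint32_t valid_len)
{
	struct scsi_report_zones_desc *desc;
	uint32_t avail_len, i, num_zones;

	/* The length field counts the descriptor bytes that follow the header. */
	avail_len = scsi_4btoul(hdr->length);
	num_zones = avail_len / sizeof(*desc);

	for (i = 0, desc = &hdr->desc_list[0]; i < num_zones; i++, desc++) {
		/* Stop at the end of the data actually transferred. */
		if ((uint8_t *)(desc + 1) > (uint8_t *)hdr + valid_len)
			break;
		printf("zone %u: type %#x cond %#x start %ju wp %ju\n", i,
		    desc->zone_type & SRZ_TYPE_MASK,
		    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
		    SRZ_ZONE_COND_SHIFT,
		    (uintmax_t)scsi_8btou64(desc->zone_start_lba),
		    (uintmax_t)scsi_8btou64(desc->write_pointer_lba));
	}
}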
diff --git a/sys/compat/linux/linux_file.c b/sys/compat/linux/linux_file.c
index 3f9361d..d3af860 100644
--- a/sys/compat/linux/linux_file.c
+++ b/sys/compat/linux/linux_file.c
@@ -69,108 +69,106 @@ __FBSDID("$FreeBSD$");
int
linux_creat(struct thread *td, struct linux_creat_args *args)
{
- char *path;
- int error;
-
- LCONVPATHEXIST(td, args->path, &path);
+ char *path;
+ int error;
+ LCONVPATHEXIST(td, args->path, &path);
#ifdef DEBUG
if (ldebug(creat))
printf(ARGS(creat, "%s, %d"), path, args->mode);
#endif
- error = kern_openat(td, AT_FDCWD, path, UIO_SYSSPACE,
- O_WRONLY | O_CREAT | O_TRUNC, args->mode);
- LFREEPATH(path);
- return (error);
+ error = kern_openat(td, AT_FDCWD, path, UIO_SYSSPACE,
+ O_WRONLY | O_CREAT | O_TRUNC, args->mode);
+ LFREEPATH(path);
+ return (error);
}
static int
linux_common_open(struct thread *td, int dirfd, char *path, int l_flags, int mode)
{
- cap_rights_t rights;
- struct proc *p = td->td_proc;
- struct file *fp;
- int fd;
- int bsd_flags, error;
-
- bsd_flags = 0;
- switch (l_flags & LINUX_O_ACCMODE) {
- case LINUX_O_WRONLY:
- bsd_flags |= O_WRONLY;
- break;
- case LINUX_O_RDWR:
- bsd_flags |= O_RDWR;
- break;
- default:
- bsd_flags |= O_RDONLY;
- }
- if (l_flags & LINUX_O_NDELAY)
- bsd_flags |= O_NONBLOCK;
- if (l_flags & LINUX_O_APPEND)
- bsd_flags |= O_APPEND;
- if (l_flags & LINUX_O_SYNC)
- bsd_flags |= O_FSYNC;
- if (l_flags & LINUX_O_NONBLOCK)
- bsd_flags |= O_NONBLOCK;
- if (l_flags & LINUX_FASYNC)
- bsd_flags |= O_ASYNC;
- if (l_flags & LINUX_O_CREAT)
- bsd_flags |= O_CREAT;
- if (l_flags & LINUX_O_TRUNC)
- bsd_flags |= O_TRUNC;
- if (l_flags & LINUX_O_EXCL)
- bsd_flags |= O_EXCL;
- if (l_flags & LINUX_O_NOCTTY)
- bsd_flags |= O_NOCTTY;
- if (l_flags & LINUX_O_DIRECT)
- bsd_flags |= O_DIRECT;
- if (l_flags & LINUX_O_NOFOLLOW)
- bsd_flags |= O_NOFOLLOW;
- if (l_flags & LINUX_O_DIRECTORY)
- bsd_flags |= O_DIRECTORY;
- /* XXX LINUX_O_NOATIME: unable to be easily implemented. */
-
- error = kern_openat(td, dirfd, path, UIO_SYSSPACE, bsd_flags, mode);
- if (error != 0)
- goto done;
-
- if (bsd_flags & O_NOCTTY)
- goto done;
-
- /*
- * XXX In between kern_open() and fget(), another process
- * having the same filedesc could use that fd without
- * checking below.
- */
- fd = td->td_retval[0];
- if (fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp) == 0) {
- if (fp->f_type != DTYPE_VNODE) {
- fdrop(fp, td);
- goto done;
- }
- sx_slock(&proctree_lock);
- PROC_LOCK(p);
- if (SESS_LEADER(p) && !(p->p_flag & P_CONTROLT)) {
- PROC_UNLOCK(p);
- sx_sunlock(&proctree_lock);
- /* XXXPJD: Verify if TIOCSCTTY is allowed. */
- (void) fo_ioctl(fp, TIOCSCTTY, (caddr_t) 0,
- td->td_ucred, td);
- } else {
- PROC_UNLOCK(p);
- sx_sunlock(&proctree_lock);
- }
- fdrop(fp, td);
- }
+ cap_rights_t rights;
+ struct proc *p = td->td_proc;
+ struct file *fp;
+ int fd;
+ int bsd_flags, error;
+
+ bsd_flags = 0;
+ switch (l_flags & LINUX_O_ACCMODE) {
+ case LINUX_O_WRONLY:
+ bsd_flags |= O_WRONLY;
+ break;
+ case LINUX_O_RDWR:
+ bsd_flags |= O_RDWR;
+ break;
+ default:
+ bsd_flags |= O_RDONLY;
+ }
+ if (l_flags & LINUX_O_NDELAY)
+ bsd_flags |= O_NONBLOCK;
+ if (l_flags & LINUX_O_APPEND)
+ bsd_flags |= O_APPEND;
+ if (l_flags & LINUX_O_SYNC)
+ bsd_flags |= O_FSYNC;
+ if (l_flags & LINUX_O_NONBLOCK)
+ bsd_flags |= O_NONBLOCK;
+ if (l_flags & LINUX_FASYNC)
+ bsd_flags |= O_ASYNC;
+ if (l_flags & LINUX_O_CREAT)
+ bsd_flags |= O_CREAT;
+ if (l_flags & LINUX_O_TRUNC)
+ bsd_flags |= O_TRUNC;
+ if (l_flags & LINUX_O_EXCL)
+ bsd_flags |= O_EXCL;
+ if (l_flags & LINUX_O_NOCTTY)
+ bsd_flags |= O_NOCTTY;
+ if (l_flags & LINUX_O_DIRECT)
+ bsd_flags |= O_DIRECT;
+ if (l_flags & LINUX_O_NOFOLLOW)
+ bsd_flags |= O_NOFOLLOW;
+ if (l_flags & LINUX_O_DIRECTORY)
+ bsd_flags |= O_DIRECTORY;
+ /* XXX LINUX_O_NOATIME: unable to be easily implemented. */
+
+ error = kern_openat(td, dirfd, path, UIO_SYSSPACE, bsd_flags, mode);
+ if (error != 0)
+ goto done;
+ if (bsd_flags & O_NOCTTY)
+ goto done;
+
+ /*
+ * XXX In between kern_open() and fget(), another process
+ * having the same filedesc could use that fd without
+ * checking below.
+ */
+ fd = td->td_retval[0];
+ if (fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp) == 0) {
+ if (fp->f_type != DTYPE_VNODE) {
+ fdrop(fp, td);
+ goto done;
+ }
+ sx_slock(&proctree_lock);
+ PROC_LOCK(p);
+ if (SESS_LEADER(p) && !(p->p_flag & P_CONTROLT)) {
+ PROC_UNLOCK(p);
+ sx_sunlock(&proctree_lock);
+ /* XXXPJD: Verify if TIOCSCTTY is allowed. */
+ (void) fo_ioctl(fp, TIOCSCTTY, (caddr_t) 0,
+ td->td_ucred, td);
+ } else {
+ PROC_UNLOCK(p);
+ sx_sunlock(&proctree_lock);
+ }
+ fdrop(fp, td);
+ }
done:
#ifdef DEBUG
- if (ldebug(open))
- printf(LMSG("open returns error %d"), error);
+ if (ldebug(open))
+ printf(LMSG("open returns error %d"), error);
#endif
- LFREEPATH(path);
- return (error);
+ LFREEPATH(path);
+ return (error);
}
int
@@ -195,44 +193,41 @@ linux_openat(struct thread *td, struct linux_openat_args *args)
int
linux_open(struct thread *td, struct linux_open_args *args)
{
- char *path;
-
- if (args->flags & LINUX_O_CREAT)
- LCONVPATHCREAT(td, args->path, &path);
- else
- LCONVPATHEXIST(td, args->path, &path);
+ char *path;
+ if (args->flags & LINUX_O_CREAT)
+ LCONVPATHCREAT(td, args->path, &path);
+ else
+ LCONVPATHEXIST(td, args->path, &path);
#ifdef DEBUG
if (ldebug(open))
printf(ARGS(open, "%s, 0x%x, 0x%x"),
path, args->flags, args->mode);
#endif
-
return (linux_common_open(td, AT_FDCWD, path, args->flags, args->mode));
}
int
linux_lseek(struct thread *td, struct linux_lseek_args *args)
{
-
- struct lseek_args /* {
- int fd;
- int pad;
- off_t offset;
- int whence;
- } */ tmp_args;
- int error;
+ struct lseek_args /* {
+ int fd;
+ int pad;
+ off_t offset;
+ int whence;
+ } */ tmp_args;
+ int error;
#ifdef DEBUG
if (ldebug(lseek))
printf(ARGS(lseek, "%d, %ld, %d"),
args->fdes, (long)args->off, args->whence);
#endif
- tmp_args.fd = args->fdes;
- tmp_args.offset = (off_t)args->off;
- tmp_args.whence = args->whence;
- error = sys_lseek(td, &tmp_args);
- return error;
+ tmp_args.fd = args->fdes;
+ tmp_args.offset = (off_t)args->off;
+ tmp_args.whence = args->whence;
+ error = sys_lseek(td, &tmp_args);
+ return (error);
}
#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
@@ -255,13 +250,13 @@ linux_llseek(struct thread *td, struct linux_llseek_args *args)
bsd_args.whence = args->whence;
if ((error = sys_lseek(td, &bsd_args)))
- return error;
+ return (error);
if ((error = copyout(td->td_retval, args->res, sizeof (off_t))))
- return error;
+ return (error);
td->td_retval[0] = 0;
- return 0;
+ return (0);
}
int
@@ -272,7 +267,7 @@ linux_readdir(struct thread *td, struct linux_readdir_args *args)
lda.fd = args->fd;
lda.dent = args->dent;
lda.count = 1;
- return linux_getdents(td, &lda);
+ return (linux_getdents(td, &lda));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
@@ -949,7 +944,7 @@ linux_ftruncate(struct thread *td, struct linux_ftruncate_args *args)
int pad;
off_t length;
} */ nuap;
-
+
nuap.fd = args->fd;
nuap.length = args->length;
return (sys_ftruncate(td, &nuap));
@@ -1021,7 +1016,7 @@ linux_fdatasync(td, uap)
struct fsync_args bsd;
bsd.fd = uap->fd;
- return sys_fsync(td, &bsd);
+ return (sys_fsync(td, &bsd));
}
int
@@ -1038,9 +1033,7 @@ linux_pread(td, uap)
bsd.buf = uap->buf;
bsd.nbyte = uap->nbyte;
bsd.offset = uap->offset;
-
error = sys_pread(td, &bsd);
-
if (error == 0) {
/* This seems to violate POSIX but linux does it */
error = fgetvp(td, uap->fd,
@@ -1053,7 +1046,6 @@ linux_pread(td, uap)
}
vrele(vp);
}
-
return (error);
}
@@ -1068,7 +1060,7 @@ linux_pwrite(td, uap)
bsd.buf = uap->buf;
bsd.nbyte = uap->nbyte;
bsd.offset = uap->offset;
- return sys_pwrite(td, &bsd);
+ return (sys_pwrite(td, &bsd));
}
int
diff --git a/sys/compat/linux/linux_futex.c b/sys/compat/linux/linux_futex.c
index be7c8a0..059e9b3 100644
--- a/sys/compat/linux/linux_futex.c
+++ b/sys/compat/linux/linux_futex.c
@@ -1,7 +1,9 @@
/* $NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */
/*-
- * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
+ * Copyright (c) 2009-2016 Dmitry Chagin
+ * Copyright (c) 2005 Emmanuel Dreyfus
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -52,9 +54,10 @@ __KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $")
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sdt.h>
-#include <sys/sx.h>
#include <sys/umtx.h>
+#include <vm/vm_extern.h>
+
#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
@@ -106,7 +109,7 @@ LIN_SDT_PROBE_DEFINE3(futex, futex_get, entry, "uint32_t *",
LIN_SDT_PROBE_DEFINE0(futex, futex_get, error);
LIN_SDT_PROBE_DEFINE1(futex, futex_get, return, "int");
LIN_SDT_PROBE_DEFINE3(futex, futex_sleep, entry, "struct futex *",
- "struct waiting_proc **", "int");
+ "struct waiting_proc **", "struct timespec *");
LIN_SDT_PROBE_DEFINE5(futex, futex_sleep, requeue_error, "int", "uint32_t *",
"struct waiting_proc *", "uint32_t *", "uint32_t");
LIN_SDT_PROBE_DEFINE3(futex, futex_sleep, sleep_error, "int", "uint32_t *",
@@ -125,7 +128,7 @@ LIN_SDT_PROBE_DEFINE3(futex, futex_requeue, requeue, "uint32_t *",
"struct waiting_proc *", "uint32_t");
LIN_SDT_PROBE_DEFINE1(futex, futex_requeue, return, "int");
LIN_SDT_PROBE_DEFINE4(futex, futex_wait, entry, "struct futex *",
- "struct waiting_proc **", "int", "uint32_t");
+ "struct waiting_proc **", "struct timespec *", "uint32_t");
LIN_SDT_PROBE_DEFINE1(futex, futex_wait, sleep_error, "int");
LIN_SDT_PROBE_DEFINE1(futex, futex_wait, return, "int");
LIN_SDT_PROBE_DEFINE3(futex, futex_atomic_op, entry, "struct thread *",
@@ -139,7 +142,6 @@ LIN_SDT_PROBE_DEFINE1(futex, futex_atomic_op, return, "int");
LIN_SDT_PROBE_DEFINE2(futex, linux_sys_futex, entry, "struct thread *",
"struct linux_sys_futex_args *");
LIN_SDT_PROBE_DEFINE0(futex, linux_sys_futex, unimplemented_clockswitch);
-LIN_SDT_PROBE_DEFINE1(futex, linux_sys_futex, itimerfix_error, "int");
LIN_SDT_PROBE_DEFINE1(futex, linux_sys_futex, copyin_error, "int");
LIN_SDT_PROBE_DEFINE0(futex, linux_sys_futex, invalid_cmp_requeue_use);
LIN_SDT_PROBE_DEFINE3(futex, linux_sys_futex, debug_wait, "uint32_t *",
@@ -194,7 +196,7 @@ struct waiting_proc {
};
struct futex {
- struct sx f_lck;
+ struct mtx f_lck;
uint32_t *f_uaddr; /* user-supplied value, for debug */
struct umtx_key f_key;
uint32_t f_refcount;
@@ -205,20 +207,22 @@ struct futex {
struct futex_list futex_list;
-#define FUTEX_LOCK(f) sx_xlock(&(f)->f_lck)
-#define FUTEX_UNLOCK(f) sx_xunlock(&(f)->f_lck)
+#define FUTEX_LOCK(f) mtx_lock(&(f)->f_lck)
+#define FUTEX_LOCKED(f) mtx_owned(&(f)->f_lck)
+#define FUTEX_UNLOCK(f) mtx_unlock(&(f)->f_lck)
#define FUTEX_INIT(f) do { \
- sx_init_flags(&(f)->f_lck, "ftlk", \
- SX_DUPOK); \
+ mtx_init(&(f)->f_lck, "ftlk", NULL, \
+ MTX_DUPOK); \
LIN_SDT_PROBE1(futex, futex, create, \
&(f)->f_lck); \
} while (0)
#define FUTEX_DESTROY(f) do { \
LIN_SDT_PROBE1(futex, futex, destroy, \
&(f)->f_lck); \
- sx_destroy(&(f)->f_lck); \
+ mtx_destroy(&(f)->f_lck); \
} while (0)
-#define FUTEX_ASSERT_LOCKED(f) sx_assert(&(f)->f_lck, SA_XLOCKED)
+#define FUTEX_ASSERT_LOCKED(f) mtx_assert(&(f)->f_lck, MA_OWNED)
+#define FUTEX_ASSERT_UNLOCKED(f) mtx_assert(&(f)->f_lck, MA_NOTOWNED)
struct mtx futex_mtx; /* protects the futex list */
#define FUTEXES_LOCK do { \
@@ -237,6 +241,7 @@ struct mtx futex_mtx; /* protects the futex list */
#define FUTEX_DONTCREATE 0x2 /* don't create futex if not exists */
#define FUTEX_DONTEXISTS 0x4 /* return EINVAL if futex exists */
#define FUTEX_SHARED 0x8 /* shared futex */
+#define FUTEX_DONTLOCK 0x10 /* don't lock futex */
/* wp_flags */
#define FUTEX_WP_REQUEUED 0x1 /* wp requeued - wp moved from wp_list
@@ -251,11 +256,15 @@ static void futex_put(struct futex *, struct waiting_proc *);
static int futex_get0(uint32_t *, struct futex **f, uint32_t);
static int futex_get(uint32_t *, struct waiting_proc **, struct futex **,
uint32_t);
-static int futex_sleep(struct futex *, struct waiting_proc *, int);
+static int futex_sleep(struct futex *, struct waiting_proc *, struct timespec *);
static int futex_wake(struct futex *, int, uint32_t);
static int futex_requeue(struct futex *, int, struct futex *, int);
-static int futex_wait(struct futex *, struct waiting_proc *, int,
+static int futex_copyin_timeout(int, struct l_timespec *, int,
+ struct timespec *);
+static int futex_wait(struct futex *, struct waiting_proc *, struct timespec *,
uint32_t);
+static void futex_lock(struct futex *);
+static void futex_unlock(struct futex *);
static int futex_atomic_op(struct thread *, int, uint32_t *);
static int handle_futex_death(struct linux_emuldata *, uint32_t *,
unsigned int);
@@ -270,12 +279,39 @@ int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);
+static int
+futex_copyin_timeout(int op, struct l_timespec *luts, int clockrt,
+ struct timespec *ts)
+{
+ struct l_timespec lts;
+ struct timespec kts;
+ int error;
+
+ error = copyin(luts, &lts, sizeof(lts));
+ if (error)
+ return (error);
+
+ error = linux_to_native_timespec(ts, &lts);
+ if (error)
+ return (error);
+ if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
+ return (EINVAL);
+
+ if (clockrt) {
+ nanotime(&kts);
+ timespecsub(ts, &kts);
+ } else if (op == LINUX_FUTEX_WAIT_BITSET) {
+ nanouptime(&kts);
+ timespecsub(ts, &kts);
+ }
+ return (error);
+}
+
static void
futex_put(struct futex *f, struct waiting_proc *wp)
{
LIN_SDT_PROBE2(futex, futex_put, entry, f, wp);
- FUTEX_ASSERT_LOCKED(f);
if (wp != NULL) {
if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
@@ -286,7 +322,8 @@ futex_put(struct futex *f, struct waiting_proc *wp)
if (--f->f_refcount == 0) {
LIST_REMOVE(f, f_list);
FUTEXES_UNLOCK;
- FUTEX_UNLOCK(f);
+ if (FUTEX_LOCKED(f))
+ futex_unlock(f);
LIN_SDT_PROBE3(futex, futex_put, destroy, f->f_uaddr,
f->f_refcount, f->f_key.shared);
@@ -305,7 +342,8 @@ futex_put(struct futex *f, struct waiting_proc *wp)
LINUX_CTR3(sys_futex, "futex_put uaddr %p ref %d shared %d",
f->f_uaddr, f->f_refcount, f->f_key.shared);
FUTEXES_UNLOCK;
- FUTEX_UNLOCK(f);
+ if (FUTEX_LOCKED(f))
+ futex_unlock(f);
LIN_SDT_PROBE0(futex, futex_put, return);
}
@@ -333,7 +371,8 @@ retry:
LIST_FOREACH(f, &futex_list, f_list) {
if (umtx_key_match(&f->f_key, &key)) {
if (tmpf != NULL) {
- FUTEX_UNLOCK(tmpf);
+ if (FUTEX_LOCKED(tmpf))
+ futex_unlock(tmpf);
FUTEX_DESTROY(tmpf);
free(tmpf, M_FUTEX);
}
@@ -354,7 +393,8 @@ retry:
FUTEXES_UNLOCK;
umtx_key_release(&key);
- FUTEX_LOCK(f);
+ if ((flags & FUTEX_DONTLOCK) == 0)
+ futex_lock(f);
*newf = f;
LIN_SDT_PROBE3(futex, futex_get0, shared, uaddr,
f->f_refcount, f->f_key.shared);
@@ -390,7 +430,8 @@ retry:
* Lock the new futex before an insert into the futex_list
* to prevent futex usage by other.
*/
- FUTEX_LOCK(tmpf);
+ if ((flags & FUTEX_DONTLOCK) == 0)
+ futex_lock(tmpf);
goto retry;
}
@@ -438,16 +479,56 @@ futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
return (error);
}
+static inline void
+futex_lock(struct futex *f)
+{
+
+ LINUX_CTR3(sys_futex, "futex_lock uaddr %p ref %d shared %d",
+ f->f_uaddr, f->f_refcount, f->f_key.shared);
+ FUTEX_ASSERT_UNLOCKED(f);
+ FUTEX_LOCK(f);
+}
+
+static inline void
+futex_unlock(struct futex *f)
+{
+
+ LINUX_CTR3(sys_futex, "futex_unlock uaddr %p ref %d shared %d",
+ f->f_uaddr, f->f_refcount, f->f_key.shared);
+ FUTEX_ASSERT_LOCKED(f);
+ FUTEX_UNLOCK(f);
+}
+
static int
-futex_sleep(struct futex *f, struct waiting_proc *wp, int timeout)
+futex_sleep(struct futex *f, struct waiting_proc *wp, struct timespec *ts)
{
+ struct timespec uts;
+ sbintime_t sbt, prec, tmp;
+ time_t over;
int error;
FUTEX_ASSERT_LOCKED(f);
- LIN_SDT_PROBE3(futex, futex_sleep, entry, f, wp, timeout);
- LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %d ref %d",
- f->f_uaddr, wp, timeout, f->f_refcount);
- error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
+ if (ts != NULL) {
+ uts = *ts;
+ if (uts.tv_sec > INT32_MAX / 2) {
+ over = uts.tv_sec - INT32_MAX / 2;
+ uts.tv_sec -= over;
+ }
+ tmp = tstosbt(uts);
+ if (TIMESEL(&sbt, tmp))
+ sbt += tc_tick_sbt;
+ sbt += tmp;
+ prec = tmp;
+ prec >>= tc_precexp;
+ } else {
+ sbt = 0;
+ prec = 0;
+ }
+ LIN_SDT_PROBE3(futex, futex_sleep, entry, f, wp, sbt);
+ LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %ld ref %d",
+ f->f_uaddr, wp, sbt, f->f_refcount);
+
+ error = msleep_sbt(wp, &f->f_lck, PCATCH, "futex", sbt, prec, C_ABSOLUTE);
if (wp->wp_flags & FUTEX_WP_REQUEUED) {
KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
@@ -463,7 +544,7 @@ futex_sleep(struct futex *f, struct waiting_proc *wp, int timeout)
wp->wp_futex->f_refcount);
futex_put(f, NULL);
f = wp->wp_futex;
- FUTEX_LOCK(f);
+ futex_lock(f);
} else {
if (error) {
LIN_SDT_PROBE3(futex, futex_sleep, sleep_error, error,
@@ -565,12 +646,12 @@ futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
}
static int
-futex_wait(struct futex *f, struct waiting_proc *wp, int timeout_hz,
+futex_wait(struct futex *f, struct waiting_proc *wp, struct timespec *ts,
uint32_t bitset)
{
int error;
- LIN_SDT_PROBE4(futex, futex_wait, entry, f, wp, timeout_hz, bitset);
+ LIN_SDT_PROBE4(futex, futex_wait, entry, f, wp, ts, bitset);
if (bitset == 0) {
LIN_SDT_PROBE1(futex, futex_wait, return, EINVAL);
@@ -578,7 +659,7 @@ futex_wait(struct futex *f, struct waiting_proc *wp, int timeout_hz,
}
f->f_bitset = bitset;
- error = futex_sleep(f, wp, timeout_hz);
+ error = futex_sleep(f, wp, ts);
if (error)
LIN_SDT_PROBE1(futex, futex_wait, sleep_error, error);
if (error == EWOULDBLOCK)
@@ -604,7 +685,7 @@ futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
LIN_SDT_PROBE4(futex, futex_atomic_op, decoded_op, op, cmp, oparg,
cmparg);
-
+
/* XXX: Linux verifies access here and returns EFAULT */
LIN_SDT_PROBE0(futex, futex_atomic_op, missing_access_check);
@@ -670,11 +751,8 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
struct linux_pemuldata *pem;
struct waiting_proc *wp;
struct futex *f, *f2;
- struct l_timespec ltimeout;
- struct timespec timeout;
- struct timeval utv, ctv;
- int timeout_hz;
- int error;
+ struct timespec uts, *ts;
+ int error, save;
uint32_t flags, val;
LIN_SDT_PROBE2(futex, linux_sys_futex, entry, td, args);
@@ -716,37 +794,19 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
args->uaddr, args->val, args->val3);
if (args->timeout != NULL) {
- error = copyin(args->timeout, &ltimeout, sizeof(ltimeout));
+ error = futex_copyin_timeout(args->op, args->timeout,
+ clockrt, &uts);
if (error) {
LIN_SDT_PROBE1(futex, linux_sys_futex, copyin_error,
error);
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
- error = linux_to_native_timespec(&timeout, &ltimeout);
- if (error)
- return (error);
- TIMESPEC_TO_TIMEVAL(&utv, &timeout);
- error = itimerfix(&utv);
- if (error) {
- LIN_SDT_PROBE1(futex, linux_sys_futex, itimerfix_error,
- error);
- LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
- return (error);
- }
- if (clockrt) {
- microtime(&ctv);
- timevalsub(&utv, &ctv);
- } else if (args->op == LINUX_FUTEX_WAIT_BITSET) {
- microuptime(&ctv);
- timevalsub(&utv, &ctv);
- }
- if (utv.tv_sec < 0)
- timevalclear(&utv);
- timeout_hz = tvtohz(&utv);
+ ts = &uts;
} else
- timeout_hz = 0;
+ ts = NULL;
+retry0:
error = futex_get(args->uaddr, &wp, &f,
flags | FUTEX_CREATE_WP);
if (error) {
@@ -754,14 +814,16 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
return (error);
}
- error = copyin(args->uaddr, &val, sizeof(val));
+ error = copyin_nofault(args->uaddr, &val, sizeof(val));
if (error) {
+ futex_put(f, wp);
+ error = copyin(args->uaddr, &val, sizeof(val));
+ if (error == 0)
+ goto retry0;
LIN_SDT_PROBE1(futex, linux_sys_futex, copyin_error,
error);
LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
error);
- futex_put(f, wp);
-
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
@@ -779,7 +841,7 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
return (EWOULDBLOCK);
}
- error = futex_wait(f, wp, timeout_hz, args->val3);
+ error = futex_wait(f, wp, ts, args->val3);
break;
case LINUX_FUTEX_WAKE:
@@ -829,7 +891,8 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
return (EINVAL);
}
- error = futex_get(args->uaddr, NULL, &f, flags);
+retry1:
+ error = futex_get(args->uaddr, NULL, &f, flags | FUTEX_DONTLOCK);
if (error) {
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
@@ -843,22 +906,26 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
* returned by FUTEX_CMP_REQUEUE.
*/
error = futex_get(args->uaddr2, NULL, &f2,
- flags | FUTEX_DONTEXISTS);
+ flags | FUTEX_DONTEXISTS | FUTEX_DONTLOCK);
if (error) {
futex_put(f, NULL);
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
- error = copyin(args->uaddr, &val, sizeof(val));
+ futex_lock(f);
+ futex_lock(f2);
+ error = copyin_nofault(args->uaddr, &val, sizeof(val));
if (error) {
+ futex_put(f2, NULL);
+ futex_put(f, NULL);
+ error = copyin(args->uaddr, &val, sizeof(val));
+ if (error == 0)
+ goto retry1;
LIN_SDT_PROBE1(futex, linux_sys_futex, copyin_error,
error);
LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
error);
- futex_put(f2, NULL);
- futex_put(f, NULL);
-
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
@@ -888,50 +955,45 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
args->uaddr, args->val, args->uaddr2, args->val3,
args->timeout);
- error = futex_get(args->uaddr, NULL, &f, flags);
+retry2:
+ error = futex_get(args->uaddr, NULL, &f, flags | FUTEX_DONTLOCK);
if (error) {
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
if (args->uaddr != args->uaddr2)
- error = futex_get(args->uaddr2, NULL, &f2, flags);
+ error = futex_get(args->uaddr2, NULL, &f2,
+ flags | FUTEX_DONTLOCK);
if (error) {
futex_put(f, NULL);
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
+ futex_lock(f);
+ futex_lock(f2);
/*
* This function returns positive number as results and
* negative as errors
*/
+ save = vm_fault_disable_pagefaults();
op_ret = futex_atomic_op(td, args->val3, args->uaddr2);
+ vm_fault_enable_pagefaults(save);
LINUX_CTR2(sys_futex, "WAKE_OP atomic_op uaddr %p ret 0x%x",
args->uaddr, op_ret);
if (op_ret < 0) {
- /* XXX: We don't handle the EFAULT yet. */
- if (op_ret != -EFAULT) {
- if (f2 != NULL)
- futex_put(f2, NULL);
- futex_put(f, NULL);
-
- LIN_SDT_PROBE1(futex, linux_sys_futex, return,
- -op_ret);
- return (-op_ret);
- } else {
- LIN_SDT_PROBE0(futex, linux_sys_futex,
- unhandled_efault);
- }
if (f2 != NULL)
futex_put(f2, NULL);
futex_put(f, NULL);
-
- LIN_SDT_PROBE1(futex, linux_sys_futex, return, EFAULT);
- return (EFAULT);
+ error = copyin(args->uaddr2, &val, sizeof(val));
+ if (error == 0)
+ goto retry2;
+ LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
+ return (error);
}
ret = futex_wake(f, args->val, args->val3);
@@ -996,7 +1058,6 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
return (ENOSYS);
case LINUX_FUTEX_REQUEUE:
-
/*
* Glibc does not use this operation since version 2.3.3,
* as it is racy and replaced by FUTEX_CMP_REQUEUE operation.
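The WAIT, CMP_REQUEUE and WAKE_OP paths above now read the futex word with copyin_nofault() while the futex mutex is held (unlike the old sx lock, a mutex must not sleep on a page fault); on failure they drop the futexes, fault the page in with an ordinary copyin(), and retry. A minimal sketch of that pattern, with a made-up helper name:

static int
futex_fetch_word_locked(struct futex *f, uint32_t *uaddr, uint32_t *valp)
{
	int error;

retry:
	futex_lock(f);
	error = copyin_nofault(uaddr, valp, sizeof(*valp));
	if (error != 0) {
		/*
		 * Page faults are not allowed while the futex mutex is
		 * held.  Drop it, fault the page in with an ordinary
		 * copyin(), and retry if the address turned out valid.
		 */
		futex_unlock(f);
		error = copyin(uaddr, valp, sizeof(*valp));
		if (error == 0)
			goto retry;
		return (error);
	}
	/*
	 * Success: return with the futex still locked so the caller can
	 * compare the value and sleep or wake atomically.
	 */
	return (0);
}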
diff --git a/sys/compat/linux/linux_socket.c b/sys/compat/linux/linux_socket.c
index 105aec5..6bc4cd8 100644
--- a/sys/compat/linux/linux_socket.c
+++ b/sys/compat/linux/linux_socket.c
@@ -462,12 +462,16 @@ bsd_to_linux_sockaddr(struct sockaddr *arg)
{
struct sockaddr sa;
size_t sa_len = sizeof(struct sockaddr);
- int error;
+ int error, bdom;
if ((error = copyin(arg, &sa, sa_len)))
return (error);
- *(u_short *)&sa = sa.sa_family;
+ bdom = bsd_to_linux_domain(sa.sa_family);
+ if (bdom == -1)
+ return (EAFNOSUPPORT);
+
+ *(u_short *)&sa = bdom;
return (copyout(&sa, arg, sa_len));
}
@@ -476,12 +480,16 @@ linux_to_bsd_sockaddr(struct sockaddr *arg, int len)
{
struct sockaddr sa;
size_t sa_len = sizeof(struct sockaddr);
- int error;
+ int error, bdom;
if ((error = copyin(arg, &sa, sa_len)))
return (error);
- sa.sa_family = *(sa_family_t *)&sa;
+ bdom = linux_to_bsd_domain(*(sa_family_t *)&sa);
+ if (bdom == -1)
+ return (EAFNOSUPPORT);
+
+ sa.sa_family = bdom;
sa.sa_len = len;
return (copyout(&sa, arg, sa_len));
}
@@ -1608,10 +1616,10 @@ linux_getsockopt(struct thread *td, struct linux_getsockopt_args *args)
} */ bsd_args;
l_timeval linux_tv;
struct timeval tv;
- socklen_t tv_len, xulen;
+ socklen_t tv_len, xulen, len;
struct xucred xu;
struct l_ucred lxu;
- int error, name;
+ int error, name, newval;
bsd_args.s = args->s;
bsd_args.level = linux_to_bsd_sockopt_level(args->level);
@@ -1650,6 +1658,15 @@ linux_getsockopt(struct thread *td, struct linux_getsockopt_args *args)
return (copyout(&lxu, PTRIN(args->optval), sizeof(lxu)));
/* NOTREACHED */
break;
+ case SO_ERROR:
+ len = sizeof(newval);
+ error = kern_getsockopt(td, args->s, bsd_args.level,
+ name, &newval, UIO_SYSSPACE, &len);
+ if (error)
+ return (error);
+ newval = -SV_ABI_ERRNO(td->td_proc, newval);
+ return (copyout(&newval, PTRIN(args->optval), len));
+ /* NOTREACHED */
default:
break;
}
diff --git a/sys/compat/linuxkpi/common/include/asm/atomic-long.h b/sys/compat/linuxkpi/common/include/asm/atomic-long.h
index 0cf91c9..6fbdc5e 100644
--- a/sys/compat/linuxkpi/common/include/asm/atomic-long.h
+++ b/sys/compat/linuxkpi/common/include/asm/atomic-long.h
@@ -41,6 +41,7 @@ typedef struct {
#define atomic_long_add(i, v) atomic_long_add_return((i), (v))
#define atomic_long_inc_return(v) atomic_long_add_return(1, (v))
+#define atomic_long_inc_not_zero(v) atomic_long_add_unless((v), 1, 0)
static inline long
atomic_long_add_return(long i, atomic_long_t *v)
@@ -72,6 +73,21 @@ atomic_long_dec(atomic_long_t *v)
return atomic_fetchadd_long(&v->counter, -1) - 1;
}
+static inline int
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ long c;
+
+ for (;;) {
+ c = atomic_long_read(v);
+ if (unlikely(c == u))
+ break;
+ if (likely(atomic_cmpset_long(&v->counter, c, c + a)))
+ break;
+ }
+ return (c != u);
+}
+
static inline long
atomic_long_dec_and_test(atomic_long_t *v)
{
diff --git a/sys/compat/linuxkpi/common/include/asm/uaccess.h b/sys/compat/linuxkpi/common/include/asm/uaccess.h
index f3e743f..8f0f5eb 100644
--- a/sys/compat/linuxkpi/common/include/asm/uaccess.h
+++ b/sys/compat/linuxkpi/common/include/asm/uaccess.h
@@ -40,6 +40,7 @@ copy_to_user(void *to, const void *from, unsigned long n)
return n;
return 0;
}
+#define __copy_to_user(...) copy_to_user(__VA_ARGS__)
static inline long
copy_from_user(void *to, const void *from, unsigned long n)
@@ -48,5 +49,7 @@ copy_from_user(void *to, const void *from, unsigned long n)
return n;
return 0;
}
+#define __copy_from_user(...) copy_from_user(__VA_ARGS__)
+#define __copy_in_user(...) copy_from_user(__VA_ARGS__)
#endif /* _ASM_UACCESS_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/bitops.h b/sys/compat/linuxkpi/common/include/linux/bitops.h
index b12050a..9e1fa2b 100644
--- a/sys/compat/linuxkpi/common/include/linux/bitops.h
+++ b/sys/compat/linuxkpi/common/include/linux/bitops.h
@@ -46,7 +46,7 @@
#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1)))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define GENMASK(lo, hi) (((2UL << ((hi) - (lo))) - 1UL) << (lo))
+#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define BITS_PER_BYTE 8
static inline int
@@ -73,6 +73,12 @@ __flsl(long mask)
return (flsl(mask) - 1);
}
+static inline uint32_t
+ror32(uint32_t word, unsigned int shift)
+{
+
+ return ((word >> shift) | (word << (32 - shift)));
+}
#define ffz(mask) __ffs(~(mask))
@@ -87,7 +93,7 @@ static inline int get_count_order(unsigned int count)
}
static inline unsigned long
-find_first_bit(unsigned long *addr, unsigned long size)
+find_first_bit(const unsigned long *addr, unsigned long size)
{
long mask;
int bit;
@@ -109,7 +115,7 @@ find_first_bit(unsigned long *addr, unsigned long size)
}
static inline unsigned long
-find_first_zero_bit(unsigned long *addr, unsigned long size)
+find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
long mask;
int bit;
@@ -131,7 +137,7 @@ find_first_zero_bit(unsigned long *addr, unsigned long size)
}
static inline unsigned long
-find_last_bit(unsigned long *addr, unsigned long size)
+find_last_bit(const unsigned long *addr, unsigned long size)
{
long mask;
int offs;
@@ -157,7 +163,7 @@ find_last_bit(unsigned long *addr, unsigned long size)
}
static inline unsigned long
-find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
long mask;
int offs;
@@ -196,7 +202,7 @@ find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
}
static inline unsigned long
-find_next_zero_bit(unsigned long *addr, unsigned long size,
+find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
long mask;
@@ -300,23 +306,23 @@ bitmap_empty(unsigned long *addr, int size)
}
#define __set_bit(i, a) \
- atomic_set_long(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+ atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
#define set_bit(i, a) \
- atomic_set_long(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+ atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
#define __clear_bit(i, a) \
- atomic_clear_long(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+ atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
#define clear_bit(i, a) \
- atomic_clear_long(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+ atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
#define test_bit(i, a) \
- !!(atomic_load_acq_long(&((volatile long *)(a))[BIT_WORD(i)]) & \
+ !!(atomic_load_acq_long(&((volatile unsigned long *)(a))[BIT_WORD(i)]) & \
BIT_MASK(i))
-static inline long
-test_and_clear_bit(long bit, long *var)
+static inline int
+test_and_clear_bit(long bit, volatile unsigned long *var)
{
long val;
@@ -324,14 +330,14 @@ test_and_clear_bit(long bit, long *var)
bit %= BITS_PER_LONG;
bit = (1UL << bit);
do {
- val = *(volatile long *)var;
+ val = *var;
} while (atomic_cmpset_long(var, val, val & ~bit) == 0);
return !!(val & bit);
}
-static inline long
-test_and_set_bit(long bit, long *var)
+static inline int
+test_and_set_bit(long bit, volatile unsigned long *var)
{
long val;
@@ -339,7 +345,7 @@ test_and_set_bit(long bit, long *var)
bit %= BITS_PER_LONG;
bit = (1UL << bit);
do {
- val = *(volatile long *)var;
+ val = *var;
} while (atomic_cmpset_long(var, val, val | bit) == 0);
return !!(val & bit);
@@ -393,7 +399,8 @@ enum {
REG_OP_RELEASE,
};
-static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+static inline int
+__reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
int nbits_reg;
int index;
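The rewritten GENMASK() now takes its arguments in Linux's (high, low) order rather than the previous (lo, hi). A quick illustrative sanity check, assuming the linuxkpi <linux/bitops.h> above is in scope on a 64-bit build:

#include <linux/bitops.h>

/* Illustrative expansions only, not part of the change. */
_Static_assert(GENMASK(7, 4) == 0xf0UL, "bits 7..4");
_Static_assert(GENMASK(3, 0) == 0x0fUL, "bits 3..0");
_Static_assert(GENMASK(BITS_PER_LONG - 1, 0) == ~0UL, "full-width mask");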
diff --git a/sys/compat/linuxkpi/common/include/linux/cdev.h b/sys/compat/linuxkpi/common/include/linux/cdev.h
index 84b7b70..e60ee15 100644
--- a/sys/compat/linuxkpi/common/include/linux/cdev.h
+++ b/sys/compat/linuxkpi/common/include/linux/cdev.h
@@ -91,6 +91,18 @@ cdev_add(struct linux_cdev *cdev, dev_t dev, unsigned count)
return (0);
}
+static inline int
+cdev_add_ext(struct linux_cdev *cdev, dev_t dev, uid_t uid, gid_t gid, int mode)
+{
+ cdev->cdev = make_dev(&linuxcdevsw, MINOR(dev), uid, gid, mode,
+ "%s/%d", kobject_name(&cdev->kobj), MINOR(dev));
+ cdev->dev = dev;
+ cdev->cdev->si_drv1 = cdev;
+
+ kobject_get(cdev->kobj.parent);
+ return (0);
+}
+
static inline void
cdev_del(struct linux_cdev *cdev)
{
diff --git a/sys/compat/linuxkpi/common/include/linux/completion.h b/sys/compat/linuxkpi/common/include/linux/completion.h
index 92ccc61..73c1a99 100644
--- a/sys/compat/linuxkpi/common/include/linux/completion.h
+++ b/sys/compat/linuxkpi/common/include/linux/completion.h
@@ -32,6 +32,7 @@
#define _LINUX_COMPLETION_H_
#include <linux/errno.h>
+#include <linux/wait.h>
struct completion {
unsigned int done;
diff --git a/sys/compat/linuxkpi/common/include/linux/gfp.h b/sys/compat/linuxkpi/common/include/linux/gfp.h
index d0fb0ac..374ed79 100644
--- a/sys/compat/linuxkpi/common/include/linux/gfp.h
+++ b/sys/compat/linuxkpi/common/include/linux/gfp.h
@@ -46,6 +46,14 @@
#define __GFP_NOWARN 0
#define __GFP_HIGHMEM 0
#define __GFP_ZERO M_ZERO
+#define __GFP_NORETRY 0
+#define __GFP_RECLAIM 0
+#define __GFP_RECLAIMABLE 0
+
+#define __GFP_IO 0
+#define __GFP_NO_KSWAPD 0
+#define __GFP_WAIT M_WAITOK
+#define __GFP_DMA32 0
#define GFP_NOWAIT M_NOWAIT
#define GFP_ATOMIC (M_NOWAIT | M_USE_RESERVE)
@@ -55,6 +63,8 @@
#define GFP_HIGHUSER_MOVABLE M_WAITOK
#define GFP_IOFS M_NOWAIT
#define GFP_NOIO M_NOWAIT
+#define GFP_DMA32 0
+#define GFP_TEMPORARY 0
static inline void *
page_address(struct page *page)
@@ -147,4 +157,7 @@ static inline uintptr_t __get_free_pages(gfp_t gfp_mask, unsigned int order)
#define kmalloc_node(chunk, mask, node) kmalloc(chunk, mask)
+#define SetPageReserved(page) do { } while (0) /* NOP */
+#define ClearPageReserved(page) do { } while (0) /* NOP */
+
#endif /* _LINUX_GFP_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/io.h b/sys/compat/linuxkpi/common/include/linux/io.h
index d4376ef..f6c9405 100644
--- a/sys/compat/linuxkpi/common/include/linux/io.h
+++ b/sys/compat/linuxkpi/common/include/linux/io.h
@@ -172,6 +172,14 @@ readl(const volatile void *addr)
}
#if defined(__i386__) || defined(__amd64__)
+static inline void
+_outb(u_char data, u_int port)
+{
+ __asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
+}
+#endif
+
+#if defined(__i386__) || defined(__amd64__)
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
#define _ioremap_attr(...) NULL
diff --git a/sys/compat/linuxkpi/common/include/linux/kernel.h b/sys/compat/linuxkpi/common/include/linux/kernel.h
index 9f280ce..b813bd4 100644
--- a/sys/compat/linuxkpi/common/include/linux/kernel.h
+++ b/sys/compat/linuxkpi/common/include/linux/kernel.h
@@ -52,6 +52,8 @@
#include <linux/log2.h>
#include <asm/byteorder.h>
+#include <machine/stdarg.h>
+
#define KERN_CONT ""
#define KERN_EMERG "<0>"
#define KERN_ALERT "<1>"
@@ -124,7 +126,37 @@
#define DIV_ROUND_UP_ULL(x, n) DIV_ROUND_UP((unsigned long long)(x), (n))
#define FIELD_SIZEOF(t, f) sizeof(((t *)0)->f)
-#define printk(X...) printf(X)
+#define printk(...) printf(__VA_ARGS__)
+#define vprintk(f, a) vprintf(f, a)
+
+struct va_format {
+ const char *fmt;
+ va_list *va;
+};
+
+static inline int
+vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ ssize_t ssize = size;
+ int i;
+
+ i = vsnprintf(buf, size, fmt, args);
+
+ return ((i >= ssize) ? (ssize - 1) : i);
+}
+
+static inline int
+scnprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return (i);
+}
/*
* The "pr_debug()" and "pr_devel()" macros should produce zero code
diff --git a/sys/compat/linuxkpi/common/include/linux/kref.h b/sys/compat/linuxkpi/common/include/linux/kref.h
index 584de8d..80fd271 100644
--- a/sys/compat/linuxkpi/common/include/linux/kref.h
+++ b/sys/compat/linuxkpi/common/include/linux/kref.h
@@ -36,6 +36,9 @@
#include <sys/refcount.h>
#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+
#include <asm/atomic.h>
struct kref {
@@ -88,4 +91,20 @@ kref_get_unless_zero(struct kref *kref)
return atomic_add_unless(&kref->refcount, 1, 0);
}
+static inline int kref_put_mutex(struct kref *kref,
+ void (*release)(struct kref *kref), struct mutex *lock)
+{
+ WARN_ON(release == NULL);
+ if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
+ mutex_lock(lock);
+ if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
+ mutex_unlock(lock);
+ return 0;
+ }
+ release(kref);
+ return 1;
+ }
+ return 0;
+}
+
#endif /* _LINUX_KREF_H_ */
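As in Linux, the new kref_put_mutex() calls release() with the mutex still held and does not unlock it on that path, so the release callback is expected to drop the lock itself. A hedged usage sketch with invented types (assumes the mutex is initialized elsewhere):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
	struct kref		refcount;
	struct list_head	node;
};

static struct mutex obj_list_lock;	/* mutex_init()ed at attach time */

static void
obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, refcount);

	/* obj_list_lock is held on entry; drop it after unlinking. */
	list_del(&o->node);
	mutex_unlock(&obj_list_lock);
	kfree(o);
}

static void
obj_put(struct obj *o)
{

	kref_put_mutex(&o->refcount, obj_release, &obj_list_lock);
}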
diff --git a/sys/compat/linuxkpi/common/include/linux/ktime.h b/sys/compat/linuxkpi/common/include/linux/ktime.h
index c9fa81c..7c6c40f 100644
--- a/sys/compat/linuxkpi/common/include/linux/ktime.h
+++ b/sys/compat/linuxkpi/common/include/linux/ktime.h
@@ -51,6 +51,24 @@ ktime_to_ns(ktime_t kt)
return kt.tv64;
}
+static inline int64_t
+ktime_divns(const ktime_t kt, int64_t div)
+{
+ return kt.tv64 / div;
+}
+
+static inline int64_t
+ktime_to_us(ktime_t kt)
+{
+ return ktime_divns(kt, NSEC_PER_USEC);
+}
+
+static inline int64_t
+ktime_to_ms(ktime_t kt)
+{
+ return ktime_divns(kt, NSEC_PER_MSEC);
+}
+
static inline struct timeval
ktime_to_timeval(ktime_t kt)
{
@@ -89,6 +107,20 @@ ktime_sub(ktime_t lhs, ktime_t rhs)
return (lhs);
}
+static inline int64_t
+ktime_us_delta(ktime_t later, ktime_t earlier)
+{
+ ktime_t diff = ktime_sub(later, earlier);
+ return ktime_to_us(diff);
+}
+
+static inline int64_t
+ktime_ms_delta(ktime_t later, ktime_t earlier)
+{
+ ktime_t diff = ktime_sub(later, earlier);
+ return ktime_to_ms(diff);
+}
+
static inline ktime_t
ktime_add(ktime_t lhs, ktime_t rhs)
{
diff --git a/sys/compat/linuxkpi/common/include/linux/list.h b/sys/compat/linuxkpi/common/include/linux/list.h
index 1357555..fff5243 100644
--- a/sys/compat/linuxkpi/common/include/linux/list.h
+++ b/sys/compat/linuxkpi/common/include/linux/list.h
@@ -109,6 +109,13 @@ list_replace(struct list_head *old, struct list_head *new)
}
static inline void
+list_replace_init(struct list_head *old, struct list_head *new)
+{
+ list_replace(old, new);
+ INIT_LIST_HEAD(old);
+}
+
+static inline void
linux_list_add(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
@@ -132,9 +139,18 @@ list_del_init(struct list_head *entry)
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+#define list_first_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+
#define list_next_entry(ptr, member) \
list_entry(((ptr)->member.next), typeof(*(ptr)), member)
+#define list_prev_entry(ptr, member) \
+ list_entry(((ptr)->member.prev), typeof(*(ptr)), member)
+
#define list_for_each(p, head) \
for (p = (head)->next; p != (head); p = (p)->next)
@@ -436,4 +452,7 @@ static inline int list_is_last(const struct list_head *list,
(pos) && ({ n = (pos)->member.next; 1; }); \
pos = hlist_entry_safe(n, typeof(*(pos)), member))
+extern void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
+ struct list_head *a, struct list_head *b));
+
#endif /* _LINUX_LIST_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/time.h b/sys/compat/linuxkpi/common/include/linux/time.h
index 27516a4..0836b57 100644
--- a/sys/compat/linuxkpi/common/include/linux/time.h
+++ b/sys/compat/linuxkpi/common/include/linux/time.h
@@ -29,6 +29,7 @@
#define _LINUX_TIME_H_
#define NSEC_PER_USEC 1000L
+#define NSEC_PER_MSEC 1000000L
#define NSEC_PER_SEC 1000000000L
#include <sys/time.h>
diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c
index 77b18a3..670ffc0 100644
--- a/sys/compat/linuxkpi/common/src/linux_compat.c
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c
@@ -72,6 +72,7 @@ __FBSDID("$FreeBSD$");
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <vm/vm_pager.h>
@@ -1358,6 +1359,47 @@ unregister_inetaddr_notifier(struct notifier_block *nb)
return (0);
}
+struct list_sort_thunk {
+ int (*cmp)(void *, struct list_head *, struct list_head *);
+ void *priv;
+};
+
+static inline int
+linux_le_cmp(void *priv, const void *d1, const void *d2)
+{
+ struct list_head *le1, *le2;
+ struct list_sort_thunk *thunk;
+
+ thunk = priv;
+ le1 = *(__DECONST(struct list_head **, d1));
+ le2 = *(__DECONST(struct list_head **, d2));
+ return ((thunk->cmp)(thunk->priv, le1, le2));
+}
+
+void
+list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
+ struct list_head *a, struct list_head *b))
+{
+ struct list_sort_thunk thunk;
+ struct list_head **ar, *le;
+ size_t count, i;
+
+ count = 0;
+ list_for_each(le, head)
+ count++;
+ ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
+ i = 0;
+ list_for_each(le, head)
+ ar[i++] = le;
+ thunk.cmp = cmp;
+ thunk.priv = priv;
+ qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
+ INIT_LIST_HEAD(head);
+ for (i = 0; i < count; i++)
+ list_add_tail(ar[i], head);
+ free(ar, M_KMALLOC);
+}
+
void
linux_irq_handler(void *ent)
{
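A small usage sketch for the new list_sort() (the element type, key and function names are invented for illustration):

#include <linux/list.h>

struct elem {
	int			key;
	struct list_head	entry;
};

static int
elem_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct elem *ea = list_entry(a, struct elem, entry);
	struct elem *eb = list_entry(b, struct elem, entry);

	return (ea->key - eb->key);
}

static void
sort_elems(struct list_head *head)
{

	/* Ascending by key; priv is unused here. */
	list_sort(NULL, head, elem_cmp);
}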
diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c
index e22d11e..a712649 100644
--- a/sys/compat/linuxkpi/common/src/linux_pci.c
+++ b/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -139,7 +139,7 @@ linux_pci_attach(device_t dev)
if (rle)
pdev->dev.irq = rle->start;
else
- pdev->dev.irq = 0;
+ pdev->dev.irq = 255;
pdev->irq = pdev->dev.irq;
mtx_unlock(&Giant);
spin_lock(&pci_lock);
diff --git a/sys/compat/ndis/subr_ntoskrnl.c b/sys/compat/ndis/subr_ntoskrnl.c
index 224c032..cfa9727 100644
--- a/sys/compat/ndis/subr_ntoskrnl.c
+++ b/sys/compat/ndis/subr_ntoskrnl.c
@@ -3188,17 +3188,14 @@ atol(str)
static int
rand(void)
{
- struct timeval tv;
- microtime(&tv);
- srandom(tv.tv_usec);
- return ((int)random());
+ return (random());
}
static void
-srand(seed)
- unsigned int seed;
+srand(unsigned int seed)
{
+
srandom(seed);
}
diff --git a/sys/conf/files b/sys/conf/files
index 0968026..428b898 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1125,6 +1125,7 @@ dev/bge/if_bge.c optional bge
dev/bhnd/bhnd.c optional bhndbus | bhnd
dev/bhnd/bhnd_subr.c optional bhndbus | bhnd
dev/bhnd/bhnd_bus_if.m optional bhndbus | bhnd
+dev/bhnd/bhndb/bhnd_bhndb.c optional bhndbus | bhndb
dev/bhnd/bhndb/bhndb.c optional bhndbus | bhndb
dev/bhnd/bhndb/bhndb_bus_if.m optional bhndbus | bhndb
dev/bhnd/bhndb/bhndb_hwdata.c optional bhndbus | bhndb
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index c2dc1d9..ef356ea 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -277,6 +277,7 @@ dev/hyperv/vmbus/hv_hv.c optional hyperv
dev/hyperv/vmbus/hv_et.c optional hyperv
dev/hyperv/vmbus/hv_ring_buffer.c optional hyperv
dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c optional hyperv
+dev/hyperv/vmbus/hyperv_busdma.c optional hyperv
dev/hyperv/vmbus/amd64/hv_vector.S optional hyperv
dev/nfe/if_nfe.c optional nfe pci
dev/ntb/if_ntb/if_ntb.c optional if_ntb
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 436eded..d0cf1ed 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -82,7 +82,7 @@ dev/vnic/thunder_bgx_fdt.c optional vnic fdt
dev/vnic/thunder_bgx.c optional vnic pci
dev/vnic/thunder_mdio_fdt.c optional vnic fdt
dev/vnic/thunder_mdio.c optional vnic
-dev/vnic/lmac_if.m optional vnic
+dev/vnic/lmac_if.m optional inet | inet6 | vnic
kern/kern_clocksource.c standard
kern/msi_if.m optional intrng
kern/pic_if.m optional intrng
diff --git a/sys/conf/files.i386 b/sys/conf/files.i386
index eb8585f..a64a916 100644
--- a/sys/conf/files.i386
+++ b/sys/conf/files.i386
@@ -252,6 +252,7 @@ dev/hyperv/vmbus/hv_hv.c optional hyperv
dev/hyperv/vmbus/hv_et.c optional hyperv
dev/hyperv/vmbus/hv_ring_buffer.c optional hyperv
dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c optional hyperv
+dev/hyperv/vmbus/hyperv_busdma.c optional hyperv
dev/hyperv/vmbus/i386/hv_vector.S optional hyperv
dev/ichwd/ichwd.c optional ichwd
dev/if_ndis/if_ndis.c optional ndis
diff --git a/sys/conf/kern.post.mk b/sys/conf/kern.post.mk
index 3286c66..1160dbc 100644
--- a/sys/conf/kern.post.mk
+++ b/sys/conf/kern.post.mk
@@ -247,6 +247,14 @@ beforebuild: kernel-depend
${__obj}: ${OBJS_DEPEND_GUESS}
.endif
${__obj}: ${OBJS_DEPEND_GUESS.${__obj}}
+.elif defined(_meta_filemon)
+# For meta mode we still need to know which file to depend on to avoid
+# ambiguous suffix transformation rules from .PATH. Meta mode does not
+# use .depend files. We really only need source files, not headers.
+.if ${SYSTEM_OBJS:M${__obj}}
+${__obj}: ${OBJS_DEPEND_GUESS:N*.h}
+.endif
+${__obj}: ${OBJS_DEPEND_GUESS.${__obj}:N*.h}
.endif
.endfor
diff --git a/sys/contrib/ipfilter/netinet/ip_frag.c b/sys/contrib/ipfilter/netinet/ip_frag.c
index e0cc5f3..2826d67 100644
--- a/sys/contrib/ipfilter/netinet/ip_frag.c
+++ b/sys/contrib/ipfilter/netinet/ip_frag.c
@@ -112,7 +112,7 @@ static void ipf_frag_free __P((ipf_frag_softc_t *, ipfr_t *));
static frentry_t ipfr_block;
-const ipftuneable_t ipf_tuneables[] = {
+static ipftuneable_t ipf_frag_tuneables[] = {
{ { (void *)offsetof(ipf_frag_softc_t, ipfr_size) },
"frag_size", 1, 0x7fffffff,
stsizeof(ipf_frag_softc_t, ipfr_size),
@@ -189,6 +189,18 @@ ipf_frag_soft_create(softc)
RWLOCK_INIT(&softf->ipfr_frag, "ipf fragment rwlock");
RWLOCK_INIT(&softf->ipfr_natfrag, "ipf NAT fragment rwlock");
+ softf->ipf_frag_tune = ipf_tune_array_copy(softf,
+ sizeof(ipf_frag_tuneables),
+ ipf_frag_tuneables);
+ if (softf->ipf_frag_tune == NULL) {
+ ipf_frag_soft_destroy(softc, softf);
+ return NULL;
+ }
+ if (ipf_tune_array_link(softc, softf->ipf_frag_tune) == -1) {
+ ipf_frag_soft_destroy(softc, softf);
+ return NULL;
+ }
+
softf->ipfr_size = IPFT_SIZE;
softf->ipfr_ttl = IPF_TTLVAL(60);
softf->ipfr_lock = 1;
@@ -219,6 +231,12 @@ ipf_frag_soft_destroy(softc, arg)
RW_DESTROY(&softf->ipfr_frag);
RW_DESTROY(&softf->ipfr_natfrag);
+ if (softf->ipf_frag_tune != NULL) {
+ ipf_tune_array_unlink(softc, softf->ipf_frag_tune);
+ KFREES(softf->ipf_frag_tune, sizeof(ipf_frag_tuneables));
+ softf->ipf_frag_tune = NULL;
+ }
+
KFREE(softf);
}
diff --git a/sys/contrib/ipfilter/netinet/ip_frag.h b/sys/contrib/ipfilter/netinet/ip_frag.h
index baa767d..08f45ff 100644
--- a/sys/contrib/ipfilter/netinet/ip_frag.h
+++ b/sys/contrib/ipfilter/netinet/ip_frag.h
@@ -78,6 +78,7 @@ typedef struct ipf_frag_softc_s {
int ipfr_ttl;
int ipfr_lock;
int ipfr_inited;
+ ipftuneable_t *ipf_frag_tune;
ipfr_t *ipfr_list;
ipfr_t **ipfr_tail;
ipfr_t *ipfr_natlist;
diff --git a/sys/dev/acpi_support/acpi_asus_wmi.c b/sys/dev/acpi_support/acpi_asus_wmi.c
index d77cc76..7538d3d 100644
--- a/sys/dev/acpi_support/acpi_asus_wmi.c
+++ b/sys/dev/acpi_support/acpi_asus_wmi.c
@@ -498,7 +498,7 @@ acpi_asus_wmi_sysctl_get(struct acpi_asus_wmi_softc *sc, int dev_id)
switch(dev_id) {
case ASUS_WMI_DEVID_THERMAL_CTRL:
- val = (val - 2732 + 5) / 10;
+ val = (val - 2731 + 5) / 10;
break;
case ASUS_WMI_DEVID_PROCESSOR_STATE:
case ASUS_WMI_DEVID_FAN_CTRL:
diff --git a/sys/dev/acpi_support/acpi_ibm.c b/sys/dev/acpi_support/acpi_ibm.c
index bb9aac6..0788f05 100644
--- a/sys/dev/acpi_support/acpi_ibm.c
+++ b/sys/dev/acpi_support/acpi_ibm.c
@@ -941,7 +941,7 @@ acpi_ibm_thermal_sysctl(SYSCTL_HANDLER_ARGS)
temp[i] = -1;
else if (sc->thermal_updt_supported)
/* Temperature is reported in tenth of Kelvin */
- temp[i] = (temp[i] - 2732 + 5) / 10;
+ temp[i] = (temp[i] - 2731 + 5) / 10;
}
error = sysctl_handle_opaque(oidp, &temp, 8*sizeof(int), req);
diff --git a/sys/dev/acpi_support/atk0110.c b/sys/dev/acpi_support/atk0110.c
index 3a49988..e79a0e0 100644
--- a/sys/dev/acpi_support/atk0110.c
+++ b/sys/dev/acpi_support/atk0110.c
@@ -344,9 +344,9 @@ aibs_sysctl(SYSCTL_HANDLER_ARGS)
case AIBS_VOLT:
break;
case AIBS_TEMP:
- v += 2732;
- l += 2732;
- h += 2732;
+ v += 2731;
+ l += 2731;
+ h += 2731;
break;
case AIBS_FAN:
break;
diff --git a/sys/dev/acpica/acpi_pcib_pci.c b/sys/dev/acpica/acpi_pcib_pci.c
index a041bca..8f44642 100644
--- a/sys/dev/acpica/acpi_pcib_pci.c
+++ b/sys/dev/acpica/acpi_pcib_pci.c
@@ -66,6 +66,7 @@ struct acpi_pcib_lookup_info {
static int acpi_pcib_pci_probe(device_t bus);
static int acpi_pcib_pci_attach(device_t bus);
+static int acpi_pcib_pci_detach(device_t bus);
static int acpi_pcib_read_ivar(device_t dev, device_t child,
int which, uintptr_t *result);
static int acpi_pcib_pci_route_interrupt(device_t pcib,
@@ -75,6 +76,7 @@ static device_method_t acpi_pcib_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, acpi_pcib_pci_probe),
DEVMETHOD(device_attach, acpi_pcib_pci_attach),
+ DEVMETHOD(device_detach, acpi_pcib_pci_detach),
/* Bus interface */
DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar),
@@ -127,6 +129,21 @@ acpi_pcib_pci_attach(device_t dev)
}
static int
+acpi_pcib_pci_detach(device_t dev)
+{
+ struct acpi_pcib_softc *sc;
+ int error;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ sc = device_get_softc(dev);
+ error = pcib_detach(dev);
+ if (error == 0)
+ AcpiOsFree(sc->ap_prt.Pointer);
+ return (error);
+}
+
+static int
acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
struct acpi_pcib_softc *sc = device_get_softc(dev);
diff --git a/sys/dev/acpica/acpi_thermal.c b/sys/dev/acpica/acpi_thermal.c
index dab1ac9..5b01de5 100644
--- a/sys/dev/acpica/acpi_thermal.c
+++ b/sys/dev/acpica/acpi_thermal.c
@@ -53,7 +53,7 @@ __FBSDID("$FreeBSD$");
#define _COMPONENT ACPI_THERMAL
ACPI_MODULE_NAME("THERMAL")
-#define TZ_ZEROC 2732
+#define TZ_ZEROC 2731
#define TZ_KELVTOC(x) (((x) - TZ_ZEROC) / 10), abs(((x) - TZ_ZEROC) % 10)
#define TZ_NOTIFY_TEMPERATURE 0x80 /* Temperature changed. */
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index d1d3d6b..f809937 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -2411,11 +2411,10 @@ ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb
fis[11] = ccb->ataio.cmd.features_exp;
if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
fis[12] = tag << 3;
- fis[13] = 0;
} else {
fis[12] = ccb->ataio.cmd.sector_count;
- fis[13] = ccb->ataio.cmd.sector_count_exp;
}
+ fis[13] = ccb->ataio.cmd.sector_count_exp;
fis[15] = ATA_A_4BIT;
} else {
fis[15] = ccb->ataio.cmd.control;
diff --git a/sys/dev/amdtemp/amdtemp.c b/sys/dev/amdtemp/amdtemp.c
index aa46661..1e658e6 100644
--- a/sys/dev/amdtemp/amdtemp.c
+++ b/sys/dev/amdtemp/amdtemp.c
@@ -505,7 +505,7 @@ amdtemp_sysctl(SYSCTL_HANDLER_ARGS)
return (error);
}
-#define AMDTEMP_ZERO_C_TO_K 2732
+#define AMDTEMP_ZERO_C_TO_K 2731
static int32_t
amdtemp_gettemp0f(device_t dev, amdsensor_t sensor)
diff --git a/sys/dev/ath/ath_hal/ah_regdomain.c b/sys/dev/ath/ath_hal/ah_regdomain.c
index 4f374c4..d388e12 100644
--- a/sys/dev/ath/ath_hal/ah_regdomain.c
+++ b/sys/dev/ath/ath_hal/ah_regdomain.c
@@ -35,7 +35,7 @@
*/
/* used throughout this file... */
-#define N(a) (sizeof (a) / sizeof (a[0]))
+#define N(a) nitems(a)
#define HAL_MODE_11A_TURBO HAL_MODE_108A
#define HAL_MODE_11G_TURBO HAL_MODE_108G
@@ -99,30 +99,32 @@
#include "ah_regdomain/ah_rd_domains.h"
static const struct cmode modes[] = {
- { HAL_MODE_TURBO, IEEE80211_CHAN_ST },
- { HAL_MODE_11A, IEEE80211_CHAN_A },
- { HAL_MODE_11B, IEEE80211_CHAN_B },
- { HAL_MODE_11G, IEEE80211_CHAN_G },
- { HAL_MODE_11G_TURBO, IEEE80211_CHAN_108G },
- { HAL_MODE_11A_TURBO, IEEE80211_CHAN_108A },
+ { HAL_MODE_TURBO, IEEE80211_CHAN_ST, &regDmn5GhzTurboFreq[0] },
+ { HAL_MODE_11A, IEEE80211_CHAN_A, &regDmn5GhzFreq[0] },
+ { HAL_MODE_11B, IEEE80211_CHAN_B, &regDmn2GhzFreq[0] },
+ { HAL_MODE_11G, IEEE80211_CHAN_G, &regDmn2Ghz11gFreq[0] },
+ { HAL_MODE_11G_TURBO, IEEE80211_CHAN_108G, &regDmn2Ghz11gTurboFreq[0] },
+ { HAL_MODE_11A_TURBO, IEEE80211_CHAN_108A, &regDmn5GhzTurboFreq[0] },
{ HAL_MODE_11A_QUARTER_RATE,
- IEEE80211_CHAN_A | IEEE80211_CHAN_QUARTER },
+ IEEE80211_CHAN_A | IEEE80211_CHAN_QUARTER, &regDmn5GhzFreq[0] },
{ HAL_MODE_11A_HALF_RATE,
- IEEE80211_CHAN_A | IEEE80211_CHAN_HALF },
+ IEEE80211_CHAN_A | IEEE80211_CHAN_HALF, &regDmn5GhzFreq[0] },
{ HAL_MODE_11G_QUARTER_RATE,
- IEEE80211_CHAN_G | IEEE80211_CHAN_QUARTER },
+ IEEE80211_CHAN_G | IEEE80211_CHAN_QUARTER, &regDmn2Ghz11gFreq[0] },
{ HAL_MODE_11G_HALF_RATE,
- IEEE80211_CHAN_G | IEEE80211_CHAN_HALF },
- { HAL_MODE_11NG_HT20, IEEE80211_CHAN_G | IEEE80211_CHAN_HT20 },
+ IEEE80211_CHAN_G | IEEE80211_CHAN_HALF, &regDmn2Ghz11gFreq[0] },
+ { HAL_MODE_11NG_HT20,
+ IEEE80211_CHAN_G | IEEE80211_CHAN_HT20, &regDmn2Ghz11gFreq[0] },
{ HAL_MODE_11NG_HT40PLUS,
- IEEE80211_CHAN_G | IEEE80211_CHAN_HT40U },
+ IEEE80211_CHAN_G | IEEE80211_CHAN_HT40U, &regDmn2Ghz11gFreq[0] },
{ HAL_MODE_11NG_HT40MINUS,
- IEEE80211_CHAN_G | IEEE80211_CHAN_HT40D },
- { HAL_MODE_11NA_HT20, IEEE80211_CHAN_A | IEEE80211_CHAN_HT20 },
+ IEEE80211_CHAN_G | IEEE80211_CHAN_HT40D, &regDmn2Ghz11gFreq[0] },
+ { HAL_MODE_11NA_HT20,
+ IEEE80211_CHAN_A | IEEE80211_CHAN_HT20, &regDmn5GhzFreq[0] },
{ HAL_MODE_11NA_HT40PLUS,
- IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U },
+ IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U, &regDmn5GhzFreq[0] },
{ HAL_MODE_11NA_HT40MINUS,
- IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D },
+ IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D, &regDmn5GhzFreq[0] },
};
static void ath_hal_update_dfsdomain(struct ath_hal *ah);
@@ -358,6 +360,235 @@ getregstate(struct ath_hal *ah, HAL_CTRY_CODE cc, HAL_REG_DOMAIN regDmn,
return HAL_OK;
}
+static uint64_t *
+getchannelBM(u_int mode, REG_DOMAIN *rd)
+{
+ switch (mode) {
+ case HAL_MODE_11B:
+ return (rd->chan11b);
+ case HAL_MODE_11G_QUARTER_RATE:
+ return (rd->chan11g_quarter);
+ case HAL_MODE_11G_HALF_RATE:
+ return (rd->chan11g_half);
+ case HAL_MODE_11G:
+ case HAL_MODE_11NG_HT20:
+ case HAL_MODE_11NG_HT40PLUS:
+ case HAL_MODE_11NG_HT40MINUS:
+ return (rd->chan11g);
+ case HAL_MODE_11G_TURBO:
+ return (rd->chan11g_turbo);
+ case HAL_MODE_11A_QUARTER_RATE:
+ return (rd->chan11a_quarter);
+ case HAL_MODE_11A_HALF_RATE:
+ return (rd->chan11a_half);
+ case HAL_MODE_11A:
+ case HAL_MODE_11NA_HT20:
+ case HAL_MODE_11NA_HT40PLUS:
+ case HAL_MODE_11NA_HT40MINUS:
+ return (rd->chan11a);
+ case HAL_MODE_TURBO:
+ return (rd->chan11a_turbo);
+ case HAL_MODE_11A_TURBO:
+ return (rd->chan11a_dyn_turbo);
+ default:
+ return (AH_NULL);
+ }
+}
+
+static void
+setchannelflags(struct ieee80211_channel *c, REG_DMN_FREQ_BAND *fband,
+ REG_DOMAIN *rd)
+{
+ if (fband->usePassScan & rd->pscan)
+ c->ic_flags |= IEEE80211_CHAN_PASSIVE;
+ if (fband->useDfs & rd->dfsMask)
+ c->ic_flags |= IEEE80211_CHAN_DFS;
+ if (IEEE80211_IS_CHAN_5GHZ(c) && (rd->flags & DISALLOW_ADHOC_11A))
+ c->ic_flags |= IEEE80211_CHAN_NOADHOC;
+ if (IEEE80211_IS_CHAN_TURBO(c) &&
+ (rd->flags & DISALLOW_ADHOC_11A_TURB))
+ c->ic_flags |= IEEE80211_CHAN_NOADHOC;
+ if (rd->flags & NO_HOSTAP)
+ c->ic_flags |= IEEE80211_CHAN_NOHOSTAP;
+ if (rd->flags & LIMIT_FRAME_4MS)
+ c->ic_flags |= IEEE80211_CHAN_4MSXMIT;
+ if (rd->flags & NEED_NFC)
+ c->ic_flags |= CHANNEL_NFCREQUIRED;
+}
+
+static int
+addchan(struct ath_hal *ah, struct ieee80211_channel chans[],
+ u_int maxchans, int *nchans, uint16_t freq, uint32_t flags,
+ REG_DMN_FREQ_BAND *fband, REG_DOMAIN *rd)
+{
+ struct ieee80211_channel *c;
+
+ if (*nchans >= maxchans)
+ return (HAL_ENOMEM);
+
+ c = &chans[(*nchans)++];
+ c->ic_freq = freq;
+ c->ic_flags = flags;
+ setchannelflags(c, fband, rd);
+ c->ic_maxregpower = fband->powerDfs;
+ ath_hal_getpowerlimits(ah, c);
+ c->ic_maxantgain = fband->antennaMax;
+
+ return (0);
+}
+
+static int
+copychan_prev(struct ath_hal *ah, struct ieee80211_channel chans[],
+ u_int maxchans, int *nchans, uint16_t freq)
+{
+ struct ieee80211_channel *c;
+
+ if (*nchans == 0)
+ return (HAL_EINVAL);
+
+ if (*nchans >= maxchans)
+ return (HAL_ENOMEM);
+
+ c = &chans[(*nchans)++];
+ c[0] = c[-1];
+ c->ic_freq = freq;
+ /* XXX is it needed here? */
+ ath_hal_getpowerlimits(ah, c);
+
+ return (0);
+}
+
+static int
+add_chanlist_band(struct ath_hal *ah, struct ieee80211_channel chans[],
+ int maxchans, int *nchans, uint16_t freq_lo, uint16_t freq_hi, int step,
+ uint32_t flags, REG_DMN_FREQ_BAND *fband, REG_DOMAIN *rd)
+{
+ uint16_t freq = freq_lo;
+ int error;
+
+ if (freq_hi < freq_lo)
+ return (0);
+
+ error = addchan(ah, chans, maxchans, nchans, freq, flags, fband, rd);
+ for (freq += step; freq <= freq_hi && error == 0; freq += step)
+ error = copychan_prev(ah, chans, maxchans, nchans, freq);
+
+ return (error);
+}
+
+static void
+adj_freq_ht40(u_int mode, int *low_adj, int *hi_adj, int *channelSep)
+{
+
+ *low_adj = *hi_adj = *channelSep = 0;
+ switch (mode) {
+ case HAL_MODE_11NA_HT40PLUS:
+ *channelSep = 40;
+ /* FALLTHROUGH */
+ case HAL_MODE_11NG_HT40PLUS:
+ *hi_adj = -20;
+ break;
+ case HAL_MODE_11NA_HT40MINUS:
+ *channelSep = 40;
+ /* FALLTHROUGH */
+ case HAL_MODE_11NG_HT40MINUS:
+ *low_adj = 20;
+ break;
+ }
+}
+
+static void
+add_chanlist_mode(struct ath_hal *ah, struct ieee80211_channel chans[],
+ u_int maxchans, int *nchans, const struct cmode *cm, REG_DOMAIN *rd,
+ HAL_BOOL enableExtendedChannels)
+{
+ uint64_t *channelBM;
+ uint16_t freq_lo, freq_hi;
+ int b, error, low_adj, hi_adj, channelSep;
+
+ if (!ath_hal_getChannelEdges(ah, cm->flags, &freq_lo, &freq_hi)) {
+ /* channel not supported by hardware, skip it */
+ HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
+ "%s: channels 0x%x not supported by hardware\n",
+ __func__, cm->flags);
+ return;
+ }
+
+ channelBM = getchannelBM(cm->mode, rd);
+ if (isChanBitMaskZero(channelBM))
+ return;
+
+ /*
+ * Setup special handling for HT40 channels; e.g.
+ * 5G HT40 channels require 40 MHz channel separation.
+ */
+ adj_freq_ht40(cm->mode, &low_adj, &hi_adj, &channelSep);
+
+ for (b = 0; b < 64*BMLEN; b++) {
+ REG_DMN_FREQ_BAND *fband;
+ uint16_t bfreq_lo, bfreq_hi;
+ int step;
+
+ if (!IS_BIT_SET(b, channelBM))
+ continue;
+ fband = &cm->freqs[b];
+
+ if ((fband->usePassScan & IS_ECM_CHAN) &&
+ !enableExtendedChannels) {
+ HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
+ "skip ecm channels\n");
+ continue;
+ }
+#if 0
+ if ((fband->useDfs & rd->dfsMask) &&
+ (cm->flags & IEEE80211_CHAN_HT40)) {
+ /* NB: DFS and HT40 don't mix */
+ HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
+ "skip HT40 chan, DFS required\n");
+ continue;
+ }
+#endif
+ bfreq_lo = MAX(fband->lowChannel + low_adj, freq_lo);
+ bfreq_hi = MIN(fband->highChannel + hi_adj, freq_hi);
+ if (fband->channelSep >= channelSep)
+ step = fband->channelSep;
+ else
+ step = roundup(channelSep, fband->channelSep);
+
+ error = add_chanlist_band(ah, chans, maxchans, nchans,
+ bfreq_lo, bfreq_hi, step, cm->flags, fband, rd);
+ if (error != 0) {
+ HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
+ "%s: too many channels for channel table\n",
+ __func__);
+ return;
+ }
+ }
+}
+
+static u_int
+getmodesmask(struct ath_hal *ah, REG_DOMAIN *rd5GHz, u_int modeSelect)
+{
+#define HAL_MODE_11A_ALL \
+ (HAL_MODE_11A | HAL_MODE_11A_TURBO | HAL_MODE_TURBO | \
+ HAL_MODE_11A_QUARTER_RATE | HAL_MODE_11A_HALF_RATE)
+ u_int modesMask;
+
+ /* get modes that HW is capable of */
+ modesMask = ath_hal_getWirelessModes(ah);
+ modesMask &= modeSelect;
+ /* optimize work below if no 11a channels */
+ if (isChanBitMaskZero(rd5GHz->chan11a) &&
+ (modesMask & HAL_MODE_11A_ALL)) {
+ HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
+ "%s: disallow all 11a\n", __func__);
+ modesMask &= ~HAL_MODE_11A_ALL;
+ }
+
+ return (modesMask);
+#undef HAL_MODE_11A_ALL
+}
+
/*
* Construct the channel list for the specified regulatory config.
*/
@@ -369,16 +600,9 @@ getchannels(struct ath_hal *ah,
COUNTRY_CODE_TO_ENUM_RD **pcountry,
REG_DOMAIN **prd2GHz, REG_DOMAIN **prd5GHz)
{
-#define CHANNEL_HALF_BW 10
-#define CHANNEL_QUARTER_BW 5
-#define HAL_MODE_11A_ALL \
- (HAL_MODE_11A | HAL_MODE_11A_TURBO | HAL_MODE_TURBO | \
- HAL_MODE_11A_QUARTER_RATE | HAL_MODE_11A_HALF_RATE)
REG_DOMAIN *rd5GHz, *rd2GHz;
- u_int modesAvail;
+ u_int modesMask;
const struct cmode *cm;
- struct ieee80211_channel *ic;
- int next, b;
HAL_STATUS status;
HALDEBUG(ah, HAL_DEBUG_REGDOMAIN, "%s: cc %u regDmn 0x%x mode 0x%x%s\n",
@@ -389,206 +613,43 @@ getchannels(struct ath_hal *ah,
if (status != HAL_OK)
return status;
- /* get modes that HW is capable of */
- modesAvail = ath_hal_getWirelessModes(ah);
- /* optimize work below if no 11a channels */
- if (isChanBitMaskZero(rd5GHz->chan11a) &&
- (modesAvail & HAL_MODE_11A_ALL)) {
- HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
- "%s: disallow all 11a\n", __func__);
- modesAvail &= ~HAL_MODE_11A_ALL;
- }
+ modesMask = getmodesmask(ah, rd5GHz, modeSelect);
+ /* XXX error? */
+ if (modesMask == 0)
+ goto done;
- next = 0;
- ic = &chans[0];
for (cm = modes; cm < &modes[N(modes)]; cm++) {
- uint16_t c, c_hi, c_lo;
- uint64_t *channelBM = AH_NULL;
- REG_DMN_FREQ_BAND *fband = AH_NULL,*freqs;
- int low_adj, hi_adj, channelSep, lastc;
- uint32_t rdflags;
- uint64_t dfsMask;
- uint64_t pscan;
-
- if ((cm->mode & modeSelect) == 0) {
+ REG_DOMAIN *rd;
+
+ if ((cm->mode & modesMask) == 0) {
HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
"%s: skip mode 0x%x flags 0x%x\n",
__func__, cm->mode, cm->flags);
continue;
}
- if ((cm->mode & modesAvail) == 0) {
- HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
- "%s: !avail mode 0x%x (0x%x) flags 0x%x\n",
- __func__, modesAvail, cm->mode, cm->flags);
- continue;
- }
- if (!ath_hal_getChannelEdges(ah, cm->flags, &c_lo, &c_hi)) {
- /* channel not supported by hardware, skip it */
- HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
- "%s: channels 0x%x not supported by hardware\n",
- __func__,cm->flags);
- continue;
- }
- switch (cm->mode) {
- case HAL_MODE_TURBO:
- case HAL_MODE_11A_TURBO:
- rdflags = rd5GHz->flags;
- dfsMask = rd5GHz->dfsMask;
- pscan = rd5GHz->pscan;
- if (cm->mode == HAL_MODE_TURBO)
- channelBM = rd5GHz->chan11a_turbo;
- else
- channelBM = rd5GHz->chan11a_dyn_turbo;
- freqs = &regDmn5GhzTurboFreq[0];
- break;
- case HAL_MODE_11G_TURBO:
- rdflags = rd2GHz->flags;
- dfsMask = rd2GHz->dfsMask;
- pscan = rd2GHz->pscan;
- channelBM = rd2GHz->chan11g_turbo;
- freqs = &regDmn2Ghz11gTurboFreq[0];
- break;
- case HAL_MODE_11A:
- case HAL_MODE_11A_HALF_RATE:
- case HAL_MODE_11A_QUARTER_RATE:
- case HAL_MODE_11NA_HT20:
- case HAL_MODE_11NA_HT40PLUS:
- case HAL_MODE_11NA_HT40MINUS:
- rdflags = rd5GHz->flags;
- dfsMask = rd5GHz->dfsMask;
- pscan = rd5GHz->pscan;
- if (cm->mode == HAL_MODE_11A_HALF_RATE)
- channelBM = rd5GHz->chan11a_half;
- else if (cm->mode == HAL_MODE_11A_QUARTER_RATE)
- channelBM = rd5GHz->chan11a_quarter;
- else
- channelBM = rd5GHz->chan11a;
- freqs = &regDmn5GhzFreq[0];
- break;
- case HAL_MODE_11B:
- case HAL_MODE_11G:
- case HAL_MODE_11G_HALF_RATE:
- case HAL_MODE_11G_QUARTER_RATE:
- case HAL_MODE_11NG_HT20:
- case HAL_MODE_11NG_HT40PLUS:
- case HAL_MODE_11NG_HT40MINUS:
- rdflags = rd2GHz->flags;
- dfsMask = rd2GHz->dfsMask;
- pscan = rd2GHz->pscan;
- if (cm->mode == HAL_MODE_11G_HALF_RATE)
- channelBM = rd2GHz->chan11g_half;
- else if (cm->mode == HAL_MODE_11G_QUARTER_RATE)
- channelBM = rd2GHz->chan11g_quarter;
- else if (cm->mode == HAL_MODE_11B)
- channelBM = rd2GHz->chan11b;
- else
- channelBM = rd2GHz->chan11g;
- if (cm->mode == HAL_MODE_11B)
- freqs = &regDmn2GhzFreq[0];
- else
- freqs = &regDmn2Ghz11gFreq[0];
- break;
- default:
- HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
- "%s: Unknown HAL mode 0x%x\n", __func__, cm->mode);
- continue;
- }
- if (isChanBitMaskZero(channelBM))
- continue;
- /*
- * Setup special handling for HT40 channels; e.g.
- * 5G HT40 channels require 40Mhz channel separation.
- */
- hi_adj = (cm->mode == HAL_MODE_11NA_HT40PLUS ||
- cm->mode == HAL_MODE_11NG_HT40PLUS) ? -20 : 0;
- low_adj = (cm->mode == HAL_MODE_11NA_HT40MINUS ||
- cm->mode == HAL_MODE_11NG_HT40MINUS) ? 20 : 0;
- channelSep = (cm->mode == HAL_MODE_11NA_HT40PLUS ||
- cm->mode == HAL_MODE_11NA_HT40MINUS) ? 40 : 0;
-
- for (b = 0; b < 64*BMLEN; b++) {
- if (!IS_BIT_SET(b, channelBM))
- continue;
- fband = &freqs[b];
- lastc = 0;
-
- for (c = fband->lowChannel + low_adj;
- c <= fband->highChannel + hi_adj;
- c += fband->channelSep) {
- if (!(c_lo <= c && c <= c_hi)) {
- HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
- "%s: c %u out of range [%u..%u]\n",
- __func__, c, c_lo, c_hi);
- continue;
- }
- if (next >= maxchans){
- HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
- "%s: too many channels for channel table\n",
- __func__);
- goto done;
- }
- if ((fband->usePassScan & IS_ECM_CHAN) &&
- !enableExtendedChannels) {
- HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
- "skip ecm channel\n");
- continue;
- }
-#if 0
- if ((fband->useDfs & dfsMask) &&
- (cm->flags & IEEE80211_CHAN_HT40)) {
- /* NB: DFS and HT40 don't mix */
- HALDEBUG(ah, HAL_DEBUG_REGDOMAIN,
- "skip HT40 chan, DFS required\n");
- continue;
- }
-#endif
- /*
- * Make sure that channel separation
- * meets the requirement.
- */
- if (lastc && channelSep &&
- (c-lastc) < channelSep)
- continue;
- lastc = c;
-
- OS_MEMZERO(ic, sizeof(*ic));
- ic->ic_freq = c;
- ic->ic_flags = cm->flags;
- ic->ic_maxregpower = fband->powerDfs;
- ath_hal_getpowerlimits(ah, ic);
- ic->ic_maxantgain = fband->antennaMax;
- if (fband->usePassScan & pscan)
- ic->ic_flags |= IEEE80211_CHAN_PASSIVE;
- if (fband->useDfs & dfsMask)
- ic->ic_flags |= IEEE80211_CHAN_DFS;
- if (IEEE80211_IS_CHAN_5GHZ(ic) &&
- (rdflags & DISALLOW_ADHOC_11A))
- ic->ic_flags |= IEEE80211_CHAN_NOADHOC;
- if (IEEE80211_IS_CHAN_TURBO(ic) &&
- (rdflags & DISALLOW_ADHOC_11A_TURB))
- ic->ic_flags |= IEEE80211_CHAN_NOADHOC;
- if (rdflags & NO_HOSTAP)
- ic->ic_flags |= IEEE80211_CHAN_NOHOSTAP;
- if (rdflags & LIMIT_FRAME_4MS)
- ic->ic_flags |= IEEE80211_CHAN_4MSXMIT;
- if (rdflags & NEED_NFC)
- ic->ic_flags |= CHANNEL_NFCREQUIRED;
-
- ic++, next++;
- }
+
+ if (cm->flags & IEEE80211_CHAN_5GHZ)
+ rd = rd5GHz;
+ else if (cm->flags & IEEE80211_CHAN_2GHZ)
+ rd = rd2GHz;
+ else {
+ ath_hal_printf(ah, "%s: Unknown HAL flags 0x%x\n",
+ __func__, cm->flags);
+ return HAL_EINVAL;
}
+
+ add_chanlist_mode(ah, chans, maxchans, nchans, cm,
+ rd, enableExtendedChannels);
+ if (*nchans >= maxchans)
+ goto done;
}
done:
- *nchans = next;
/* NB: pcountry set above by getregstate */
if (prd2GHz != AH_NULL)
*prd2GHz = rd2GHz;
if (prd5GHz != AH_NULL)
*prd5GHz = rd5GHz;
return HAL_OK;
-#undef HAL_MODE_11A_ALL
-#undef CHANNEL_HALF_BW
-#undef CHANNEL_QUARTER_BW
}
/*
diff --git a/sys/dev/ath/ath_hal/ah_regdomain.h b/sys/dev/ath/ath_hal/ah_regdomain.h
index e17e79a..9d6e59b 100644
--- a/sys/dev/ath/ath_hal/ah_regdomain.h
+++ b/sys/dev/ath/ath_hal/ah_regdomain.h
@@ -157,7 +157,8 @@ typedef struct regDomain {
} REG_DOMAIN;
struct cmode {
- u_int mode;
- u_int flags;
+ u_int mode;
+ u_int flags;
+ REG_DMN_FREQ_BAND *freqs;
};
#endif
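Each cmode entry now carries its REG_DMN_FREQ_BAND table directly, and add_chanlist_mode() derives the channel step from the band's native separation and any mode-imposed separation (40 for 5 GHz HT40). A small standalone sketch of that step selection (illustration only; the values are made up):

/* Illustrative sketch, not HAL code: how the channel step is chosen in
 * add_chanlist_mode().  fband_sep is the band's native separation;
 * mode_sep is the extra separation required by the mode (40 for 5 GHz
 * HT40, 0 otherwise).  roundup() behaves as in sys/param.h. */
#include <stdio.h>

#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

static int
channel_step(int fband_sep, int mode_sep)
{
        if (fband_sep >= mode_sep)
                return (fband_sep);
        return (roundup(mode_sep, fband_sep));
}

int
main(void)
{
        /* A band with 20 MHz native separation: legacy modes keep 20,
         * 5 GHz HT40 is widened to 40. */
        printf("legacy: %d MHz\n", channel_step(20, 0));
        printf("HT40:   %d MHz\n", channel_step(20, 40));
        return (0);
}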
diff --git a/sys/dev/bhnd/bcma/bcma_bhndb.c b/sys/dev/bhnd/bcma/bcma_bhndb.c
index aa612f3..a118eb1 100644
--- a/sys/dev/bhnd/bcma/bcma_bhndb.c
+++ b/sys/dev/bhnd/bcma/bcma_bhndb.c
@@ -166,20 +166,6 @@ bcma_bhndb_resume_child(device_t dev, device_t child)
return (0);
}
-static int
-bcma_bhndb_read_board_info(device_t dev, device_t child,
- struct bhnd_board_info *info)
-{
- int error;
-
- /* Initialize with NVRAM-derived values */
- if ((error = bhnd_bus_generic_read_board_info(dev, child, info)))
- return (error);
-
- /* Let the bridge fill in any additional data */
- return (BHNDB_POPULATE_BOARD_INFO(device_get_parent(dev), dev, info));
-}
-
static device_method_t bcma_bhndb_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, bcma_bhndb_probe),
@@ -189,14 +175,11 @@ static device_method_t bcma_bhndb_methods[] = {
DEVMETHOD(bus_suspend_child, bcma_bhndb_suspend_child),
DEVMETHOD(bus_resume_child, bcma_bhndb_resume_child),
- /* BHND interface */
- DEVMETHOD(bhnd_bus_read_board_info, bcma_bhndb_read_board_info),
-
DEVMETHOD_END
};
-DEFINE_CLASS_1(bhnd, bcma_bhndb_driver, bcma_bhndb_methods,
- sizeof(struct bcma_softc), bcma_driver);
+DEFINE_CLASS_2(bhnd, bcma_bhndb_driver, bcma_bhndb_methods,
+ sizeof(struct bcma_softc), bhnd_bhndb_driver, bcma_driver);
DRIVER_MODULE(bcma_bhndb, bhndb, bcma_bhndb_driver, bhnd_devclass, NULL, NULL);
diff --git a/sys/dev/bhnd/bcma/bcma_nexus.c b/sys/dev/bhnd/bcma/bcma_nexus.c
new file mode 100644
index 0000000..241e5d1
--- /dev/null
+++ b/sys/dev/bhnd/bcma/bcma_nexus.c
@@ -0,0 +1,111 @@
+/*-
+ * Copyright (c) 2016 Michael Zhilin <mizhka@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/errno.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/bhnd/bhnd.h>
+
+#include "bhnd_bus_if.h"
+#include "bcmavar.h"
+#include "bcma_eromreg.h"
+
+#define BCMA_NEXUS_EROM_RID 10
+
+static int
+bcma_nexus_probe(device_t dev)
+{
+ const struct bhnd_chipid *cid = BHND_BUS_GET_CHIPID(device_get_parent(dev), dev);
+
+ /* Check bus type */
+ if (cid->chip_type != BHND_CHIPTYPE_BCMA)
+ return (ENXIO);
+
+ /* Delegate to default probe implementation */
+ return (bcma_probe(dev));
+}
+
+static int
+bcma_nexus_attach(device_t dev)
+{
+ int erom_rid;
+ int error;
+ struct resource *erom_res;
+ const struct bhnd_chipid *cid = BHND_BUS_GET_CHIPID(device_get_parent(dev), dev);
+
+ erom_rid = BCMA_NEXUS_EROM_RID;
+ error = bus_set_resource(dev, SYS_RES_MEMORY, erom_rid, cid->enum_addr, BCMA_EROM_TABLE_SIZE);
+ if (error != 0) {
+ BHND_ERROR_DEV(dev, "failed to set EROM resource");
+ return (error);
+ }
+
+ /* Map the EROM resource and enumerate our children. */
+ BHND_DEBUG_DEV(dev, "erom enum address: %jx", cid->enum_addr);
+ erom_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &erom_rid, RF_ACTIVE);
+ if (erom_res == NULL) {
+ BHND_ERROR_DEV(dev, "failed to allocate EROM resource");
+ return (ENXIO);
+ }
+
+ BHND_DEBUG_DEV(dev, "erom scanning start address: %p", rman_get_virtual(erom_res));
+ error = bcma_add_children(dev, erom_res, BCMA_EROM_TABLE_START);
+
+ /* Clean up */
+ bus_release_resource(dev, SYS_RES_MEMORY, erom_rid, erom_res);
+ if (error)
+ return (error);
+
+ /* Call our superclass' implementation */
+ return (bcma_attach(dev));
+}
+
+static device_method_t bcma_nexus_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, bcma_nexus_probe),
+ DEVMETHOD(device_attach, bcma_nexus_attach),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(bhnd, bcma_nexus_driver, bcma_nexus_methods, sizeof(struct bcma_softc), bcma_driver);
+EARLY_DRIVER_MODULE(bcma_nexus, bhnd_soc, bcma_nexus_driver, bhnd_devclass,
+ NULL, NULL, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+
+MODULE_VERSION(bcma_nexus, 1);
+MODULE_DEPEND(bcma_nexus, bcma, 1, 1, 1);
+MODULE_DEPEND(bcma_nexus, bhnd_soc, 1, 1, 1);
diff --git a/sys/dev/bhnd/bhnd.c b/sys/dev/bhnd/bhnd.c
index 11e34dd..8f85900 100644
--- a/sys/dev/bhnd/bhnd.c
+++ b/sys/dev/bhnd/bhnd.c
@@ -529,36 +529,36 @@ bhnd_generic_resume_child(device_t dev, device_t child)
/*
* Delegate all indirect I/O to the parent device. When inherited by
* non-bridged bus implementations, resources will never be marked as
- * indirect, and these methods should never be called.
+ * indirect, and these methods will never be called.
*/
-#define BHND_IO_READ(_type, _name, _method) \
-static _type \
-bhnd_read_ ## _name (device_t dev, device_t child, \
- struct bhnd_resource *r, bus_size_t offset) \
-{ \
- return (BHND_BUS_READ_ ## _method( \
- device_get_parent(dev), child, r, offset)); \
+#define BHND_IO_READ(_type, _name, _method) \
+static _type \
+bhnd_read_ ## _name (device_t dev, device_t child, \
+ struct bhnd_resource *r, bus_size_t offset) \
+{ \
+ return (BHND_BUS_READ_ ## _method( \
+ device_get_parent(dev), child, r, offset)); \
}
-#define BHND_IO_WRITE(_type, _name, _method) \
-static void \
-bhnd_write_ ## _name (device_t dev, device_t child, \
- struct bhnd_resource *r, bus_size_t offset, _type value) \
-{ \
- return (BHND_BUS_WRITE_ ## _method( \
- device_get_parent(dev), child, r, offset, \
+#define BHND_IO_WRITE(_type, _name, _method) \
+static void \
+bhnd_write_ ## _name (device_t dev, device_t child, \
+ struct bhnd_resource *r, bus_size_t offset, _type value) \
+{ \
+ return (BHND_BUS_WRITE_ ## _method( \
+ device_get_parent(dev), child, r, offset, \
value)); \
}
-#define BHND_IO_MULTI(_type, _rw, _name, _method) \
+#define BHND_IO_MISC(_type, _op, _method) \
static void \
-bhnd_ ## _rw ## _multi_ ## _name (device_t dev, device_t child, \
- struct bhnd_resource *r, bus_size_t offset, _type *datap, \
+bhnd_ ## _op (device_t dev, device_t child, \
+ struct bhnd_resource *r, bus_size_t offset, _type datap, \
bus_size_t count) \
{ \
BHND_BUS_ ## _method(device_get_parent(dev), child, r, \
offset, datap, count); \
-}
+}
#define BHND_IO_METHODS(_type, _size) \
BHND_IO_READ(_type, _size, _size) \
@@ -567,13 +567,28 @@ bhnd_ ## _rw ## _multi_ ## _name (device_t dev, device_t child, \
BHND_IO_READ(_type, stream_ ## _size, STREAM_ ## _size) \
BHND_IO_WRITE(_type, stream_ ## _size, STREAM_ ## _size) \
\
- BHND_IO_MULTI(_type, read, _size, READ_MULTI_ ## _size) \
- BHND_IO_MULTI(_type, write, _size, WRITE_MULTI_ ## _size) \
+ BHND_IO_MISC(_type*, read_multi_ ## _size, \
+ READ_MULTI_ ## _size) \
+ BHND_IO_MISC(_type*, write_multi_ ## _size, \
+ WRITE_MULTI_ ## _size) \
\
- BHND_IO_MULTI(_type, read, stream_ ## _size, \
+ BHND_IO_MISC(_type*, read_multi_stream_ ## _size, \
READ_MULTI_STREAM_ ## _size) \
- BHND_IO_MULTI(_type, write, stream_ ## _size, \
+ BHND_IO_MISC(_type*, write_multi_stream_ ## _size, \
WRITE_MULTI_STREAM_ ## _size) \
+ \
+ BHND_IO_MISC(_type, set_multi_ ## _size, SET_MULTI_ ## _size) \
+ BHND_IO_MISC(_type, set_region_ ## _size, SET_REGION_ ## _size) \
+ \
+ BHND_IO_MISC(_type*, read_region_ ## _size, \
+ READ_REGION_ ## _size) \
+ BHND_IO_MISC(_type*, write_region_ ## _size, \
+ WRITE_REGION_ ## _size) \
+ \
+ BHND_IO_MISC(_type*, read_region_stream_ ## _size, \
+ READ_REGION_STREAM_ ## _size) \
+ BHND_IO_MISC(_type*, write_region_stream_ ## _size, \
+ WRITE_REGION_STREAM_ ## _size) \
BHND_IO_METHODS(uint8_t, 1);
BHND_IO_METHODS(uint16_t, 2);
@@ -627,12 +642,15 @@ static device_method_t bhnd_methods[] = {
DEVMETHOD(bhnd_bus_is_region_valid, bhnd_generic_is_region_valid),
DEVMETHOD(bhnd_bus_is_hw_disabled, bhnd_bus_generic_is_hw_disabled),
DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var),
+
+ /* BHND interface (bus I/O) */
DEVMETHOD(bhnd_bus_read_1, bhnd_read_1),
DEVMETHOD(bhnd_bus_read_2, bhnd_read_2),
DEVMETHOD(bhnd_bus_read_4, bhnd_read_4),
DEVMETHOD(bhnd_bus_write_1, bhnd_write_1),
DEVMETHOD(bhnd_bus_write_2, bhnd_write_2),
DEVMETHOD(bhnd_bus_write_4, bhnd_write_4),
+
DEVMETHOD(bhnd_bus_read_stream_1, bhnd_read_stream_1),
DEVMETHOD(bhnd_bus_read_stream_2, bhnd_read_stream_2),
DEVMETHOD(bhnd_bus_read_stream_4, bhnd_read_stream_4),
@@ -654,7 +672,29 @@ static device_method_t bhnd_methods[] = {
DEVMETHOD(bhnd_bus_write_multi_stream_2,bhnd_write_multi_stream_2),
DEVMETHOD(bhnd_bus_write_multi_stream_4,bhnd_write_multi_stream_4),
- DEVMETHOD(bhnd_bus_barrier, bhnd_barrier),
+ DEVMETHOD(bhnd_bus_set_multi_1, bhnd_set_multi_1),
+ DEVMETHOD(bhnd_bus_set_multi_2, bhnd_set_multi_2),
+ DEVMETHOD(bhnd_bus_set_multi_4, bhnd_set_multi_4),
+
+ DEVMETHOD(bhnd_bus_set_region_1, bhnd_set_region_1),
+ DEVMETHOD(bhnd_bus_set_region_2, bhnd_set_region_2),
+ DEVMETHOD(bhnd_bus_set_region_4, bhnd_set_region_4),
+
+ DEVMETHOD(bhnd_bus_read_region_1, bhnd_read_region_1),
+ DEVMETHOD(bhnd_bus_read_region_2, bhnd_read_region_2),
+ DEVMETHOD(bhnd_bus_read_region_4, bhnd_read_region_4),
+ DEVMETHOD(bhnd_bus_write_region_1, bhnd_write_region_1),
+ DEVMETHOD(bhnd_bus_write_region_2, bhnd_write_region_2),
+ DEVMETHOD(bhnd_bus_write_region_4, bhnd_write_region_4),
+
+ DEVMETHOD(bhnd_bus_read_region_stream_1,bhnd_read_region_stream_1),
+ DEVMETHOD(bhnd_bus_read_region_stream_2,bhnd_read_region_stream_2),
+ DEVMETHOD(bhnd_bus_read_region_stream_4,bhnd_read_region_stream_4),
+ DEVMETHOD(bhnd_bus_write_region_stream_1, bhnd_write_region_stream_1),
+ DEVMETHOD(bhnd_bus_write_region_stream_2, bhnd_write_region_stream_2),
+ DEVMETHOD(bhnd_bus_write_region_stream_4, bhnd_write_region_stream_4),
+
+ DEVMETHOD(bhnd_bus_barrier, bhnd_barrier),
DEVMETHOD_END
};
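The BHND_IO_MISC template above generates one forwarding wrapper per bus I/O operation, each delegating to the parent device's method. As an illustration, the expansion for the 4-byte region read comes out approximately as follows (a sketch of the preprocessor output, not additional source):

/* Approximate expansion of BHND_IO_MISC(uint32_t*, read_region_4,
 * READ_REGION_4), shown only to illustrate the delegation pattern. */
static void
bhnd_read_region_4(device_t dev, device_t child,
    struct bhnd_resource *r, bus_size_t offset, uint32_t *datap,
    bus_size_t count)
{
        BHND_BUS_READ_REGION_4(device_get_parent(dev), child, r,
            offset, datap, count);
}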
diff --git a/sys/dev/bhnd/bhnd.h b/sys/dev/bhnd/bhnd.h
index d01903f..6420636 100644
--- a/sys/dev/bhnd/bhnd.h
+++ b/sys/dev/bhnd/bhnd.h
@@ -561,6 +561,21 @@ bhnd_get_chipid(device_t dev) {
};
/**
+ * Return the BHND attachment type of the parent bhnd bus.
+ *
+ * @param dev A bhnd bus child device.
+ *
+ * @retval BHND_ATTACH_ADAPTER if the bus is resident on a bridged adapter,
+ * such as a WiFi chipset.
+ * @retval BHND_ATTACH_NATIVE if the bus provides hardware services (clock,
+ * CPU, etc) to a directly attached native host.
+ */
+static inline bhnd_attach_type
+bhnd_get_attach_type (device_t dev) {
+ return (BHND_BUS_GET_ATTACH_TYPE(device_get_parent(dev), dev));
+}
+
+/**
* Attempt to read the BHND board identification from the bhnd bus.
*
* This relies on NVRAM access, and will fail if a valid NVRAM device cannot
@@ -869,6 +884,12 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_READ_MULTI_1( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_read_region_1(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_read_region_1((r)->res, (o), (d), (c)) : \
+ BHND_BUS_READ_REGION_1( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
#define bhnd_bus_write_1(r, o, v) \
((r)->direct) ? \
bus_write_1((r)->res, (o), (v)) : \
@@ -881,6 +902,12 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_WRITE_MULTI_1( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_write_region_1(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_write_region_1((r)->res, (o), (d), (c)) : \
+ BHND_BUS_WRITE_REGION_1( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
#define bhnd_bus_read_stream_1(r, o) \
((r)->direct) ? \
bus_read_stream_1((r)->res, (o)) : \
@@ -893,6 +920,12 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_READ_MULTI_STREAM_1( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_read_region_stream_1(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_read_region_stream_1((r)->res, (o), (d), (c)) : \
+ BHND_BUS_READ_REGION_STREAM_1( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
#define bhnd_bus_write_stream_1(r, o, v) \
((r)->direct) ? \
bus_write_stream_1((r)->res, (o), (v)) : \
@@ -905,6 +938,24 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_WRITE_MULTI_STREAM_1( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_write_region_stream_1(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_write_region_stream_1((r)->res, (o), (d), (c)) : \
+ BHND_BUS_WRITE_REGION_STREAM_1( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_set_multi_1(r, o, v, c) \
+ ((r)->direct) ? \
+ bus_set_multi_1((r)->res, (o), (v), (c)) : \
+ BHND_BUS_SET_MULTI_1( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (v), (c))
+#define bhnd_bus_set_region_1(r, o, v, c) \
+ ((r)->direct) ? \
+ bus_set_region_1((r)->res, (o), (v), (c)) : \
+ BHND_BUS_SET_REGION_1( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (v), (c))
#define bhnd_bus_read_2(r, o) \
((r)->direct) ? \
bus_read_2((r)->res, (o)) : \
@@ -917,6 +968,12 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_READ_MULTI_2( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_read_region_2(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_read_region_2((r)->res, (o), (d), (c)) : \
+ BHND_BUS_READ_REGION_2( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
#define bhnd_bus_write_2(r, o, v) \
((r)->direct) ? \
bus_write_2((r)->res, (o), (v)) : \
@@ -929,6 +986,12 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_WRITE_MULTI_2( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_write_region_2(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_write_region_2((r)->res, (o), (d), (c)) : \
+ BHND_BUS_WRITE_REGION_2( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
#define bhnd_bus_read_stream_2(r, o) \
((r)->direct) ? \
bus_read_stream_2((r)->res, (o)) : \
@@ -941,6 +1004,12 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_READ_MULTI_STREAM_2( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_read_region_stream_2(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_read_region_stream_2((r)->res, (o), (d), (c)) : \
+ BHND_BUS_READ_REGION_STREAM_2( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
#define bhnd_bus_write_stream_2(r, o, v) \
((r)->direct) ? \
bus_write_stream_2((r)->res, (o), (v)) : \
@@ -953,6 +1022,24 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_WRITE_MULTI_STREAM_2( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_write_region_stream_2(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_write_region_stream_2((r)->res, (o), (d), (c)) : \
+ BHND_BUS_WRITE_REGION_STREAM_2( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_set_multi_2(r, o, v, c) \
+ ((r)->direct) ? \
+ bus_set_multi_2((r)->res, (o), (v), (c)) : \
+ BHND_BUS_SET_MULTI_2( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (v), (c))
+#define bhnd_bus_set_region_2(r, o, v, c) \
+ ((r)->direct) ? \
+ bus_set_region_2((r)->res, (o), (v), (c)) : \
+ BHND_BUS_SET_REGION_2( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (v), (c))
#define bhnd_bus_read_4(r, o) \
((r)->direct) ? \
bus_read_4((r)->res, (o)) : \
@@ -965,6 +1052,12 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_READ_MULTI_4( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_read_region_4(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_read_region_4((r)->res, (o), (d), (c)) : \
+ BHND_BUS_READ_REGION_4( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
#define bhnd_bus_write_4(r, o, v) \
((r)->direct) ? \
bus_write_4((r)->res, (o), (v)) : \
@@ -977,6 +1070,12 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_WRITE_MULTI_4( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_write_region_4(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_write_region_4((r)->res, (o), (d), (c)) : \
+ BHND_BUS_WRITE_REGION_4( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
#define bhnd_bus_read_stream_4(r, o) \
((r)->direct) ? \
bus_read_stream_4((r)->res, (o)) : \
@@ -989,6 +1088,12 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_READ_MULTI_STREAM_4( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_read_region_stream_4(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_read_region_stream_4((r)->res, (o), (d), (c)) : \
+ BHND_BUS_READ_REGION_STREAM_4( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
#define bhnd_bus_write_stream_4(r, o, v) \
((r)->direct) ? \
bus_write_stream_4((r)->res, (o), (v)) : \
@@ -1001,5 +1106,23 @@ bhnd_get_region_addr(device_t dev, bhnd_port_type port_type, u_int port,
BHND_BUS_WRITE_MULTI_STREAM_4( \
device_get_parent(rman_get_device((r)->res)), \
rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_write_region_stream_4(r, o, d, c) \
+ ((r)->direct) ? \
+ bus_write_region_stream_4((r)->res, (o), (d), (c)) : \
+ BHND_BUS_WRITE_REGION_STREAM_4( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (d), (c))
+#define bhnd_bus_set_multi_4(r, o, v, c) \
+ ((r)->direct) ? \
+ bus_set_multi_4((r)->res, (o), (v), (c)) : \
+ BHND_BUS_SET_MULTI_4( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (v), (c))
+#define bhnd_bus_set_region_4(r, o, v, c) \
+ ((r)->direct) ? \
+ bus_set_region_4((r)->res, (o), (v), (c)) : \
+ BHND_BUS_SET_REGION_4( \
+ device_get_parent(rman_get_device((r)->res)), \
+ rman_get_device((r)->res), (r), (o), (v), (c))
#endif /* _BHND_BHND_H_ */
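Each of the new bhnd_bus_*_region and bhnd_bus_set_* macros checks r->direct and either calls the plain bus accessor or routes the request to the parent bus method. A hedged usage sketch, assuming a hypothetical driver that already holds an allocated, active struct bhnd_resource:

/* Usage sketch only; example_copy_regs() and the offsets are hypothetical. */
static void
example_copy_regs(struct bhnd_resource *res)
{
        uint32_t regs[4];

        /* Bulk-read four 32-bit registers starting at offset 0x100.
         * Expands to bus_read_region_4() when the resource is mapped
         * directly, or to the parent's method when it is indirect. */
        bhnd_bus_read_region_4(res, 0x100, regs, nitems(regs));

        /* Zero-fill the same window. */
        bhnd_bus_set_region_4(res, 0x100, 0, nitems(regs));
}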
diff --git a/sys/dev/bhnd/bhnd_bus_if.m b/sys/dev/bhnd/bhnd_bus_if.m
index 35402a4..e7a117c 100644
--- a/sys/dev/bhnd/bhnd_bus_if.m
+++ b/sys/dev/bhnd/bhnd_bus_if.m
@@ -55,6 +55,12 @@ CODE {
panic("bhnd_bus_get_chipid unimplemented");
}
+ static bhnd_attach_type
+ bhnd_bus_null_get_attach_type(device_t dev, device_t child)
+ {
+ panic("bhnd_bus_get_attach_type unimplemented");
+ }
+
static int
bhnd_bus_null_read_board_info(device_t dev, device_t child,
struct bhnd_board_info *info)
@@ -184,6 +190,22 @@ METHOD const struct bhnd_chipid * get_chipid {
} DEFAULT bhnd_bus_null_get_chipid;
/**
+ * Return the BHND attachment type of the parent bus.
+ *
+ * @param dev The device whose child is being examined.
+ * @param child The child device.
+ *
+ * @retval BHND_ATTACH_ADAPTER if the bus is resident on a bridged adapter,
+ * such as a WiFi chipset.
+ * @retval BHND_ATTACH_NATIVE if the bus provides hardware services (clock,
+ * CPU, etc) to a directly attached native host.
+ */
+METHOD bhnd_attach_type get_attach_type {
+ device_t dev;
+ device_t child;
+} DEFAULT bhnd_bus_null_get_attach_type;
+
+/**
* Attempt to read the BHND board identification from the parent bus.
*
* This relies on NVRAM access, and will fail if a valid NVRAM device cannot
@@ -670,6 +692,192 @@ METHOD void write_multi_stream_4 {
bus_size_t count;
}
+/** An implementation of bus_set_multi_1() compatible with bhnd_resource */
+METHOD void set_multi_1 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint8_t value;
+ bus_size_t count;
+}
+
+/** An implementation of bus_set_multi_2() compatible with bhnd_resource */
+METHOD void set_multi_2 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint16_t value;
+ bus_size_t count;
+}
+
+/** An implementation of bus_set_multi_4() compatible with bhnd_resource */
+METHOD void set_multi_4 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint32_t value;
+ bus_size_t count;
+}
+
+/** An implementation of bus_set_region_1() compatible with bhnd_resource */
+METHOD void set_region_1 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint8_t value;
+ bus_size_t count;
+}
+
+/** An implementation of bus_set_region_2() compatible with bhnd_resource */
+METHOD void set_region_2 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint16_t value;
+ bus_size_t count;
+}
+
+/** An implementation of bus_set_region_4() compatible with bhnd_resource */
+METHOD void set_region_4 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint32_t value;
+ bus_size_t count;
+}
+
+/** An implementation of bus_read_region_1() compatible with bhnd_resource */
+METHOD void read_region_1 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint8_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_read_region_2() compatible with bhnd_resource */
+METHOD void read_region_2 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint16_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_read_region_4() compatible with bhnd_resource */
+METHOD void read_region_4 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint32_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_read_region_stream_1() compatible with
+ * bhnd_resource */
+METHOD void read_region_stream_1 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint8_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_read_region_stream_2() compatible with
+ * bhnd_resource */
+METHOD void read_region_stream_2 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint16_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_read_region_stream_4() compatible with
+ * bhnd_resource */
+METHOD void read_region_stream_4 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint32_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_write_region_1() compatible with bhnd_resource */
+METHOD void write_region_1 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint8_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_write_region_2() compatible with bhnd_resource */
+METHOD void write_region_2 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint16_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_write_region_4() compatible with bhnd_resource */
+METHOD void write_region_4 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint32_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_write_region_stream_1() compatible with
+ * bhnd_resource */
+METHOD void write_region_stream_1 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint8_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_write_region_stream_2() compatible with
+ * bhnd_resource */
+METHOD void write_region_stream_2 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint16_t *datap;
+ bus_size_t count;
+}
+
+/** An implementation of bus_write_region_stream_4() compatible with
+ * bhnd_resource */
+METHOD void write_region_stream_4 {
+ device_t dev;
+ device_t child;
+ struct bhnd_resource *r;
+ bus_size_t offset;
+ uint32_t *datap;
+ bus_size_t count;
+}
+
/** An implementation of bus_barrier() compatible with bhnd_resource */
METHOD void barrier {
device_t dev;
diff --git a/sys/dev/bhnd/bhnd_types.h b/sys/dev/bhnd/bhnd_types.h
index d31c00b..6440f0e 100644
--- a/sys/dev/bhnd/bhnd_types.h
+++ b/sys/dev/bhnd/bhnd_types.h
@@ -73,6 +73,15 @@ typedef enum {
BHND_PORT_AGENT = 2, /**< interconnect agent/wrapper */
} bhnd_port_type;
+/**
+ * bhnd(4) attachment types.
+ */
+typedef enum {
+ BHND_ATTACH_ADAPTER = 0, /**< A bridged card, such as a PCI WiFi chipset */
+ BHND_ATTACH_NATIVE = 1 /**< A bus resident on the native host, such as
+ * the primary or secondary bus of an embedded
+ * SoC */
+} bhnd_attach_type;
/** Evaluates to true if @p cls is a device class that can be configured
* as a host bridge device. */
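bhnd_get_attach_type() gives child drivers a way to distinguish a bridged adapter from a native SoC attachment. A hypothetical fragment showing how a core driver might branch on it (illustration only; the driver and its attach routine are made up):

/* Hypothetical child-driver fragment, for illustration only. */
static int
example_core_attach(device_t dev)
{
        if (bhnd_get_attach_type(dev) == BHND_ATTACH_NATIVE) {
                /* Native SoC attachment: this core may provide
                 * clock/CPU services to the host. */
        } else {
                /* BHND_ATTACH_ADAPTER: bridged (e.g. PCI Wi-Fi) card. */
        }
        return (0);
}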
diff --git a/sys/dev/bhnd/bhndb/bhnd_bhndb.c b/sys/dev/bhnd/bhndb/bhnd_bhndb.c
new file mode 100644
index 0000000..c561b4d
--- /dev/null
+++ b/sys/dev/bhnd/bhndb/bhnd_bhndb.c
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+
+#include <dev/bhnd/bhnd_ids.h>
+#include <dev/bhnd/bhnd.h>
+
+#include "bhndbvar.h"
+
+/*
+ * bhnd(4) driver mix-in providing shared common methods for
+ * bhnd devices attached via a bhndb bridge.
+ */
+
+static int
+bhnd_bhndb_read_board_info(device_t dev, device_t child,
+ struct bhnd_board_info *info)
+{
+ int error;
+
+ /* Initialize with NVRAM-derived values */
+ if ((error = bhnd_bus_generic_read_board_info(dev, child, info)))
+ return (error);
+
+ /* Let the bridge fill in any additional data */
+ return (BHNDB_POPULATE_BOARD_INFO(device_get_parent(dev), dev, info));
+}
+
+static bhnd_attach_type
+bhnd_bhndb_get_attach_type(device_t dev, device_t child)
+{
+ /* It's safe to assume that a bridged device is always an adapter */
+ return (BHND_ATTACH_ADAPTER);
+}
+
+static device_method_t bhnd_bhndb_methods[] = {
+ /* BHND interface */
+ DEVMETHOD(bhnd_bus_get_attach_type, bhnd_bhndb_get_attach_type),
+ DEVMETHOD(bhnd_bus_read_board_info, bhnd_bhndb_read_board_info),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(bhnd, bhnd_bhndb_driver, bhnd_bhndb_methods, 0);
diff --git a/sys/dev/bhnd/bhndb/bhndb.c b/sys/dev/bhnd/bhndb/bhndb.c
index 218827f..1591988 100644
--- a/sys/dev/bhnd/bhndb/bhndb.c
+++ b/sys/dev/bhnd/bhndb/bhndb.c
@@ -1144,11 +1144,16 @@ bhndb_adjust_resource(device_t dev, device_t child, int type,
{
struct bhndb_softc *sc;
struct rman *rm;
+ rman_res_t mstart, mend;
int error;
sc = device_get_softc(dev);
error = 0;
+ /* Verify basic constraints */
+ if (end <= start)
+ return (EINVAL);
+
/* Fetch resource manager */
rm = bhndb_get_rman(sc, child, type);
if (rm == NULL)
@@ -1157,16 +1162,29 @@ bhndb_adjust_resource(device_t dev, device_t child, int type,
if (!rman_is_region_manager(r, rm))
return (ENXIO);
- /* If active, adjustment is limited by the assigned window. */
BHNDB_LOCK(sc);
- // TODO: Currently unsupported
- error = ENODEV;
+ /* If not active, allow any range permitted by the resource manager */
+ if (!(rman_get_flags(r) & RF_ACTIVE))
+ goto done;
- BHNDB_UNLOCK(sc);
+ /* Otherwise, the range is limited to the existing register window
+ * mapping */
+ error = bhndb_find_resource_limits(sc->bus_res, r, &mstart, &mend);
+ if (error)
+ goto done;
+
+ if (start < mstart || end > mend) {
+ error = EINVAL;
+ goto done;
+ }
+
+ /* Fall through */
+done:
if (!error)
error = rman_adjust_resource(r, start, end);
+ BHNDB_UNLOCK(sc);
return (error);
}
@@ -1536,7 +1554,8 @@ bhndb_activate_bhnd_resource(device_t dev, device_t child,
if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) {
bhndb_priority_t r_prio;
- region = bhndb_find_resource_region(sc->bus_res, r_start, r_size);
+ region = bhndb_find_resource_region(sc->bus_res, r_start,
+ r_size);
if (region != NULL)
r_prio = region->priority;
else
@@ -1766,15 +1785,15 @@ bhndb_bus_write_ ## _name (device_t dev, device_t child, \
BHNDB_IO_COMMON_TEARDOWN(); \
}
-/* Defines a bhndb_bus_(read|write)_multi_* method implementation */
-#define BHNDB_IO_MULTI(_type, _rw, _name) \
+/* Defines a bhndb_bus_(read|write|set)_(multi|region)_* method */
+#define BHNDB_IO_MISC(_type, _ptr, _op, _size) \
static void \
-bhndb_bus_ ## _rw ## _multi_ ## _name (device_t dev, \
+bhndb_bus_ ## _op ## _ ## _size (device_t dev, \
device_t child, struct bhnd_resource *r, bus_size_t offset, \
- _type *datap, bus_size_t count) \
+ _type _ptr datap, bus_size_t count) \
{ \
BHNDB_IO_COMMON_SETUP(sizeof(_type) * count); \
- bus_ ## _rw ## _multi_ ## _name (io_res, io_offset, \
+ bus_ ## _op ## _ ## _size (io_res, io_offset, \
datap, count); \
BHNDB_IO_COMMON_TEARDOWN(); \
}
@@ -1787,11 +1806,19 @@ bhndb_bus_ ## _rw ## _multi_ ## _name (device_t dev, \
BHNDB_IO_READ(_type, stream_ ## _size) \
BHNDB_IO_WRITE(_type, stream_ ## _size) \
\
- BHNDB_IO_MULTI(_type, read, _size) \
- BHNDB_IO_MULTI(_type, write, _size) \
+ BHNDB_IO_MISC(_type, *, read_multi, _size) \
+ BHNDB_IO_MISC(_type, *, write_multi, _size) \
+ \
+ BHNDB_IO_MISC(_type, *, read_multi_stream, _size) \
+ BHNDB_IO_MISC(_type, *, write_multi_stream, _size) \
+ \
+ BHNDB_IO_MISC(_type, , set_multi, _size) \
+ BHNDB_IO_MISC(_type, , set_region, _size) \
+ BHNDB_IO_MISC(_type, *, read_region, _size) \
+ BHNDB_IO_MISC(_type, *, write_region, _size) \
\
- BHNDB_IO_MULTI(_type, read, stream_ ## _size) \
- BHNDB_IO_MULTI(_type, write, stream_ ## _size)
+ BHNDB_IO_MISC(_type, *, read_region_stream, _size) \
+ BHNDB_IO_MISC(_type, *, write_region_stream, _size)
BHNDB_IO_METHODS(uint8_t, 1);
BHNDB_IO_METHODS(uint16_t, 2);
@@ -1804,24 +1831,9 @@ static void
bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r,
bus_size_t offset, bus_size_t length, int flags)
{
- bus_size_t remain;
-
BHNDB_IO_COMMON_SETUP(length);
- /* TODO: It's unclear whether we need a barrier implementation,
- * and if we do, what it needs to actually do. This may need
- * revisiting once we have a better idea of requirements after
- * porting the core drivers. */
- panic("implementation incorrect");
-
- /* Use 4-byte reads where possible */
- remain = length % sizeof(uint32_t);
- for (bus_size_t i = 0; i < (length - remain); i += 4)
- bus_read_4(io_res, io_offset + offset + i);
-
- /* Use 1 byte reads for the remainder */
- for (bus_size_t i = 0; i < remain; i++)
- bus_read_1(io_res, io_offset + offset + length + i);
+ bus_barrier(io_res, io_offset + offset, length, flags);
BHNDB_IO_COMMON_TEARDOWN();
}
@@ -1970,6 +1982,27 @@ static device_method_t bhndb_methods[] = {
DEVMETHOD(bhnd_bus_write_multi_stream_2,bhndb_bus_write_multi_stream_2),
DEVMETHOD(bhnd_bus_write_multi_stream_4,bhndb_bus_write_multi_stream_4),
+ DEVMETHOD(bhnd_bus_set_multi_1, bhndb_bus_set_multi_1),
+ DEVMETHOD(bhnd_bus_set_multi_2, bhndb_bus_set_multi_2),
+ DEVMETHOD(bhnd_bus_set_multi_4, bhndb_bus_set_multi_4),
+ DEVMETHOD(bhnd_bus_set_region_1, bhndb_bus_set_region_1),
+ DEVMETHOD(bhnd_bus_set_region_2, bhndb_bus_set_region_2),
+ DEVMETHOD(bhnd_bus_set_region_4, bhndb_bus_set_region_4),
+
+ DEVMETHOD(bhnd_bus_read_region_1, bhndb_bus_read_region_1),
+ DEVMETHOD(bhnd_bus_read_region_2, bhndb_bus_read_region_2),
+ DEVMETHOD(bhnd_bus_read_region_4, bhndb_bus_read_region_4),
+ DEVMETHOD(bhnd_bus_write_region_1, bhndb_bus_write_region_1),
+ DEVMETHOD(bhnd_bus_write_region_2, bhndb_bus_write_region_2),
+ DEVMETHOD(bhnd_bus_write_region_4, bhndb_bus_write_region_4),
+
+ DEVMETHOD(bhnd_bus_read_region_stream_1,bhndb_bus_read_region_stream_1),
+ DEVMETHOD(bhnd_bus_read_region_stream_2,bhndb_bus_read_region_stream_2),
+ DEVMETHOD(bhnd_bus_read_region_stream_4,bhndb_bus_read_region_stream_4),
+ DEVMETHOD(bhnd_bus_write_region_stream_1,bhndb_bus_write_region_stream_1),
+ DEVMETHOD(bhnd_bus_write_region_stream_2,bhndb_bus_write_region_stream_2),
+ DEVMETHOD(bhnd_bus_write_region_stream_4,bhndb_bus_write_region_stream_4),
+
DEVMETHOD(bhnd_bus_barrier, bhndb_bus_barrier),
DEVMETHOD_END
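BHNDB_IO_MISC parameterizes both the operation name and whether the data argument is a pointer, so one template covers the read/write (pointer) and set (value) variants. For example, BHNDB_IO_MISC(uint32_t, , set_region, 4) expands approximately to (sketch of the preprocessor output, not additional source):

static void
bhndb_bus_set_region_4(device_t dev, device_t child,
    struct bhnd_resource *r, bus_size_t offset, uint32_t datap,
    bus_size_t count)
{
        BHNDB_IO_COMMON_SETUP(sizeof(uint32_t) * count);
        bus_set_region_4(io_res, io_offset, datap, count);
        BHNDB_IO_COMMON_TEARDOWN();
}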
diff --git a/sys/dev/bhnd/bhndb/bhndb.h b/sys/dev/bhnd/bhndb/bhndb.h
index 366f119..5242ee1 100644
--- a/sys/dev/bhnd/bhndb/bhndb.h
+++ b/sys/dev/bhnd/bhndb/bhndb.h
@@ -44,6 +44,7 @@
#include "bhndb_bus_if.h"
extern devclass_t bhndb_devclass;
+DECLARE_CLASS(bhnd_bhndb_driver);
int bhndb_attach_bridge(device_t parent, device_t *bhndb, int unit);
diff --git a/sys/dev/bhnd/bhndb/bhndb_private.h b/sys/dev/bhnd/bhndb/bhndb_private.h
index 4171b53..7d5b7f6 100644
--- a/sys/dev/bhnd/bhndb/bhndb_private.h
+++ b/sys/dev/bhnd/bhndb/bhndb_private.h
@@ -71,6 +71,11 @@ int bhndb_add_resource_region(
bhndb_priority_t priority,
const struct bhndb_regwin *static_regwin);
+int bhndb_find_resource_limits(
+ struct bhndb_resources *br,
+ struct resource *r, rman_res_t *start,
+ rman_res_t *end);
+
struct bhndb_region *bhndb_find_resource_region(
struct bhndb_resources *br,
bhnd_addr_t addr, bhnd_size_t size);
@@ -133,7 +138,7 @@ const struct bhndb_hw_priority *bhndb_hw_priority_find_device(
* Dynamic register window allocation reference.
*/
struct bhndb_dw_rentry {
- struct resource *dw_res; /**< child resource */
+ struct resource *dw_res; /**< child resource */
LIST_ENTRY(bhndb_dw_rentry) dw_link;
};
diff --git a/sys/dev/bhnd/bhndb/bhndb_subr.c b/sys/dev/bhnd/bhndb/bhndb_subr.c
index c9dd99c..32ec8c8 100644
--- a/sys/dev/bhnd/bhndb/bhndb_subr.c
+++ b/sys/dev/bhnd/bhndb/bhndb_subr.c
@@ -563,8 +563,52 @@ bhndb_add_resource_region(struct bhndb_resources *br, bhnd_addr_t addr,
return (0);
}
+
+/**
+ * Find the maximum start and end limits of the register window mapping
+ * resource @p r.
+ *
+ * If the memory range is not mapped by an existing dynamic or static register
+ * window, ENOENT will be returned.
+ *
+ * @param br The resource state to search.
+ * @param r The resource to search for in @p br.
+ * @param[out] start On success, the minimum start address of the mapping
+ * register window.
+ * @param[out] end On success, the maximum end address of the mapping
+ * register window.
+ *
+ * @retval 0 success
+ * @retval ENOENT if @p r is not mapped by an existing register window.
+ */
+int
+bhndb_find_resource_limits(struct bhndb_resources *br, struct resource *r,
+ rman_res_t *start, rman_res_t *end)
+{
+ struct bhndb_dw_alloc *dynamic;
+ struct bhndb_region *sregion;
+
+ /* Check for an enclosing dynamic register window */
+ if ((dynamic = bhndb_dw_find_resource(br, r))) {
+ *start = dynamic->target;
+ *end = dynamic->target + dynamic->win->win_size - 1;
+ return (0);
+ }
+
+ /* Check for a static region */
+ sregion = bhndb_find_resource_region(br, rman_get_start(r),
+ rman_get_size(r));
+ if (sregion != NULL && sregion->static_regwin != NULL) {
+ *start = sregion->addr;
+ *end = sregion->addr + sregion->size - 1;
+
+ return (0);
+ }
+
+ /* Not found */
+ return (ENOENT);
+}
+
/**
- * Find a bus region that maps @p size bytes at @p addr.
+ * Find the bus region that maps @p size bytes at @p addr.
*
* @param br The resource state to search.
* @param addr The requested starting address.
diff --git a/sys/dev/bhnd/nvram/bhnd_sprom.c b/sys/dev/bhnd/nvram/bhnd_sprom.c
index 54c1faa..6cddb60 100644
--- a/sys/dev/bhnd/nvram/bhnd_sprom.c
+++ b/sys/dev/bhnd/nvram/bhnd_sprom.c
@@ -508,7 +508,6 @@ sprom_direct_read(struct bhnd_sprom *sc, size_t offset, void *buf,
size_t nbytes, uint8_t *crc)
{
bus_size_t res_offset;
- size_t nread;
uint16_t *p;
KASSERT(nbytes % sizeof(uint16_t) == 0, ("unaligned sprom size"));
@@ -520,15 +519,12 @@ sprom_direct_read(struct bhnd_sprom *sc, size_t offset, void *buf,
return (EINVAL);
}
+ /* Perform read and update CRC */
p = (uint16_t *)buf;
res_offset = sc->sp_res_off + offset;
- /* Perform read */
- for (nread = 0; nread < nbytes; nread += 2) {
- *p = bhnd_bus_read_stream_2(sc->sp_res, res_offset+nread);
- *crc = bhnd_nvram_crc8(p, sizeof(*p), *crc);
- p++;
- };
+ bhnd_bus_read_region_stream_2(sc->sp_res, res_offset, p, nbytes);
+ *crc = bhnd_nvram_crc8(p, nbytes, *crc);
return (0);
}
@@ -569,4 +565,4 @@ sprom_var_defn(struct bhnd_sprom *sc, const char *name,
/* Not supported by this SPROM revision */
return (ENOENT);
-} \ No newline at end of file
+}
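The per-word loop is replaced by one region read plus a single CRC call over the whole buffer; this is equivalent because the CRC update is byte-sequential, so feeding it the bytes in one call or in two-byte chunks yields the same value. A standalone sketch of that property using a stand-in CRC-8 (not the kernel's bhnd_nvram_crc8):

/* Illustration only: a byte-sequential CRC gives the same result whether
 * it is updated over the whole buffer or chunk by chunk.  crc8_update()
 * is a stand-in (polynomial 0x07), not the kernel's bhnd_nvram_crc8(). */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint8_t
crc8_update(const void *buf, size_t len, uint8_t crc)
{
        const uint8_t *p = buf;
        int bit;

        while (len--) {
                crc ^= *p++;
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : crc << 1;
        }
        return (crc);
}

int
main(void)
{
        uint8_t data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t whole, chunked;
        size_t i;

        /* One pass over the whole buffer... */
        whole = crc8_update(data, sizeof(data), 0xff);

        /* ...versus the old-style two-bytes-at-a-time updates. */
        chunked = 0xff;
        for (i = 0; i < sizeof(data); i += 2)
                chunked = crc8_update(&data[i], 2, chunked);

        assert(whole == chunked);
        return (0);
}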
diff --git a/sys/dev/bhnd/siba/siba_bhndb.c b/sys/dev/bhnd/siba/siba_bhndb.c
index 4055c12..0a454a9 100644
--- a/sys/dev/bhnd/siba/siba_bhndb.c
+++ b/sys/dev/bhnd/siba/siba_bhndb.c
@@ -205,20 +205,6 @@ siba_bhndb_resume_child(device_t dev, device_t child)
return (0);
}
-static int
-siba_bhndb_read_board_info(device_t dev, device_t child,
- struct bhnd_board_info *info)
-{
- int error;
-
- /* Initialize with NVRAM-derived values */
- if ((error = bhnd_bus_generic_read_board_info(dev, child, info)))
- return (error);
-
- /* Let the bridge fill in any additional data */
- return (BHNDB_POPULATE_BOARD_INFO(device_get_parent(dev), dev, info));
-}
-
/* Work-around implementation for SIBA_QUIRK_PCIE_D11_SB_TIMEOUT */
static int
siba_bhndb_wars_pcie_clear_d11_timeout(struct siba_softc *sc)
@@ -285,14 +271,11 @@ static device_method_t siba_bhndb_methods[] = {
DEVMETHOD(bus_suspend_child, siba_bhndb_suspend_child),
DEVMETHOD(bus_resume_child, siba_bhndb_resume_child),
- /* BHND interface */
- DEVMETHOD(bhnd_bus_read_board_info, siba_bhndb_read_board_info),
-
DEVMETHOD_END
};
-DEFINE_CLASS_1(bhnd, siba_bhndb_driver, siba_bhndb_methods,
- sizeof(struct siba_softc), siba_driver);
+DEFINE_CLASS_2(bhnd, siba_bhndb_driver, siba_bhndb_methods,
+ sizeof(struct siba_softc), bhnd_bhndb_driver, siba_driver);
DRIVER_MODULE(siba_bhndb, bhndb, siba_bhndb_driver, bhnd_devclass, NULL, NULL);
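
The switch to DEFINE_CLASS_2() is what makes the removed read_board_info method unnecessary: kobj resolves methods missing from siba_bhndb_methods against the listed base classes in order, so the shared bhnd_bhndb base driver can supply them instead. A hypothetical skeleton of the pattern, with driver and softc names invented for illustration:

    static device_method_t foo_methods[] = {
            /* Override only what differs; the rest is inherited. */
            DEVMETHOD(device_probe,         foo_probe),
            DEVMETHOD_END
    };

    /*
     * Method lookups consult foo_methods first, then foo_base_driver,
     * then bar_base_driver.
     */
    DEFINE_CLASS_2(foo, foo_driver, foo_methods, sizeof(struct foo_softc),
        foo_base_driver, bar_base_driver);
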
diff --git a/sys/dev/bhnd/soc/bhnd_soc.c b/sys/dev/bhnd/soc/bhnd_soc.c
new file mode 100644
index 0000000..48a1893
--- /dev/null
+++ b/sys/dev/bhnd/soc/bhnd_soc.c
@@ -0,0 +1,266 @@
+/*-
+ * Copyright (c) 2016 Michael Zhilin <mizhka@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/errno.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <machine/resource.h>
+
+#include <dev/bhnd/bhnd_debug.h>
+#include <dev/bhnd/bhndvar.h>
+#include <dev/bhnd/bhndreg.h>
+#include <dev/bhnd/bhndb/bhndb.h>
+#include <dev/bhnd/soc/bhnd_soc.h>
+
+#include "bhndb_if.h"
+
+/*
+ * **************************** VARIABLES *************************************
+ */
+
+struct resource_spec bhnd_soc_default_rspec = {SYS_RES_MEMORY, 0, RF_ACTIVE};
+
+/*
+ * **************************** PROTOTYPES ************************************
+ */
+
+static int bhnd_soc_attach_bus(device_t dev, struct bhnd_soc_softc* sc);
+static int bhnd_soc_probe(device_t dev);
+static int bhnd_soc_attach(device_t dev);
+int bhnd_soc_attach_by_class(device_t parent, device_t *child,
+ int unit, devclass_t child_devclass);
+
+/*
+ * **************************** IMPLEMENTATION ********************************
+ */
+
+int
+bhnd_soc_attach_by_class(device_t parent, device_t *child, int unit,
+ devclass_t child_devclass)
+{
+ int error;
+ struct bhnd_soc_devinfo* devinfo;
+
+ *child = device_add_child(parent, devclass_get_name(child_devclass),
+ unit);
+ if (*child == NULL)
+ return (ENXIO);
+
+ devinfo = malloc(sizeof(struct bhnd_soc_devinfo), M_BHND, M_NOWAIT);
+ resource_list_init(&devinfo->resources);
+
+ for (int i = 0; i < BHND_SOC_MAXNUM_CORES; i++)
+ resource_list_add(&devinfo->resources, SYS_RES_MEMORY, i,
+ BHND_SOC_RAM_OFFSET, BHND_SOC_RAM_SIZE, 1);
+
+ device_set_ivars(*child, devinfo);
+
+ error = device_probe_and_attach(*child);
+ if (error && device_delete_child(parent, *child))
+ BHND_ERROR_DEV(parent, "failed to detach bhndb child");
+
+ return (error);
+}
+
+static int
+bhnd_soc_attach_bus(device_t dev, struct bhnd_soc_softc* sc)
+{
+ int error;
+
+ error = bhnd_read_chipid(dev, &bhnd_soc_default_rspec,
+ BHND_DEFAULT_CHIPC_ADDR, &sc->chipid);
+
+ if (error) {
+ return (error);
+ }
+
+ return (bhnd_soc_attach_by_class(dev, &(sc->bus), -1, bhnd_devclass));
+}
+
+static int
+bhnd_soc_probe(device_t dev)
+{
+ return (BUS_PROBE_GENERIC);
+}
+
+static int
+bhnd_soc_attach(device_t dev)
+{
+ struct bhnd_soc_softc* sc;
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ return (bhnd_soc_attach_bus(dev, sc));
+}
+
+static const struct bhnd_chipid *
+bhnd_soc_get_chipid(device_t dev, device_t child)
+{
+ struct bhnd_soc_softc* sc;
+ sc = device_get_softc(dev);
+ return (&sc->chipid);
+}
+
+static struct resource_list *
+bhnd_soc_get_rl(device_t dev, device_t child)
+{
+ struct bhnd_soc_devinfo *dinfo;
+ dinfo = device_get_ivars(child);
+ return (&dinfo->resources);
+}
+
+static struct bhnd_resource *
+bhnd_soc_alloc_resource(device_t dev, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+ struct bhnd_soc_softc *sc;
+ struct bhnd_resource *br;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ /* Allocate resource wrapper */
+ br = malloc(sizeof(struct bhnd_resource), M_BHND, M_NOWAIT|M_ZERO);
+ if (br == NULL)
+ return (NULL);
+
+ BHND_TRACE_DEV(child,"trying to allocate resource %d: %jx-%jx (%ju)",
+ *rid, start, end, count);
+
+ /* Configure */
+ br->direct = true;
+ br->res = bus_alloc_resource(child, type, rid, start, end, count,
+ flags & ~RF_ACTIVE);
+ if (br->res == NULL) {
+ BHND_ERROR_DEV(child, "can't allocate resource %d: %jx-%jx (%ju)",
+ *rid, start, end, count);
+ goto failed;
+ }
+
+ if (flags & RF_ACTIVE) {
+ BHND_TRACE_DEV(child, "trying to activate resource: %d", *rid);
+ error = bhnd_activate_resource(child, type, *rid, br);
+ if (error) {
+ BHND_ERROR_DEV(child, "can't activate BHND resource %d:"
+ "%jx-%jx (%ju) with error: %d",
+ *rid, start, end, count, error);
+ goto failed;
+ }
+ }
+
+ return (br);
+
+failed:
+ if (br->res != NULL)
+ bus_release_resource(child, type, *rid, br->res);
+
+ free(br, M_BHND);
+ return (NULL);
+}
+
+static int
+bhnd_soc_activate_resource(device_t dev, device_t child, int type, int rid,
+ struct bhnd_resource *r)
+{
+ int error;
+
+ /*
+ * Fallback to direct
+ */
+ error = bus_activate_resource(child, type, rid, r->res);
+ if (error) {
+ BHND_ERROR_DEV(child, "can't activate resource %d, error: %d",
+ rman_get_rid(r->res), error);
+ return (error);
+ }
+ r->direct = true;
+ return (0);
+}
+
+static bool
+bhnd_soc_is_hw_disabled(device_t dev, device_t child)
+{
+ return (false);
+}
+
+static int
+bhnd_soc_get_attach_type(device_t dev, device_t child)
+{
+ return (BHND_ATTACH_NATIVE);
+}
+
+/*
+ * **************************** DRIVER METADATA ****************************
+ */
+
+static device_method_t bhnd_soc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, bhnd_soc_probe),
+ DEVMETHOD(device_attach, bhnd_soc_attach),
+ /* Resources */
+ DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
+ DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource),
+ DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
+ DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
+ DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
+ /* Interrupts */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+ DEVMETHOD(bus_config_intr, bus_generic_config_intr),
+ DEVMETHOD(bus_bind_intr, bus_generic_bind_intr),
+ DEVMETHOD(bus_describe_intr, bus_generic_describe_intr),
+
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ /* Resource list */
+ DEVMETHOD(bus_get_resource_list, bhnd_soc_get_rl),
+
+ /* BHND interface; BCMA allocates agent resources */
+ DEVMETHOD(bhnd_bus_alloc_resource, bhnd_soc_alloc_resource),
+ DEVMETHOD(bhnd_bus_activate_resource, bhnd_soc_activate_resource),
+ DEVMETHOD(bhnd_bus_is_hw_disabled, bhnd_soc_is_hw_disabled),
+ DEVMETHOD(bhnd_bus_get_chipid, bhnd_soc_get_chipid),
+ DEVMETHOD(bhnd_bus_get_attach_type, bhnd_soc_get_attach_type),
+
+ DEVMETHOD_END
+};
+
+devclass_t bhnd_soc_devclass;
+
+DEFINE_CLASS_0(bhnd_soc, bhnd_soc_driver, bhnd_soc_methods,
+ sizeof(struct bhnd_soc_softc));
+EARLY_DRIVER_MODULE(bhnd_soc, nexus, bhnd_soc_driver, bhnd_soc_devclass, NULL,
+ NULL, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/dev/bhnd/soc/bhnd_soc.h b/sys/dev/bhnd/soc/bhnd_soc.h
new file mode 100644
index 0000000..12b9d0e
--- /dev/null
+++ b/sys/dev/bhnd/soc/bhnd_soc.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2016 Michael Zhilin <mizhka@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * $FreeBSD$
+ */
+#ifndef _BHND_SOC_BHND_SOC_H_
+#define _BHND_SOC_BHND_SOC_H_
+
+#define BHND_SOC_MAXNUM_CORES 0x20
+#define BHND_SOC_RAM_OFFSET 0x0
+#define BHND_SOC_RAM_SIZE 0x20000000
+
+struct bhnd_soc_softc {
+ device_t dev;
+ device_t bridge;
+ device_t bus;
+ struct bhnd_chipid chipid; /* chip identification */
+};
+
+struct bhnd_soc_devinfo {
+ struct resource_list resources;
+};
+
+#endif /* _BHND_SOC_BHND_SOC_H_ */
diff --git a/sys/dev/bhnd/tools/bus_macro.sh b/sys/dev/bhnd/tools/bus_macro.sh
index a11ed0c..b5de494 100644
--- a/sys/dev/bhnd/tools/bus_macro.sh
+++ b/sys/dev/bhnd/tools/bus_macro.sh
@@ -1,6 +1,6 @@
#!/bin/sh
#
-# Copyright (c) 2015 Landon Fuller <landon@landonf.org>
+# Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
# Copyright (c) 2004-2005 Poul-Henning Kamp.
# All rights reserved.
#
@@ -63,8 +63,6 @@ macro () {
macro barrier o l f
-# We only support a subset of the bus I/O methods; this may
-# be expanded when/if additional functions are required.
for w in 1 2 4 #8
do
# macro copy_region_$w so dh do c
@@ -74,11 +72,20 @@ do
do
macro read_$s$w o
macro read_multi_$s$w o d c
-# macro read_region_$s$w o d c
-# macro set_multi_$s$w o v c
-# macro set_region_$s$w o v c
+ macro read_region_$s$w o d c
macro write_$s$w o v
macro write_multi_$s$w o d c
-# macro write_region_$s$w o d c
+ macro write_region_$s$w o d c
+ done
+
+ # set_(multi_)?_stream is not supported on ARM/ARM64, and so for
+ # simplicity, we don't support their use with bhnd resources.
+ #
+ # If that changes, these can be merged back into the stream-enabled
+ # loop above.
+ for s in ""
+ do
+ macro set_multi_$s$w o v c
+ macro set_region_$s$w o v c
done
done
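
The wrappers this script generates all follow the same split between directly-mapped and bridged bhnd resources. A hand-written approximation of the shape of one such accessor (a sketch only, not the script's literal output):

    static __inline uint32_t
    bhnd_bus_read_4_sketch(struct bhnd_resource *r, bus_size_t offset)
    {
            /* Direct resources use the plain bus_space accessor ... */
            if (r->direct)
                    return (bus_read_4(r->res, offset));

            /* ... bridged ones are routed through the parent's bus method. */
            return (BHND_BUS_READ_4(device_get_parent(rman_get_device(r->res)),
                rman_get_device(r->res), r, offset));
    }
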
diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c
index 615a3ad..d130024 100644
--- a/sys/dev/bwn/if_bwn.c
+++ b/sys/dev/bwn/if_bwn.c
@@ -1498,7 +1498,7 @@ bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
- uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
+ uint8_t bands[IEEE80211_MODE_BYTES];
memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
ic->ic_nchans = 0;
diff --git a/sys/dev/coretemp/coretemp.c b/sys/dev/coretemp/coretemp.c
index a7d5fdc..38f07fb 100644
--- a/sys/dev/coretemp/coretemp.c
+++ b/sys/dev/coretemp/coretemp.c
@@ -48,7 +48,7 @@ __FBSDID("$FreeBSD$");
#include <machine/cputypes.h>
#include <machine/md_var.h>
-#define TZ_ZEROC 2732
+#define TZ_ZEROC 2731
#define THERM_STATUS_LOG 0x02
#define THERM_STATUS 0x01
diff --git a/sys/dev/cpuctl/cpuctl.c b/sys/dev/cpuctl/cpuctl.c
index ec41a2e..a73595b 100644
--- a/sys/dev/cpuctl/cpuctl.c
+++ b/sys/dev/cpuctl/cpuctl.c
@@ -126,7 +126,7 @@ set_cpu(int cpu, struct thread *td)
sched_bind(td, cpu);
thread_unlock(td);
KASSERT(td->td_oncpu == cpu,
- ("[cpuctl,%d]: cannot bind to target cpu %d", __LINE__, cpu));
+ ("[cpuctl,%d]: cannot bind to target cpu %d on cpu %d", __LINE__, cpu, td->td_oncpu));
}
static void
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index 6332747..07a1f22 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -429,6 +429,7 @@ icl_cxgbei_new_conn(const char *name, struct mtx *lock)
ic->ic_max_data_segment_length = CXGBEI_MAX_DSL;
ic->ic_name = name;
ic->ic_offload = "cxgbei";
+ ic->ic_unmapped = false;
CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index 8cba8ca..e8acdc9 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -296,8 +296,10 @@ complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
toep->ddp_active_id = -1;
} else
toep->ddp_active_id ^= 1;
+#ifdef VERBOSE_TRACES
CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
toep->ddp_active_id);
+#endif
} else {
KASSERT(toep->ddp_active_count != 0 &&
toep->ddp_active_id != -1,
diff --git a/sys/dev/gpio/gpioiic.c b/sys/dev/gpio/gpioiic.c
index 925cdb9..f547be4 100644
--- a/sys/dev/gpio/gpioiic.c
+++ b/sys/dev/gpio/gpioiic.c
@@ -33,28 +33,25 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
-#include <sys/conf.h>
+#include <sys/gpio.h>
#include <sys/kernel.h>
#include <sys/module.h>
-#include <sys/gpio.h>
-#include "gpiobus_if.h"
-
#ifdef FDT
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
#endif
#include <dev/gpio/gpiobusvar.h>
-
#include <dev/iicbus/iiconf.h>
#include <dev/iicbus/iicbus.h>
+#include "gpiobus_if.h"
#include "iicbb_if.h"
-#define SCL_PIN_DEFAULT 0 /* default index of SCL pin on gpiobus */
-#define SDA_PIN_DEFAULT 1
+#define GPIOIIC_SCL_DFLT 0
+#define GPIOIIC_SDA_DFLT 1
+#define GPIOIIC_MIN_PINS 2
struct gpioiic_softc
{
@@ -69,7 +66,6 @@ static int gpioiic_attach(device_t);
/* iicbb interface */
static void gpioiic_reset_bus(device_t);
-static int gpioiic_callback(device_t, int, caddr_t);
static void gpioiic_setsda(device_t, int);
static void gpioiic_setscl(device_t, int);
static int gpioiic_getsda(device_t);
@@ -79,14 +75,24 @@ static int gpioiic_reset(device_t, u_char, u_char, u_char *);
static int
gpioiic_probe(device_t dev)
{
+ struct gpiobus_ivar *devi;
#ifdef FDT
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
if (!ofw_bus_is_compatible(dev, "gpioiic"))
return (ENXIO);
#endif
+ devi = GPIOBUS_IVAR(dev);
+ if (devi->npins < GPIOIIC_MIN_PINS) {
+ device_printf(dev,
+ "gpioiic needs at least %d GPIO pins (only %d given).\n",
+ GPIOIIC_MIN_PINS, devi->npins);
+ return (ENXIO);
+ }
device_set_desc(dev, "GPIO I2C bit-banging driver");
- return (0);
+ return (BUS_PROBE_DEFAULT);
}
static int
@@ -105,10 +111,10 @@ gpioiic_attach(device_t dev)
sc->sc_busdev = device_get_parent(dev);
if (resource_int_value(device_get_name(dev),
device_get_unit(dev), "scl", &sc->scl_pin))
- sc->scl_pin = SCL_PIN_DEFAULT;
+ sc->scl_pin = GPIOIIC_SCL_DFLT;
if (resource_int_value(device_get_name(dev),
device_get_unit(dev), "sda", &sc->sda_pin))
- sc->sda_pin = SDA_PIN_DEFAULT;
+ sc->sda_pin = GPIOIIC_SDA_DFLT;
#ifdef FDT
if ((node = ofw_bus_get_node(dev)) == -1)
@@ -120,9 +126,9 @@ gpioiic_attach(device_t dev)
#endif
if (sc->scl_pin < 0 || sc->scl_pin > 1)
- sc->scl_pin = SCL_PIN_DEFAULT;
+ sc->scl_pin = GPIOIIC_SCL_DFLT;
if (sc->sda_pin < 0 || sc->sda_pin > 1)
- sc->sda_pin = SDA_PIN_DEFAULT;
+ sc->sda_pin = GPIOIIC_SDA_DFLT;
devi = GPIOBUS_IVAR(dev);
device_printf(dev, "SCL pin: %d, SDA pin: %d\n",
@@ -150,30 +156,6 @@ gpioiic_reset_bus(device_t dev)
GPIO_PIN_INPUT);
}
-static int
-gpioiic_callback(device_t dev, int index, caddr_t data)
-{
- struct gpioiic_softc *sc = device_get_softc(dev);
- int error, how;
-
- how = GPIOBUS_DONTWAIT;
- if (data != NULL && *(int*)data == IIC_WAIT)
- how = GPIOBUS_WAIT;
- error = 0;
- switch (index) {
- case IIC_REQUEST_BUS:
- error = GPIOBUS_ACQUIRE_BUS(sc->sc_busdev, sc->sc_dev, how);
- break;
- case IIC_RELEASE_BUS:
- GPIOBUS_RELEASE_BUS(sc->sc_busdev, sc->sc_dev);
- break;
- default:
- error = EINVAL;
- }
-
- return (error);
-}
-
static void
gpioiic_setsda(device_t dev, int val)
{
@@ -260,7 +242,6 @@ static device_method_t gpioiic_methods[] = {
DEVMETHOD(device_detach, bus_generic_detach),
/* iicbb interface */
- DEVMETHOD(iicbb_callback, gpioiic_callback),
DEVMETHOD(iicbb_setsda, gpioiic_setsda),
DEVMETHOD(iicbb_setscl, gpioiic_setscl),
DEVMETHOD(iicbb_getsda, gpioiic_getsda),
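
One detail worth noting in the probe change above: returning 0 from a probe routine claims the device at the highest priority (BUS_PROBE_SPECIFIC), while BUS_PROBE_DEFAULT leaves room for a more specific driver to outbid this one. A minimal hypothetical probe following that convention (my_hw_match() is an invented identification helper):

    static int
    mydrv_probe(device_t dev)
    {
            if (!my_hw_match(dev))
                    return (ENXIO);

            device_set_desc(dev, "Example device");
            return (BUS_PROBE_DEFAULT);
    }
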
diff --git a/sys/dev/gpio/gpioled.c b/sys/dev/gpio/gpioled.c
index 760cd6e..bd77f53 100644
--- a/sys/dev/gpio/gpioled.c
+++ b/sys/dev/gpio/gpioled.c
@@ -54,12 +54,11 @@ __FBSDID("$FreeBSD$");
*/
#define GPIOLED_PIN 0
-#define GPIOLED_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define GPIOLED_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define GPIOLED_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
-#define GPIOLED_LOCK_INIT(_sc) \
- mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
- "gpioled", MTX_DEF)
-#define GPIOLED_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
+#define GPIOLED_LOCK_INIT(_sc) mtx_init(&(_sc)->sc_mtx, \
+ device_get_nameunit((_sc)->sc_dev), "gpioled", MTX_DEF)
+#define GPIOLED_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
struct gpioled_softc
{
@@ -77,23 +76,15 @@ static int gpioled_detach(device_t);
static void
gpioled_control(void *priv, int onoff)
{
- int error;
struct gpioled_softc *sc;
sc = (struct gpioled_softc *)priv;
GPIOLED_LOCK(sc);
- error = GPIOBUS_ACQUIRE_BUS(sc->sc_busdev, sc->sc_dev,
- GPIOBUS_DONTWAIT);
- if (error != 0) {
- GPIOLED_UNLOCK(sc);
- return;
- }
- error = GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev,
- GPIOLED_PIN, GPIO_PIN_OUTPUT);
- if (error == 0)
+ if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ GPIO_PIN_OUTPUT) == 0) {
GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
- GPIOBUS_RELEASE_BUS(sc->sc_busdev, sc->sc_dev);
+ }
GPIOLED_UNLOCK(sc);
}
@@ -159,7 +150,7 @@ gpioled_probe(device_t dev)
#endif
device_set_desc(dev, "GPIO led");
- return (0);
+ return (BUS_PROBE_DEFAULT);
}
static int
diff --git a/sys/dev/hyperv/include/hyperv_busdma.h b/sys/dev/hyperv/include/hyperv_busdma.h
new file mode 100644
index 0000000..f4ea8cf
--- /dev/null
+++ b/sys/dev/hyperv/include/hyperv_busdma.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2016 Microsoft Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _HYPERV_BUSDMA_H_
+#define _HYPERV_BUSDMA_H_
+
+struct hyperv_dma {
+ bus_addr_t hv_paddr;
+ bus_dma_tag_t hv_dtag;
+ bus_dmamap_t hv_dmap;
+};
+
+void hyperv_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg,
+ int error);
+void *hyperv_dmamem_alloc(bus_dma_tag_t parent_dtag, bus_size_t alignment,
+ bus_addr_t boundary, bus_size_t size, struct hyperv_dma *dma,
+ int flags);
+void hyperv_dmamem_free(struct hyperv_dma *dma, void *ptr);
+
+#endif /* !_HYPERV_BUSDMA_H_ */
diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c
index 52c25a6..04e998e 100644
--- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c
+++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c
@@ -113,6 +113,7 @@ __FBSDID("$FreeBSD$");
#include <machine/in_cksum.h>
#include <dev/hyperv/include/hyperv.h>
+#include <dev/hyperv/include/hyperv_busdma.h>
#include "hv_net_vsc.h"
#include "hv_rndis.h"
#include "hv_rndis_filter.h"
@@ -2171,18 +2172,6 @@ hn_check_iplen(const struct mbuf *m, int hoff)
}
static void
-hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
-{
- bus_addr_t *paddr = arg;
-
- if (error)
- return;
-
- KASSERT(nseg == 1, ("too many segments %d!", nseg));
- *paddr = segs->ds_addr;
-}
-
-static void
hn_create_rx_data(struct hn_softc *sc, int ring_cnt)
{
struct sysctl_oid_list *child;
@@ -2472,7 +2461,7 @@ hn_create_tx_ring(struct hn_softc *sc, int id)
error = bus_dmamap_load(txr->hn_tx_rndis_dtag,
txd->rndis_msg_dmap,
txd->rndis_msg, HN_RNDIS_MSG_LEN,
- hn_dma_map_paddr, &txd->rndis_msg_paddr,
+ hyperv_dma_map_paddr, &txd->rndis_msg_paddr,
BUS_DMA_NOWAIT);
if (error) {
device_printf(sc->hn_dev,
diff --git a/sys/dev/hyperv/vmbus/hv_connection.c b/sys/dev/hyperv/vmbus/hv_connection.c
index 5cd474a..b642461 100644
--- a/sys/dev/hyperv/vmbus/hv_connection.c
+++ b/sys/dev/hyperv/vmbus/hv_connection.c
@@ -333,8 +333,8 @@ vmbus_event_proc(struct vmbus_softc *sc, int cpu)
{
hv_vmbus_synic_event_flags *event;
- event = ((hv_vmbus_synic_event_flags *)
- hv_vmbus_g_context.syn_ic_event_page[cpu]) + HV_VMBUS_MESSAGE_SINT;
+ event = hv_vmbus_g_context.syn_ic_event_page[cpu] +
+ HV_VMBUS_MESSAGE_SINT;
/*
* On Host with Win8 or above, the event page can be checked directly
@@ -349,8 +349,8 @@ vmbus_event_proc_compat(struct vmbus_softc *sc __unused, int cpu)
{
hv_vmbus_synic_event_flags *event;
- event = ((hv_vmbus_synic_event_flags *)
- hv_vmbus_g_context.syn_ic_event_page[cpu]) + HV_VMBUS_MESSAGE_SINT;
+ event = hv_vmbus_g_context.syn_ic_event_page[cpu] +
+ HV_VMBUS_MESSAGE_SINT;
if (atomic_testandclear_int(&event->flags32[0], 0)) {
vmbus_event_flags_proc(
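
The casts here could be dropped because the per-CPU SynIC page pointers are now declared with their real element types (see the hv_vmbus_priv.h hunk later in this change), so ordinary pointer arithmetic scales by the element size. In outline:

    /*
     * With a typed pointer, "+ HV_VMBUS_MESSAGE_SINT" advances by whole
     * event-flag (or message) slots rather than by bytes.
     */
    union vmbus_event_flags *base, *event;

    base = hv_vmbus_g_context.syn_ic_event_page[cpu];
    event = base + HV_VMBUS_MESSAGE_SINT;
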
diff --git a/sys/dev/hyperv/vmbus/hv_hv.c b/sys/dev/hyperv/vmbus/hv_hv.c
index 6e3f713..3cbdae6 100644
--- a/sys/dev/hyperv/vmbus/hv_hv.c
+++ b/sys/dev/hyperv/vmbus/hv_hv.c
@@ -43,13 +43,51 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_param.h>
#include <vm/pmap.h>
-
-#include "hv_vmbus_priv.h"
+#include <dev/hyperv/include/hyperv_busdma.h>
+#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
+#include <dev/hyperv/vmbus/hyperv_reg.h>
+#include <dev/hyperv/vmbus/vmbus_var.h>
#define HV_NANOSECONDS_PER_SEC 1000000000L
#define HYPERV_INTERFACE 0x31237648 /* HV#1 */
+/*
+ * The guest OS needs to register the guest ID with the hypervisor.
+ * The guest ID is a 64 bit entity and the structure of this ID is
+ * specified in the Hyper-V specification:
+ *
+ * http://msdn.microsoft.com/en-us/library/windows/
+ * hardware/ff542653%28v=vs.85%29.aspx
+ *
+ * While the current guideline does not specify how FreeBSD guest ID(s)
+ * need to be generated, our plan is to publish the guidelines for
+ * FreeBSD and other guest operating systems that currently are hosted
+ * on Hyper-V. The implementation here conforms to these yet-
+ * unpublished guidelines.
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - OS type: FreeBSD is 0x02
+ * 55:48 - Distro specific identification
+ * 47:16 - FreeBSD kernel version number
+ * 15:0 - Distro specific identification
+ */
+#define HYPERV_GUESTID_OSS (0x1ULL << 63)
+#define HYPERV_GUESTID_FREEBSD (0x02ULL << 56)
+#define HYPERV_GUESTID(id) \
+ (HYPERV_GUESTID_OSS | HYPERV_GUESTID_FREEBSD | \
+ (((uint64_t)(((id) & 0xff0000) >> 16)) << 48) |\
+ (((uint64_t)__FreeBSD_version) << 16) | \
+ ((uint64_t)((id) & 0x00ffff)))
+
+struct hypercall_ctx {
+ void *hc_addr;
+ struct hyperv_dma hc_dma;
+};
+
+static struct hypercall_ctx hypercall_context;
+
static u_int hv_get_timecount(struct timecounter *tc);
u_int hyperv_features;
@@ -63,7 +101,6 @@ static u_int hyperv_features3;
*/
hv_vmbus_context hv_vmbus_g_context = {
.syn_ic_initialized = FALSE,
- .hypercall_page = NULL,
};
static struct timecounter hv_timecounter = {
@@ -87,7 +124,7 @@ hv_vmbus_do_hypercall(uint64_t control, void* input, void* output)
uint64_t hv_status = 0;
uint64_t input_address = (input) ? hv_get_phys_addr(input) : 0;
uint64_t output_address = (output) ? hv_get_phys_addr(output) : 0;
- volatile void* hypercall_page = hv_vmbus_g_context.hypercall_page;
+ volatile void *hypercall_page = hypercall_context.hc_addr;
__asm__ __volatile__ ("mov %0, %%r8" : : "r" (output_address): "r8");
__asm__ __volatile__ ("call *%3" : "=a"(hv_status):
@@ -105,7 +142,7 @@ hv_vmbus_do_hypercall(uint64_t control, void* input, void* output)
uint64_t output_address = (output) ? hv_get_phys_addr(output) : 0;
uint32_t output_address_high = output_address >> 32;
uint32_t output_address_low = output_address & 0xFFFFFFFF;
- volatile void* hypercall_page = hv_vmbus_g_context.hypercall_page;
+ volatile void *hypercall_page = hypercall_context.hc_addr;
__asm__ __volatile__ ("call *%8" : "=d"(hv_status_high),
"=a"(hv_status_low) : "d" (control_high),
@@ -118,94 +155,6 @@ hv_vmbus_do_hypercall(uint64_t control, void* input, void* output)
}
/**
- * @brief Main initialization routine.
- *
- * This routine must be called
- * before any other routines in here are called
- */
-int
-hv_vmbus_init(void)
-{
- hv_vmbus_x64_msr_hypercall_contents hypercall_msr;
- void* virt_addr = NULL;
-
- memset(
- hv_vmbus_g_context.syn_ic_event_page,
- 0,
- sizeof(hv_vmbus_handle) * MAXCPU);
-
- memset(
- hv_vmbus_g_context.syn_ic_msg_page,
- 0,
- sizeof(hv_vmbus_handle) * MAXCPU);
-
- if (vm_guest != VM_GUEST_HV)
- goto cleanup;
-
- /*
- * Write our OS info
- */
- uint64_t os_guest_info = HV_FREEBSD_GUEST_ID;
- wrmsr(HV_X64_MSR_GUEST_OS_ID, os_guest_info);
- hv_vmbus_g_context.guest_id = os_guest_info;
-
- /*
- * See if the hypercall page is already set
- */
- hypercall_msr.as_uint64_t = rdmsr(HV_X64_MSR_HYPERCALL);
- virt_addr = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO);
-
- hypercall_msr.u.enable = 1;
- hypercall_msr.u.guest_physical_address =
- (hv_get_phys_addr(virt_addr) >> PAGE_SHIFT);
- wrmsr(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64_t);
-
- /*
- * Confirm that hypercall page did get set up
- */
- hypercall_msr.as_uint64_t = 0;
- hypercall_msr.as_uint64_t = rdmsr(HV_X64_MSR_HYPERCALL);
-
- if (!hypercall_msr.u.enable)
- goto cleanup;
-
- hv_vmbus_g_context.hypercall_page = virt_addr;
-
- return (0);
-
- cleanup:
- if (virt_addr != NULL) {
- if (hypercall_msr.u.enable) {
- hypercall_msr.as_uint64_t = 0;
- wrmsr(HV_X64_MSR_HYPERCALL,
- hypercall_msr.as_uint64_t);
- }
-
- free(virt_addr, M_DEVBUF);
- }
- return (ENOTSUP);
-}
-
-/**
- * @brief Cleanup routine, called normally during driver unloading or exiting
- */
-void
-hv_vmbus_cleanup(void)
-{
- hv_vmbus_x64_msr_hypercall_contents hypercall_msr;
-
- if (hv_vmbus_g_context.guest_id == HV_FREEBSD_GUEST_ID) {
- if (hv_vmbus_g_context.hypercall_page != NULL) {
- hypercall_msr.as_uint64_t = 0;
- wrmsr(HV_X64_MSR_HYPERCALL,
- hypercall_msr.as_uint64_t);
- free(hv_vmbus_g_context.hypercall_page, M_DEVBUF);
- hv_vmbus_g_context.hypercall_page = NULL;
- }
- }
-}
-
-/**
* @brief Post a message using the hypervisor message IPC.
* (This involves a hypercall.)
*/
@@ -272,8 +221,8 @@ hv_vmbus_signal_event(void *con_id)
*/
void
hv_vmbus_synic_init(void *arg)
-
{
+ struct vmbus_softc *sc = vmbus_get_softc();
int cpu;
uint64_t hv_vcpu_index;
hv_vmbus_synic_simp simp;
@@ -285,9 +234,6 @@ hv_vmbus_synic_init(void *arg)
cpu = PCPU_GET(cpuid);
- if (hv_vmbus_g_context.hypercall_page == NULL)
- return;
-
/*
* TODO: Check the version
*/
@@ -321,7 +267,7 @@ hv_vmbus_synic_init(void *arg)
/*HV_SHARED_SINT_IDT_VECTOR + 0x20; */
shared_sint.as_uint64_t = 0;
- shared_sint.u.vector = setup_args->vector;
+ shared_sint.u.vector = sc->vmbus_idtvec;
shared_sint.u.masked = FALSE;
shared_sint.u.auto_eoi = TRUE;
@@ -508,6 +454,9 @@ hyperv_init(void *dummy __unused)
return;
}
+ /* Write guest id */
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_GUESTID(0));
+
if (hyperv_features & HV_FEATURE_MSR_TIME_REFCNT) {
/* Register virtual timecount */
tc_init(&hv_timecounter);
@@ -515,3 +464,74 @@ hyperv_init(void *dummy __unused)
}
SYSINIT(hyperv_initialize, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, hyperv_init,
NULL);
+
+static void
+hypercall_memfree(void)
+{
+ hyperv_dmamem_free(&hypercall_context.hc_dma,
+ hypercall_context.hc_addr);
+ hypercall_context.hc_addr = NULL;
+}
+
+static void
+hypercall_create(void *arg __unused)
+{
+ uint64_t hc, hc_orig;
+
+ if (vm_guest != VM_GUEST_HV)
+ return;
+
+ hypercall_context.hc_addr = hyperv_dmamem_alloc(NULL, PAGE_SIZE, 0,
+ PAGE_SIZE, &hypercall_context.hc_dma, BUS_DMA_WAITOK);
+ if (hypercall_context.hc_addr == NULL) {
+ printf("hyperv: Hypercall page allocation failed\n");
+ /* Can't perform any Hyper-V specific actions */
+ vm_guest = VM_GUEST_VM;
+ return;
+ }
+
+ /* Get the 'reserved' bits, which requires preservation. */
+ hc_orig = rdmsr(MSR_HV_HYPERCALL);
+
+ /*
+ * Setup the Hypercall page.
+ *
+ * NOTE: 'reserved' bits MUST be preserved.
+ */
+ hc = ((hypercall_context.hc_dma.hv_paddr >> PAGE_SHIFT) <<
+ MSR_HV_HYPERCALL_PGSHIFT) |
+ (hc_orig & MSR_HV_HYPERCALL_RSVD_MASK) |
+ MSR_HV_HYPERCALL_ENABLE;
+ wrmsr(MSR_HV_HYPERCALL, hc);
+
+ /*
+ * Confirm that Hypercall page did get setup.
+ */
+ hc = rdmsr(MSR_HV_HYPERCALL);
+ if ((hc & MSR_HV_HYPERCALL_ENABLE) == 0) {
+ printf("hyperv: Hypercall setup failed\n");
+ hypercall_memfree();
+ /* Can't perform any Hyper-V specific actions */
+ vm_guest = VM_GUEST_VM;
+ return;
+ }
+ if (bootverbose)
+ printf("hyperv: Hypercall created\n");
+}
+SYSINIT(hypercall_ctor, SI_SUB_DRIVERS, SI_ORDER_FIRST, hypercall_create, NULL);
+
+static void
+hypercall_destroy(void *arg __unused)
+{
+ if (hypercall_context.hc_addr == NULL)
+ return;
+
+ /* Disable Hypercall */
+ wrmsr(MSR_HV_HYPERCALL, 0);
+ hypercall_memfree();
+
+ if (bootverbose)
+ printf("hyperv: Hypercall destroyed\n");
+}
+SYSUNINIT(hypercall_dtor, SI_SUB_DRIVERS, SI_ORDER_FIRST, hypercall_destroy,
+ NULL);
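
A worked example of the HYPERV_GUESTID() encoding introduced above, with the distro ID left at zero and an illustrative __FreeBSD_version of 1100000 (the real value comes from param.h):

    #include <stdint.h>
    #include <stdio.h>

    #define GUESTID_OSS     (0x1ULL << 63)          /* bit 63: open source */
    #define GUESTID_FREEBSD (0x02ULL << 56)         /* bits 62:56: OS type */
    #define KERN_VERSION    1100000ULL              /* bits 47:16, example */

    int
    main(void)
    {
            uint64_t id = GUESTID_OSS | GUESTID_FREEBSD | (KERN_VERSION << 16);

            /* Prints 82000010c8e00000 for the values above. */
            printf("%016jx\n", (uintmax_t)id);
            return (0);
    }
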
diff --git a/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c b/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c
index 7d51f61..e625da5 100644
--- a/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c
+++ b/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c
@@ -68,7 +68,6 @@ __FBSDID("$FreeBSD$");
struct vmbus_softc *vmbus_sc;
-static device_t vmbus_devp;
static int vmbus_inited;
static hv_setup_args setup_args; /* only CPU 0 supported at this time */
@@ -79,8 +78,9 @@ vmbus_msg_task(void *arg __unused, int pending __unused)
{
hv_vmbus_message *msg;
- msg = ((hv_vmbus_message *)hv_vmbus_g_context.syn_ic_msg_page[curcpu]) +
+ msg = hv_vmbus_g_context.syn_ic_msg_page[curcpu] +
HV_VMBUS_MESSAGE_SINT;
+
for (;;) {
const hv_vmbus_channel_msg_table_entry *entry;
hv_vmbus_channel_msg_header *hdr;
@@ -134,9 +134,8 @@ static inline int
hv_vmbus_isr(struct trapframe *frame)
{
struct vmbus_softc *sc = vmbus_get_softc();
+ hv_vmbus_message *msg, *msg_base;
int cpu = curcpu;
- hv_vmbus_message *msg;
- void *page_addr;
/*
* The Windows team has advised that we check for events
@@ -146,8 +145,8 @@ hv_vmbus_isr(struct trapframe *frame)
sc->vmbus_event_proc(sc, cpu);
/* Check if there are actual msgs to be process */
- page_addr = hv_vmbus_g_context.syn_ic_msg_page[cpu];
- msg = ((hv_vmbus_message *)page_addr) + HV_VMBUS_TIMER_SINT;
+ msg_base = hv_vmbus_g_context.syn_ic_msg_page[cpu];
+ msg = msg_base + HV_VMBUS_TIMER_SINT;
/* we call eventtimer process the message */
if (msg->header.message_type == HV_MESSAGE_TIMER_EXPIRED) {
@@ -178,7 +177,7 @@ hv_vmbus_isr(struct trapframe *frame)
}
}
- msg = ((hv_vmbus_message *)page_addr) + HV_VMBUS_MESSAGE_SINT;
+ msg = msg_base + HV_VMBUS_MESSAGE_SINT;
if (msg->header.message_type != HV_MESSAGE_TYPE_NONE) {
taskqueue_enqueue(hv_vmbus_g_context.hv_msg_tq[cpu],
&hv_vmbus_g_context.hv_msg_task[cpu]);
@@ -324,7 +323,7 @@ hv_vmbus_child_device_register(struct hv_device *child_dev)
printf("VMBUS: Class ID: %s\n", name);
}
- child = device_add_child(vmbus_devp, NULL, -1);
+ child = device_add_child(vmbus_get_device(), NULL, -1);
child_dev->device = child;
device_set_ivars(child, child_dev);
@@ -340,7 +339,7 @@ hv_vmbus_child_device_unregister(struct hv_device *child_dev)
* device_add_child()
*/
mtx_lock(&Giant);
- ret = device_delete_child(vmbus_devp, child_dev->device);
+ ret = device_delete_child(vmbus_get_device(), child_dev->device);
mtx_unlock(&Giant);
return(ret);
}
@@ -349,7 +348,7 @@ static int
vmbus_probe(device_t dev)
{
if (ACPI_ID_PROBE(device_get_parent(dev), dev, vmbus_ids) == NULL ||
- device_get_unit(dev) != 0)
+ device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV)
return (ENXIO);
device_set_desc(dev, "Hyper-V Vmbus");
@@ -385,33 +384,19 @@ vmbus_bus_init(void)
vmbus_inited = 1;
sc = vmbus_get_softc();
- ret = hv_vmbus_init();
-
- if (ret) {
- if(bootverbose)
- printf("Error VMBUS: Hypervisor Initialization Failed!\n");
- return (ret);
- }
-
/*
- * Find a free IDT slot for vmbus callback.
+ * Find a free IDT vector for vmbus messages/events.
*/
- hv_vmbus_g_context.hv_cb_vector = lapic_ipi_alloc(IDTVEC(hv_vmbus_callback));
- if (hv_vmbus_g_context.hv_cb_vector < 0) {
- if(bootverbose)
- printf("Error VMBUS: Cannot find free IDT slot for "
- "vmbus callback!\n");
+ sc->vmbus_idtvec = lapic_ipi_alloc(IDTVEC(hv_vmbus_callback));
+ if (sc->vmbus_idtvec < 0) {
+ device_printf(sc->vmbus_dev, "cannot find free IDT vector\n");
+ ret = ENXIO;
goto cleanup;
}
-
- if(bootverbose)
- printf("VMBUS: vmbus callback vector %d\n",
- hv_vmbus_g_context.hv_cb_vector);
-
- /*
- * Notify the hypervisor of our vector.
- */
- setup_args.vector = hv_vmbus_g_context.hv_cb_vector;
+ if (bootverbose) {
+ device_printf(sc->vmbus_dev, "vmbus IDT vector %d\n",
+ sc->vmbus_idtvec);
+ }
CPU_FOREACH(j) {
snprintf(buf, sizeof(buf), "cpu%d:hyperv", j);
@@ -478,8 +463,8 @@ vmbus_bus_init(void)
hv_vmbus_request_channel_offers();
vmbus_scan();
- bus_generic_attach(vmbus_devp);
- device_printf(vmbus_devp, "device scan, probe and attach done\n");
+ bus_generic_attach(sc->vmbus_dev);
+ device_printf(sc->vmbus_dev, "device scan, probe and attach done\n");
return (ret);
@@ -501,11 +486,9 @@ vmbus_bus_init(void)
}
}
- lapic_ipi_free(hv_vmbus_g_context.hv_cb_vector);
+ lapic_ipi_free(sc->vmbus_idtvec);
cleanup:
- hv_vmbus_cleanup();
-
return (ret);
}
@@ -517,11 +500,8 @@ vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
static int
vmbus_attach(device_t dev)
{
- if(bootverbose)
- device_printf(dev, "VMBUS: attach dev: %p\n", dev);
-
- vmbus_devp = dev;
vmbus_sc = device_get_softc(dev);
+ vmbus_sc->vmbus_dev = dev;
/*
* Event processing logic will be configured:
@@ -566,6 +546,7 @@ vmbus_sysinit(void *arg __unused)
static int
vmbus_detach(device_t dev)
{
+ struct vmbus_softc *sc = device_get_softc(dev);
int i;
hv_vmbus_release_unattached_channels();
@@ -578,8 +559,6 @@ vmbus_detach(device_t dev)
free(setup_args.page_buffers[i], M_DEVBUF);
}
- hv_vmbus_cleanup();
-
/* remove swi */
CPU_FOREACH(i) {
if (hv_vmbus_g_context.hv_event_queue[i] != NULL) {
@@ -588,7 +567,7 @@ vmbus_detach(device_t dev)
}
}
- lapic_ipi_free(hv_vmbus_g_context.hv_cb_vector);
+ lapic_ipi_free(sc->vmbus_idtvec);
return (0);
}
diff --git a/sys/dev/hyperv/vmbus/hv_vmbus_priv.h b/sys/dev/hyperv/vmbus/hv_vmbus_priv.h
index 563c18a..2619c0d 100644
--- a/sys/dev/hyperv/vmbus/hv_vmbus_priv.h
+++ b/sys/dev/hyperv/vmbus/hv_vmbus_priv.h
@@ -197,13 +197,14 @@ enum {
#define HV_HYPERCALL_PARAM_ALIGN sizeof(uint64_t)
+struct vmbus_message;
+union vmbus_event_flags;
+
typedef struct {
- uint64_t guest_id;
- void* hypercall_page;
hv_bool_uint8_t syn_ic_initialized;
- hv_vmbus_handle syn_ic_msg_page[MAXCPU];
- hv_vmbus_handle syn_ic_event_page[MAXCPU];
+ struct vmbus_message *syn_ic_msg_page[MAXCPU];
+ union vmbus_event_flags *syn_ic_event_page[MAXCPU];
/*
* For FreeBSD cpuid to Hyper-V vcpuid mapping.
*/
@@ -215,11 +216,6 @@ typedef struct {
struct taskqueue *hv_event_queue[MAXCPU];
struct taskqueue *hv_msg_tq[MAXCPU];
struct task hv_msg_task[MAXCPU];
- /*
- * Host use this vector to interrupt guest for vmbus channel
- * event and msg.
- */
- int hv_cb_vector;
} hv_vmbus_context;
/*
@@ -305,7 +301,7 @@ typedef struct {
/*
* Define synthetic interrupt controller message format
*/
-typedef struct {
+typedef struct vmbus_message {
hv_vmbus_msg_header header;
union {
uint64_t payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
@@ -580,7 +576,7 @@ typedef struct {
/*
* Define the synthetic interrupt controller event flags format
*/
-typedef union {
+typedef union vmbus_event_flags {
uint8_t flags8[HV_EVENT_FLAGS_BYTE_COUNT];
uint32_t flags32[HV_EVENT_FLAGS_DWORD_COUNT];
unsigned long flagsul[HV_EVENT_FLAGS_ULONG_COUNT];
@@ -723,8 +719,6 @@ hv_vmbus_channel* hv_vmbus_allocate_channel(void);
void hv_vmbus_free_vmbus_channel(hv_vmbus_channel *channel);
int hv_vmbus_request_channel_offers(void);
void hv_vmbus_release_unattached_channels(void);
-int hv_vmbus_init(void);
-void hv_vmbus_cleanup(void);
uint16_t hv_vmbus_post_msg_via_msg_ipc(
hv_vmbus_connection_id connection_id,
@@ -763,46 +757,7 @@ void hv_et_intr(struct trapframe*);
/* Wait for device creation */
void vmbus_scan(void);
-/*
- * The guest OS needs to register the guest ID with the hypervisor.
- * The guest ID is a 64 bit entity and the structure of this ID is
- * specified in the Hyper-V specification:
- *
- * http://msdn.microsoft.com/en-us/library/windows/
- * hardware/ff542653%28v=vs.85%29.aspx
- *
- * While the current guideline does not specify how FreeBSD guest ID(s)
- * need to be generated, our plan is to publish the guidelines for
- * FreeBSD and other guest operating systems that currently are hosted
- * on Hyper-V. The implementation here conforms to this yet
- * unpublished guidelines.
- *
- * Bit(s)
- * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
- * 62:56 - Os Type; Linux is 0x100, FreeBSD is 0x200
- * 55:48 - Distro specific identification
- * 47:16 - FreeBSD kernel version number
- * 15:0 - Distro specific identification
- *
- */
-
-#define HV_FREEBSD_VENDOR_ID 0x8200
-#define HV_FREEBSD_GUEST_ID hv_generate_guest_id(0,0)
-
-static inline uint64_t hv_generate_guest_id(
- uint8_t distro_id_part1,
- uint16_t distro_id_part2)
-{
- uint64_t guest_id;
- guest_id = (((uint64_t)HV_FREEBSD_VENDOR_ID) << 48);
- guest_id |= (((uint64_t)(distro_id_part1)) << 48);
- guest_id |= (((uint64_t)(__FreeBSD_version)) << 16); /* in param.h */
- guest_id |= ((uint64_t)(distro_id_part2));
- return guest_id;
-}
-
typedef struct {
- unsigned int vector;
void *page_buffers[2 * MAXCPU];
} hv_setup_args;
diff --git a/sys/dev/hyperv/vmbus/hyperv_busdma.c b/sys/dev/hyperv/vmbus/hyperv_busdma.c
new file mode 100644
index 0000000..07f4e74
--- /dev/null
+++ b/sys/dev/hyperv/vmbus/hyperv_busdma.c
@@ -0,0 +1,98 @@
+/*-
+ * Copyright (c) 2016 Microsoft Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <machine/bus.h>
+
+#include <dev/hyperv/include/hyperv_busdma.h>
+
+#define HYPERV_DMA_WAITMASK (BUS_DMA_WAITOK | BUS_DMA_NOWAIT)
+
+void
+hyperv_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ bus_addr_t *paddr = arg;
+
+ if (error)
+ return;
+
+ KASSERT(nseg == 1, ("too many segments %d!", nseg));
+ *paddr = segs->ds_addr;
+}
+
+void *
+hyperv_dmamem_alloc(bus_dma_tag_t parent_dtag, bus_size_t alignment,
+ bus_addr_t boundary, bus_size_t size, struct hyperv_dma *dma, int flags)
+{
+ void *ret;
+ int error;
+
+ error = bus_dma_tag_create(parent_dtag, /* parent */
+ alignment, /* alignment */
+ boundary, /* boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &dma->hv_dtag);
+ if (error)
+ return NULL;
+
+ error = bus_dmamem_alloc(dma->hv_dtag, &ret,
+ (flags & HYPERV_DMA_WAITMASK) | BUS_DMA_COHERENT, &dma->hv_dmap);
+ if (error) {
+ bus_dma_tag_destroy(dma->hv_dtag);
+ return NULL;
+ }
+
+ error = bus_dmamap_load(dma->hv_dtag, dma->hv_dmap, ret, size,
+ hyperv_dma_map_paddr, &dma->hv_paddr, BUS_DMA_NOWAIT);
+ if (error) {
+ bus_dmamem_free(dma->hv_dtag, ret, dma->hv_dmap);
+ bus_dma_tag_destroy(dma->hv_dtag);
+ return NULL;
+ }
+ return ret;
+}
+
+void
+hyperv_dmamem_free(struct hyperv_dma *dma, void *ptr)
+{
+ bus_dmamap_unload(dma->hv_dtag, dma->hv_dmap);
+ bus_dmamem_free(dma->hv_dtag, ptr, dma->hv_dmap);
+ bus_dma_tag_destroy(dma->hv_dtag);
+}
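
A minimal usage sketch of the helpers above, mirroring how hypercall_create() in the hv_hv.c hunk allocates its hypercall page (error handling abbreviated):

    struct hyperv_dma dma;
    void *page;

    /* One page-aligned page; its bus/physical address lands in dma.hv_paddr. */
    page = hyperv_dmamem_alloc(NULL, PAGE_SIZE, 0, PAGE_SIZE, &dma,
        BUS_DMA_WAITOK);
    if (page == NULL)
            return (ENOMEM);

    /* ... use page and dma.hv_paddr ... */

    hyperv_dmamem_free(&dma, page);
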
diff --git a/sys/dev/hyperv/vmbus/hyperv_reg.h b/sys/dev/hyperv/vmbus/hyperv_reg.h
new file mode 100644
index 0000000..7d1d509
--- /dev/null
+++ b/sys/dev/hyperv/vmbus/hyperv_reg.h
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (c) 2016 Microsoft Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _HYPERV_REG_H_
+#define _HYPERV_REG_H_
+
+#define MSR_HV_HYPERCALL 0x40000001
+#define MSR_HV_HYPERCALL_ENABLE 0x0001ULL
+#define MSR_HV_HYPERCALL_RSVD_MASK 0x0ffeULL
+#define MSR_HV_HYPERCALL_PGSHIFT 12
+
+#endif /* !_HYPERV_REG_H_ */
diff --git a/sys/dev/hyperv/vmbus/vmbus_var.h b/sys/dev/hyperv/vmbus/vmbus_var.h
index a3c790b..0aab7d2 100644
--- a/sys/dev/hyperv/vmbus/vmbus_var.h
+++ b/sys/dev/hyperv/vmbus/vmbus_var.h
@@ -38,6 +38,8 @@ struct vmbus_pcpu_data {
struct vmbus_softc {
void (*vmbus_event_proc)(struct vmbus_softc *, int);
struct vmbus_pcpu_data vmbus_pcpu[MAXCPU];
+ device_t vmbus_dev;
+ int vmbus_idtvec;
};
extern struct vmbus_softc *vmbus_sc;
@@ -48,6 +50,12 @@ vmbus_get_softc(void)
return vmbus_sc;
}
+static __inline device_t
+vmbus_get_device(void)
+{
+ return vmbus_sc->vmbus_dev;
+}
+
#define VMBUS_SC_PCPU_GET(sc, field, cpu) (sc)->vmbus_pcpu[(cpu)].field
#define VMBUS_SC_PCPU_PTR(sc, field, cpu) &(sc)->vmbus_pcpu[(cpu)].field
#define VMBUS_PCPU_GET(field, cpu) \
diff --git a/sys/dev/iicbus/ds3231reg.h b/sys/dev/iicbus/ds3231reg.h
index 093b2c6..41e011c 100644
--- a/sys/dev/iicbus/ds3231reg.h
+++ b/sys/dev/iicbus/ds3231reg.h
@@ -73,6 +73,6 @@
#define DS3231_0250C 0x40
#define DS3231_MSB 0x8000
#define DS3231_NEG_BIT DS3231_MSB
-#define TZ_ZEROC 2732
+#define TZ_ZEROC 2731
#endif /* _DS3231REG_H_ */
diff --git a/sys/dev/iicbus/iic.c b/sys/dev/iicbus/iic.c
index c6dd53d..e096241 100644
--- a/sys/dev/iicbus/iic.c
+++ b/sys/dev/iicbus/iic.c
@@ -300,9 +300,16 @@ iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags)
parent = device_get_parent(iicdev);
error = 0;
+ if (d->nmsgs > IIC_RDRW_MAX_MSGS)
+ return (EINVAL);
+
buf = malloc(sizeof(*d->msgs) * d->nmsgs, M_IIC, M_WAITOK);
error = copyin(d->msgs, buf, sizeof(*d->msgs) * d->nmsgs);
+ if (error != 0) {
+ free(buf, M_IIC);
+ return (error);
+ }
/* Alloc kernel buffers for userland data, copyin write data */
usrbufs = malloc(sizeof(void *) * d->nmsgs, M_IIC, M_WAITOK | M_ZERO);
@@ -318,6 +325,8 @@ iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags)
m->buf = NULL;
if (error != 0)
continue;
+
+ /* m->len is uint16_t, so allocation size is capped at 64K. */
m->buf = malloc(m->len, M_IIC, M_WAITOK);
if (!(m->flags & IIC_M_RD))
error = copyin(usrbufs[i], m->buf, m->len);
diff --git a/sys/dev/iicbus/iic.h b/sys/dev/iicbus/iic.h
index ba98d28..8ae1912 100644
--- a/sys/dev/iicbus/iic.h
+++ b/sys/dev/iicbus/iic.h
@@ -56,6 +56,8 @@ struct iic_rdwr_data {
uint32_t nmsgs;
};
+#define IIC_RDRW_MAX_MSGS 42
+
#define I2CSTART _IOW('i', 1, struct iiccmd) /* start condition */
#define I2CSTOP _IO('i', 2) /* stop condition */
#define I2CRSTCARD _IOW('i', 3, struct iiccmd) /* reset the card */
diff --git a/sys/dev/iicbus/iicbb_if.m b/sys/dev/iicbus/iicbb_if.m
index 5b4ffec..f363d5c 100644
--- a/sys/dev/iicbus/iicbb_if.m
+++ b/sys/dev/iicbus/iicbb_if.m
@@ -42,7 +42,6 @@ CODE {
static void
null_post_xfer(device_t dev)
-
{
}
diff --git a/sys/dev/iicbus/iiconf.c b/sys/dev/iicbus/iiconf.c
index 8ac8a47..155cba7 100644
--- a/sys/dev/iicbus/iiconf.c
+++ b/sys/dev/iicbus/iiconf.c
@@ -136,7 +136,6 @@ iicbus_request_bus(device_t bus, device_t dev, int how)
}
}
-
IICBUS_UNLOCK(sc);
return (error);
diff --git a/sys/dev/iicbus/lm75.c b/sys/dev/iicbus/lm75.c
index cbcd8c4..025c449 100644
--- a/sys/dev/iicbus/lm75.c
+++ b/sys/dev/iicbus/lm75.c
@@ -72,7 +72,7 @@ __FBSDID("$FreeBSD$");
#define LM75_0125C 0x20
#define LM75_MSB 0x8000
#define LM75_NEG_BIT LM75_MSB
-#define TZ_ZEROC 2732
+#define TZ_ZEROC 2731
/* LM75 supported models. */
#define HWTYPE_LM75 1
diff --git a/sys/dev/ipw/if_ipw.c b/sys/dev/ipw/if_ipw.c
index c4f47e4..a4cb28b 100644
--- a/sys/dev/ipw/if_ipw.c
+++ b/sys/dev/ipw/if_ipw.c
@@ -116,6 +116,7 @@ static void ipw_release(struct ipw_softc *);
static void ipw_media_status(struct ifnet *, struct ifmediareq *);
static int ipw_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static uint16_t ipw_read_prom_word(struct ipw_softc *, uint8_t);
+static uint16_t ipw_read_chanmask(struct ipw_softc *);
static void ipw_rx_cmd_intr(struct ipw_softc *, struct ipw_soft_buf *);
static void ipw_rx_newstate_intr(struct ipw_softc *, struct ipw_soft_buf *);
static void ipw_rx_data_intr(struct ipw_softc *, struct ipw_status *,
@@ -164,6 +165,8 @@ static void ipw_write_mem_1(struct ipw_softc *, bus_size_t,
static int ipw_scan(struct ipw_softc *);
static void ipw_scan_start(struct ieee80211com *);
static void ipw_scan_end(struct ieee80211com *);
+static void ipw_getradiocaps(struct ieee80211com *, int, int *,
+ struct ieee80211_channel[]);
static void ipw_set_channel(struct ieee80211com *);
static void ipw_scan_curchan(struct ieee80211_scan_state *,
unsigned long maxdwell);
@@ -221,7 +224,6 @@ ipw_attach(device_t dev)
{
struct ipw_softc *sc = device_get_softc(dev);
struct ieee80211com *ic = &sc->sc_ic;
- struct ieee80211_channel *c;
uint16_t val;
int error, i;
@@ -292,18 +294,9 @@ ipw_attach(device_t dev)
ic->ic_macaddr[4] = val >> 8;
ic->ic_macaddr[5] = val & 0xff;
- /* set supported .11b channels (read from EEPROM) */
- if ((val = ipw_read_prom_word(sc, IPW_EEPROM_CHANNEL_LIST)) == 0)
- val = 0x7ff; /* default to channels 1-11 */
- val <<= 1;
- for (i = 1; i < 16; i++) {
- if (val & (1 << i)) {
- c = &ic->ic_channels[ic->ic_nchans++];
- c->ic_freq = ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
- c->ic_flags = IEEE80211_CHAN_B;
- c->ic_ieee = i;
- }
- }
+ sc->chanmask = ipw_read_chanmask(sc);
+ ipw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
+ ic->ic_channels);
/* check support for radio transmitter switch in EEPROM */
if (!(ipw_read_prom_word(sc, IPW_EEPROM_RADIO) & 8))
@@ -312,6 +305,7 @@ ipw_attach(device_t dev)
ieee80211_ifattach(ic);
ic->ic_scan_start = ipw_scan_start;
ic->ic_scan_end = ipw_scan_end;
+ ic->ic_getradiocaps = ipw_getradiocaps;
ic->ic_set_channel = ipw_set_channel;
ic->ic_scan_curchan = ipw_scan_curchan;
ic->ic_scan_mindwell = ipw_scan_mindwell;
@@ -966,6 +960,19 @@ ipw_read_prom_word(struct ipw_softc *sc, uint8_t addr)
return le16toh(val);
}
+static uint16_t
+ipw_read_chanmask(struct ipw_softc *sc)
+{
+ uint16_t val;
+
+ /* set supported .11b channels (read from EEPROM) */
+ if ((val = ipw_read_prom_word(sc, IPW_EEPROM_CHANNEL_LIST)) == 0)
+ val = 0x7ff; /* default to channels 1-11 */
+ val <<= 1;
+
+ return (val);
+}
+
static void
ipw_rx_cmd_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf)
{
@@ -2616,6 +2623,26 @@ ipw_scan_start(struct ieee80211com *ic)
}
static void
+ipw_getradiocaps(struct ieee80211com *ic,
+ int maxchans, int *nchans, struct ieee80211_channel chans[])
+{
+ struct ipw_softc *sc = ic->ic_softc;
+ uint8_t bands[IEEE80211_MODE_BYTES];
+ int i;
+
+ memset(bands, 0, sizeof(bands));
+ setbit(bands, IEEE80211_MODE_11B);
+
+ for (i = 1; i < 16; i++) {
+ if (sc->chanmask & (1 << i)) {
+ ieee80211_add_channel(chans, maxchans, nchans,
+ i, 0, 0, 0, bands);
+ }
+ }
+
+}
+
+static void
ipw_set_channel(struct ieee80211com *ic)
{
struct ipw_softc *sc = ic->ic_softc;
diff --git a/sys/dev/ipw/if_ipwvar.h b/sys/dev/ipw/if_ipwvar.h
index aa3ba5b..774d57c 100644
--- a/sys/dev/ipw/if_ipwvar.h
+++ b/sys/dev/ipw/if_ipwvar.h
@@ -157,6 +157,8 @@ struct ipw_softc {
uint32_t rxcur;
int txfree;
+ uint16_t chanmask;
+
struct ipw_rx_radiotap_header sc_rxtap;
struct ipw_tx_radiotap_header sc_txtap;
};
diff --git a/sys/dev/iscsi/icl.h b/sys/dev/iscsi/icl.h
index 735e5db..29d1153 100644
--- a/sys/dev/iscsi/icl.h
+++ b/sys/dev/iscsi/icl.h
@@ -110,8 +110,10 @@ struct icl_conn {
bool ic_send_running;
bool ic_receive_running;
size_t ic_max_data_segment_length;
+ size_t ic_maxtags;
bool ic_disconnecting;
bool ic_iser;
+ bool ic_unmapped;
const char *ic_name;
const char *ic_offload;
diff --git a/sys/dev/iscsi/icl_soft.c b/sys/dev/iscsi/icl_soft.c
index 1216375..c38493f 100644
--- a/sys/dev/iscsi/icl_soft.c
+++ b/sys/dev/iscsi/icl_soft.c
@@ -1190,6 +1190,7 @@ icl_soft_new_conn(const char *name, struct mtx *lock)
ic->ic_max_data_segment_length = ICL_MAX_DATA_SEGMENT_LENGTH;
ic->ic_name = name;
ic->ic_offload = "None";
+ ic->ic_unmapped = false;
return (ic);
}
@@ -1327,6 +1328,23 @@ icl_soft_conn_handoff(struct icl_conn *ic, int fd)
ICL_CONN_LOCK_ASSERT_NOT(ic);
+#ifdef ICL_KERNEL_PROXY
+ /*
+ * We're transitioning to Full Feature phase, and we don't
+ * really care.
+ */
+ if (fd == 0) {
+ ICL_CONN_LOCK(ic);
+ if (ic->ic_socket == NULL) {
+ ICL_CONN_UNLOCK(ic);
+ ICL_WARN("proxy handoff without connect");
+ return (EINVAL);
+ }
+ ICL_CONN_UNLOCK(ic);
+ return (0);
+ }
+#endif
+
/*
* Steal the socket from userland.
*/
diff --git a/sys/dev/iscsi/iscsi.c b/sys/dev/iscsi/iscsi.c
index 62a9c32..86e3fe4 100644
--- a/sys/dev/iscsi/iscsi.c
+++ b/sys/dev/iscsi/iscsi.c
@@ -713,6 +713,8 @@ iscsi_receive_callback(struct icl_pdu *response)
ISCSI_SESSION_LOCK(is);
+ iscsi_pdu_update_statsn(response);
+
#ifdef ICL_KERNEL_PROXY
if (is->is_login_phase) {
if (is->is_login_pdu == NULL)
@@ -725,8 +727,6 @@ iscsi_receive_callback(struct icl_pdu *response)
}
#endif
- iscsi_pdu_update_statsn(response);
-
/*
* The handling routine is responsible for freeing the PDU
* when it's no longer needed.
@@ -1403,6 +1403,7 @@ iscsi_ioctl_daemon_handoff(struct iscsi_softc *sc,
ic->ic_data_crc32c = true;
else
ic->ic_data_crc32c = false;
+ ic->ic_maxtags = maxtags;
is->is_cmdsn = 0;
is->is_expcmdsn = 0;
@@ -1415,21 +1416,17 @@ iscsi_ioctl_daemon_handoff(struct iscsi_softc *sc,
ISCSI_SESSION_UNLOCK(is);
-#ifdef ICL_KERNEL_PROXY
- if (handoff->idh_socket != 0) {
-#endif
- /*
- * Handoff without using ICL proxy.
- */
- error = icl_conn_handoff(ic, handoff->idh_socket);
- if (error != 0) {
- sx_sunlock(&sc->sc_lock);
- iscsi_session_terminate(is);
- return (error);
- }
-#ifdef ICL_KERNEL_PROXY
+ /*
+ * If we're going through the proxy, the idh_socket will be 0,
+ * and the ICL module can simply ignore this call. It can also
+ * use it to determine it's no longer in the Login phase.
+ */
+ error = icl_conn_handoff(ic, handoff->idh_socket);
+ if (error != 0) {
+ sx_sunlock(&sc->sc_lock);
+ iscsi_session_terminate(is);
+ return (error);
}
-#endif
sx_sunlock(&sc->sc_lock);
@@ -1446,7 +1443,7 @@ iscsi_ioctl_daemon_handoff(struct iscsi_softc *sc,
} else {
ISCSI_SESSION_LOCK(is);
- is->is_devq = cam_simq_alloc(maxtags);
+ is->is_devq = cam_simq_alloc(ic->ic_maxtags);
if (is->is_devq == NULL) {
ISCSI_SESSION_WARN(is, "failed to allocate simq");
iscsi_session_terminate(is);
@@ -1455,7 +1452,7 @@ iscsi_ioctl_daemon_handoff(struct iscsi_softc *sc,
is->is_sim = cam_sim_alloc(iscsi_action, iscsi_poll, "iscsi",
is, is->is_id /* unit */, &is->is_lock,
- 1, maxtags, is->is_devq);
+ 1, ic->ic_maxtags, is->is_devq);
if (is->is_sim == NULL) {
ISCSI_SESSION_UNLOCK(is);
ISCSI_SESSION_WARN(is, "failed to allocate SIM");
@@ -1555,6 +1552,10 @@ iscsi_ioctl_daemon_connect(struct iscsi_softc *sc,
}
ISCSI_SESSION_LOCK(is);
+ is->is_statsn = 0;
+ is->is_cmdsn = 0;
+ is->is_expcmdsn = 0;
+ is->is_maxcmdsn = 0;
is->is_waiting_for_iscsid = false;
is->is_login_phase = true;
is->is_timeout = 0;
@@ -1620,9 +1621,7 @@ iscsi_ioctl_daemon_send(struct iscsi_softc *sc,
KASSERT(error == 0, ("icl_pdu_append_data(..., M_WAITOK) failed"));
free(data, M_ISCSI);
}
- ISCSI_SESSION_LOCK(is);
- icl_pdu_queue(ip);
- ISCSI_SESSION_UNLOCK(is);
+ iscsi_pdu_queue(ip);
return (0);
}
@@ -1634,6 +1633,7 @@ iscsi_ioctl_daemon_receive(struct iscsi_softc *sc,
struct iscsi_session *is;
struct icl_pdu *ip;
void *data;
+ int error;
sx_slock(&sc->sc_lock);
TAILQ_FOREACH(is, &sc->sc_sessions, is_next) {
@@ -1652,8 +1652,13 @@ iscsi_ioctl_daemon_receive(struct iscsi_softc *sc,
ISCSI_SESSION_LOCK(is);
while (is->is_login_pdu == NULL &&
is->is_terminating == false &&
- is->is_reconnecting == false)
- cv_wait(&is->is_login_cv, &is->is_lock);
+ is->is_reconnecting == false) {
+ error = cv_wait_sig(&is->is_login_cv, &is->is_lock);
+ if (error != 0) {
+ ISCSI_SESSION_UNLOCK(is);
+ return (error);
+ }
+ }
if (is->is_terminating || is->is_reconnecting) {
ISCSI_SESSION_UNLOCK(is);
return (EIO);
@@ -2265,6 +2270,14 @@ iscsi_action(struct cam_sim *sim, union ccb *ccb)
cpi->hba_inquiry = PI_TAG_ABLE;
cpi->target_sprt = 0;
cpi->hba_misc = PIM_EXTLUNS;
+ /*
+ * XXX: It shouldn't ever be NULL; this could be turned
+ * into a KASSERT eventually.
+ */
+ if (is->is_conn == NULL)
+ ISCSI_WARN("NULL conn");
+ else if (is->is_conn->ic_unmapped)
+ cpi->hba_misc |= PIM_UNMAPPED;
cpi->hba_eng_cnt = 0;
cpi->max_target = 0;
/*
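
One detail worth noting from the iscsi_ioctl_daemon_receive() hunk above: the login wait switched from cv_wait() to cv_wait_sig(), so a signal delivered to the sleeping iscsid thread now surfaces as an error to userland instead of leaving the thread stuck in the kernel. A minimal sketch of that pattern, with generic names (lock, cond, condition) standing in for the driver's mutex, condvar and predicate:

	/* Sketch only; assumes: struct mtx lock; struct cv cond; int error; */
	mtx_lock(&lock);
	while (!condition) {
		error = cv_wait_sig(&cond, &lock);
		if (error != 0) {
			/* EINTR or ERESTART: a signal arrived while sleeping */
			mtx_unlock(&lock);
			return (error);
		}
	}
	/* condition holds here */
	mtx_unlock(&lock);
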
diff --git a/sys/dev/isp/isp.c b/sys/dev/isp/isp.c
index 920b43b..e6ee8ca 100644
--- a/sys/dev/isp/isp.c
+++ b/sys/dev/isp/isp.c
@@ -2431,6 +2431,7 @@ isp_fc_enable_vp(ispsoftc_t *isp, int chan)
__func__, chan, vp.vp_mod_hdr.rqs_flags, vp.vp_mod_status);
return (EIO);
}
+ GET_NANOTIME(&isp->isp_init_time);
return (0);
}
@@ -4699,6 +4700,8 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
tmf->tmf_tidhi = lp->portid >> 16;
tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan);
isp_put_24xx_tmf(isp, tmf, isp->isp_iocb);
+ if (isp->isp_dblev & ISP_LOGDEBUG1)
+ isp_print_bytes(isp, "TMF IOCB request", QENTRY_LEN, isp->isp_iocb);
MEMORYBARRIER(isp, SYNC_IFORDEV, 0, QENTRY_LEN, chan);
fcp->sendmarker = 1;
@@ -4715,6 +4718,8 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
break;
MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan);
+ if (isp->isp_dblev & ISP_LOGDEBUG1)
+ isp_print_bytes(isp, "TMF IOCB response", QENTRY_LEN, &((isp24xx_statusreq_t *)isp->isp_iocb)[1]);
sp = (isp24xx_statusreq_t *) local;
isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)isp->isp_iocb)[1], sp);
if (sp->req_completion_status == 0) {
@@ -4781,6 +4786,8 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
ab->abrt_tidhi = lp->portid >> 16;
ab->abrt_vpidx = ISP_GET_VPIDX(isp, chan);
isp_put_24xx_abrt(isp, ab, isp->isp_iocb);
+ if (isp->isp_dblev & ISP_LOGDEBUG1)
+ isp_print_bytes(isp, "AB IOCB quest", QENTRY_LEN, isp->isp_iocb);
MEMORYBARRIER(isp, SYNC_IFORDEV, 0, 2 * QENTRY_LEN, chan);
ISP_MEMZERO(&mbs, sizeof (mbs));
@@ -4796,6 +4803,8 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
break;
MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan);
+ if (isp->isp_dblev & ISP_LOGDEBUG1)
+ isp_print_bytes(isp, "AB IOCB response", QENTRY_LEN, &((isp24xx_abrt_t *)isp->isp_iocb)[1]);
isp_get_24xx_abrt(isp, &((isp24xx_abrt_t *)isp->isp_iocb)[1], ab);
if (ab->abrt_nphdl == ISP24XX_ABRT_OKAY) {
return (0);
@@ -5857,6 +5866,7 @@ isp_parse_async_fc(ispsoftc_t *isp, uint16_t mbox)
* These are broadcast events that have to be sent across
* all active channels.
*/
+ GET_NANOTIME(&isp->isp_init_time);
for (chan = 0; chan < isp->isp_nchan; chan++) {
fcp = FCPARAM(isp, chan);
int topo = fcp->isp_topo;
@@ -5913,6 +5923,7 @@ isp_parse_async_fc(ispsoftc_t *isp, uint16_t mbox)
* This is a broadcast event that has to be sent across
* all active channels.
*/
+ GET_NANOTIME(&isp->isp_init_time);
for (chan = 0; chan < isp->isp_nchan; chan++) {
fcp = FCPARAM(isp, chan);
if (fcp->role == ISP_ROLE_NONE)
@@ -5956,6 +5967,7 @@ isp_parse_async_fc(ispsoftc_t *isp, uint16_t mbox)
* This is a broadcast event that has to be sent across
* all active channels.
*/
+ GET_NANOTIME(&isp->isp_init_time);
for (chan = 0; chan < isp->isp_nchan; chan++) {
fcp = FCPARAM(isp, chan);
if (fcp->role == ISP_ROLE_NONE)
@@ -6154,6 +6166,7 @@ isp_handle_other_response(ispsoftc_t *isp, int type, isphdr_t *hp, uint32_t *opt
portid = (uint32_t)rid.ridacq_vp_port_hi << 16 |
rid.ridacq_vp_port_lo;
if (rid.ridacq_format == 0) {
+ GET_NANOTIME(&isp->isp_init_time);
for (chan = 0; chan < isp->isp_nchan; chan++) {
fcparam *fcp = FCPARAM(isp, chan);
if (fcp->role == ISP_ROLE_NONE)
diff --git a/sys/dev/isp/isp_freebsd.c b/sys/dev/isp/isp_freebsd.c
index a734e55..32f8e1d 100644
--- a/sys/dev/isp/isp_freebsd.c
+++ b/sys/dev/isp/isp_freebsd.c
@@ -856,7 +856,7 @@ static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *);
static void isp_handle_platform_ctio(ispsoftc_t *, void *);
static void isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *);
static void isp_handle_platform_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *);
-static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *);
+static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp);
static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *);
static void isp_target_mark_aborted(ispsoftc_t *, union ccb *);
static void isp_target_mark_aborted_early(ispsoftc_t *, tstate_t *, uint32_t);
@@ -2003,7 +2003,7 @@ noresrc:
ntp = isp_get_ntpd(isp, tptr);
if (ntp == NULL) {
rls_lun_statep(isp, tptr);
- isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_BUSY, 0);
+ isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
return;
}
memcpy(ntp->rd.data, aep, QENTRY_LEN);
@@ -2055,7 +2055,7 @@ isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
* It's a bit tricky here as we need to stash this command *somewhere*.
*/
GET_NANOTIME(&now);
- if (NANOTIME_SUB(&isp->isp_init_time, &now) > 2000000000ULL) {
+ if (NANOTIME_SUB(&now, &isp->isp_init_time) > 2000000000ULL) {
isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel- dropping", __func__, aep->at_rxid, did);
isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0);
return;
@@ -2103,7 +2103,7 @@ isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
"%s: [0x%x] no state pointer for lun %jx or wildcard",
__func__, aep->at_rxid, (uintmax_t)lun);
if (lun == 0) {
- isp_endcmd(isp, aep, nphdl, SCSI_STATUS_BUSY, 0);
+ isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
} else {
isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
}
@@ -2761,7 +2761,7 @@ isp_handle_platform_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *inot)
}
static int
-isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp)
+isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp)
{
if (isp->isp_state != ISP_RUNSTATE) {
@@ -2796,6 +2796,15 @@ isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp)
cto->ct_oxid = aep->at_hdr.ox_id;
cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1;
cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT;
+ if (rsp != 0) {
+ cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8);
+ cto->rsp.m1.ct_resplen = 4;
+ ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
+ cto->rsp.m1.ct_resp[0] = rsp & 0xff;
+ cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff;
+ cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff;
+ cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff;
+ }
return (isp_target_put_entry(isp, &local));
}
@@ -3642,7 +3651,8 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
xpt_done(ccb);
break;
}
- if (isp_handle_platform_target_notify_ack(isp, &ntp->rd.nt)) {
+ if (isp_handle_platform_target_notify_ack(isp, &ntp->rd.nt,
+ (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0)) {
rls_lun_statep(isp, tptr);
cam_freeze_devq(ccb->ccb_h.path);
cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
@@ -4408,11 +4418,11 @@ changed:
/*
* This is device arrival/departure notification
*/
- isp_handle_platform_target_notify_ack(isp, notify);
+ isp_handle_platform_target_notify_ack(isp, notify, 0);
break;
default:
isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode);
- isp_handle_platform_target_notify_ack(isp, notify);
+ isp_handle_platform_target_notify_ack(isp, notify, 0);
break;
}
break;
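
The isp_init_time stamps added in isp.c and the swapped NANOTIME_SUB() arguments above are two halves of one fix: the intent is a roughly two-second grace window after the firmware (re)initializes, during which exchanges for a not-yet-known D_ID are not dropped outright. A sketch of the intended arithmetic, assuming NANOTIME_SUB(a, b) returns a minus b in nanoseconds:

	struct timespec now;

	GET_NANOTIME(&now);
	/*
	 * Elapsed time since initialization is now - init_time.  With the
	 * old argument order the subtraction ran backwards and, taken as
	 * an unsigned value, it almost always exceeded 2 s, so the grace
	 * window was effectively never honored.
	 */
	if (NANOTIME_SUB(&now, &isp->isp_init_time) > 2000000000ULL) {
		/* grace window over: log and terminate the exchange */
	}
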
diff --git a/sys/dev/isp/isp_target.c b/sys/dev/isp/isp_target.c
index 702bcf9..c5c277f 100644
--- a/sys/dev/isp/isp_target.c
+++ b/sys/dev/isp/isp_target.c
@@ -539,13 +539,22 @@ isp_endcmd(ispsoftc_t *isp, ...)
} else if (code & ECMD_SVALID) {
cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS;
cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8);
- cto->rsp.m1.ct_resplen = cto->ct_senselen = min(16, MAXRESPLEN_24XX);
+ cto->ct_senselen = min(16, MAXRESPLEN_24XX);
ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
cto->rsp.m1.ct_resp[0] = 0xf0;
cto->rsp.m1.ct_resp[2] = (code >> 12) & 0xf;
cto->rsp.m1.ct_resp[7] = 8;
cto->rsp.m1.ct_resp[12] = (code >> 16) & 0xff;
cto->rsp.m1.ct_resp[13] = (code >> 24) & 0xff;
+ } else if (code & ECMD_RVALID) {
+ cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS;
+ cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8);
+ cto->rsp.m1.ct_resplen = 4;
+ ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
+ cto->rsp.m1.ct_resp[0] = (code >> 12) & 0xf;
+ cto->rsp.m1.ct_resp[1] = (code >> 16) & 0xff;
+ cto->rsp.m1.ct_resp[2] = (code >> 24) & 0xff;
+ cto->rsp.m1.ct_resp[3] = 0;
} else {
cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS;
}
@@ -764,6 +773,7 @@ isp_got_tmf_24xx(ispsoftc_t *isp, at7_entry_t *aep)
isp_notify_t notify;
static const char f1[] = "%s from PortID 0x%06x lun %x seq 0x%08x";
static const char f2[] = "unknown Task Flag 0x%x lun %x PortID 0x%x tag 0x%08x";
+ fcportdb_t *lp;
uint16_t chan;
uint32_t sid, did;
@@ -774,20 +784,23 @@ isp_got_tmf_24xx(ispsoftc_t *isp, at7_entry_t *aep)
notify.nt_tagval = aep->at_rxid;
notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32);
notify.nt_lreserved = aep;
- sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | (aep->at_hdr.s_id[2]);
+ sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) {
/* Channel has to be derived from D_ID */
isp_find_chan_by_did(isp, did, &chan);
if (chan == ISP_NOCHAN) {
isp_prt(isp, ISP_LOGWARN, "%s: D_ID 0x%x not found on any channel", __func__, did);
- /* just drop on the floor */
+ isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0);
return;
}
} else {
chan = 0;
}
- notify.nt_nphdl = NIL_HANDLE; /* unknown here */
+ if (isp_find_pdb_by_portid(isp, chan, sid, &lp))
+ notify.nt_nphdl = lp->handle;
+ else
+ notify.nt_nphdl = NIL_HANDLE;
notify.nt_sid = sid;
notify.nt_did = did;
notify.nt_channel = chan;
@@ -815,6 +828,7 @@ isp_got_tmf_24xx(ispsoftc_t *isp, at7_entry_t *aep)
} else {
isp_prt(isp, ISP_LOGWARN, f2, aep->at_cmnd.fcp_cmnd_task_management, notify.nt_lun, sid, aep->at_rxid);
notify.nt_ncode = NT_UNKNOWN;
+ isp_endcmd(isp, aep, notify.nt_nphdl, chan, ECMD_RVALID | (0x4 << 12), 0);
return;
}
isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify);
diff --git a/sys/dev/isp/ispvar.h b/sys/dev/isp/ispvar.h
index fef13e3..caaba74 100644
--- a/sys/dev/isp/ispvar.h
+++ b/sys/dev/isp/ispvar.h
@@ -1143,7 +1143,8 @@ int isp_target_put_atio(ispsoftc_t *, void *);
*/
int isp_endcmd(ispsoftc_t *, ...);
#define ECMD_SVALID 0x100
-#define ECMD_TERMINATE 0x200
+#define ECMD_RVALID 0x200
+#define ECMD_TERMINATE 0x400
/*
* Handle an asynchronous event
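
The renumbered ECMD_* flags above give isp_endcmd() a third completion flavor: ECMD_SVALID still packs autosense data, while the new ECMD_RVALID packs a 4-byte FCP RSP_INFO field whose response code sits in bits 12..15 of the code word (bits 16..31 supply two further bytes). The isp_target.c change uses it to reject an unrecognized task management function; a fragment showing how such a call is built, with nphdl/chan assumed in scope:

	/*
	 * 0x4 is the FCP response code used here for an unsupported TM
	 * function; isp_endcmd() copies it into the first RSP_INFO byte
	 * of the status CTIO.
	 */
	uint32_t code = ECMD_RVALID | (0x4 << 12);

	isp_endcmd(isp, aep, nphdl, chan, code, 0);
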
diff --git a/sys/dev/iwm/if_iwm.c b/sys/dev/iwm/if_iwm.c
index 4554966..86a6e1d 100644
--- a/sys/dev/iwm/if_iwm.c
+++ b/sys/dev/iwm/if_iwm.c
@@ -105,6 +105,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
@@ -3836,6 +3838,7 @@ static void
iwm_watchdog(void *arg)
{
struct iwm_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
if (sc->sc_tx_timer > 0) {
if (--sc->sc_tx_timer == 0) {
@@ -3843,8 +3846,8 @@ iwm_watchdog(void *arg)
#ifdef IWM_DEBUG
iwm_nic_error(sc);
#endif
- iwm_stop(sc);
- counter_u64_add(sc->sc_ic.ic_oerrors, 1);
+ ieee80211_restart_all(ic);
+ counter_u64_add(ic->ic_oerrors, 1);
return;
}
}
diff --git a/sys/dev/iwm/if_iwm_binding.c b/sys/dev/iwm/if_iwm_binding.c
index 31acf2e..27b0e59 100644
--- a/sys/dev/iwm/if_iwm_binding.c
+++ b/sys/dev/iwm/if_iwm_binding.c
@@ -105,6 +105,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
diff --git a/sys/dev/iwm/if_iwm_mac_ctxt.c b/sys/dev/iwm/if_iwm_mac_ctxt.c
index 50b1b13..8854b97 100644
--- a/sys/dev/iwm/if_iwm_mac_ctxt.c
+++ b/sys/dev/iwm/if_iwm_mac_ctxt.c
@@ -105,6 +105,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
diff --git a/sys/dev/iwm/if_iwm_pcie_trans.c b/sys/dev/iwm/if_iwm_pcie_trans.c
index 343dd9f..63c1576 100644
--- a/sys/dev/iwm/if_iwm_pcie_trans.c
+++ b/sys/dev/iwm/if_iwm_pcie_trans.c
@@ -105,6 +105,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
diff --git a/sys/dev/iwm/if_iwm_phy_ctxt.c b/sys/dev/iwm/if_iwm_phy_ctxt.c
index d1dfadf..fc8349d 100644
--- a/sys/dev/iwm/if_iwm_phy_ctxt.c
+++ b/sys/dev/iwm/if_iwm_phy_ctxt.c
@@ -105,6 +105,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
diff --git a/sys/dev/iwm/if_iwm_phy_db.c b/sys/dev/iwm/if_iwm_phy_db.c
index 90c8da9..ae335b3 100644
--- a/sys/dev/iwm/if_iwm_phy_db.c
+++ b/sys/dev/iwm/if_iwm_phy_db.c
@@ -105,6 +105,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
diff --git a/sys/dev/iwm/if_iwm_power.c b/sys/dev/iwm/if_iwm_power.c
index 5697cf4..93352ae 100644
--- a/sys/dev/iwm/if_iwm_power.c
+++ b/sys/dev/iwm/if_iwm_power.c
@@ -89,6 +89,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
diff --git a/sys/dev/iwm/if_iwm_scan.c b/sys/dev/iwm/if_iwm_scan.c
index 60c2823..ebcadf7 100644
--- a/sys/dev/iwm/if_iwm_scan.c
+++ b/sys/dev/iwm/if_iwm_scan.c
@@ -105,6 +105,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
diff --git a/sys/dev/iwm/if_iwm_time_event.c b/sys/dev/iwm/if_iwm_time_event.c
index 86cf2d1..8396218 100644
--- a/sys/dev/iwm/if_iwm_time_event.c
+++ b/sys/dev/iwm/if_iwm_time_event.c
@@ -105,6 +105,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
diff --git a/sys/dev/iwm/if_iwm_util.c b/sys/dev/iwm/if_iwm_util.c
index 0cd2398..d4327a3 100644
--- a/sys/dev/iwm/if_iwm_util.c
+++ b/sys/dev/iwm/if_iwm_util.c
@@ -105,6 +105,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 104e7c3..d759cfd 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -2264,6 +2264,10 @@ ixl_setup_queue_tqs(struct ixl_vsi *vsi)
{
struct ixl_queue *que = vsi->queues;
device_t dev = vsi->dev;
+#ifdef RSS
+ cpuset_t cpu_mask;
+ int cpu_id;
+#endif
/* Create queue tasks and start queue taskqueues */
for (int i = 0; i < vsi->num_queues; i++, que++) {
@@ -2272,6 +2276,7 @@ ixl_setup_queue_tqs(struct ixl_vsi *vsi)
que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
+ cpu_id = rss_getcpu(i % rss_getnumbuckets());
CPU_SETOF(cpu_id, &cpu_mask);
taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
&cpu_mask, "%s (bucket %d)",
@@ -2355,9 +2360,6 @@ ixl_setup_queue_msix(struct ixl_vsi *vsi)
struct ixl_queue *que = vsi->queues;
struct tx_ring *txr;
int error, rid, vector = 1;
-#ifdef RSS
- cpuset_t cpu_mask;
-#endif
/* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
diff --git a/sys/dev/mlx5/mlx5_en/en.h b/sys/dev/mlx5/mlx5_en/en.h
index 12a6922..e894a3f 100644
--- a/sys/dev/mlx5/mlx5_en/en.h
+++ b/sys/dev/mlx5/mlx5_en/en.h
@@ -391,6 +391,8 @@ struct mlx5e_params {
m(+1, u64 tx_coalesce_usecs, "tx_coalesce_usecs", "Limit in usec for joining tx packets") \
m(+1, u64 tx_coalesce_pkts, "tx_coalesce_pkts", "Maximum number of tx packets to join") \
m(+1, u64 tx_coalesce_mode, "tx_coalesce_mode", "0: EQE mode 1: CQE mode") \
+ m(+1, u64 tx_completion_fact, "tx_completion_fact", "1..MAX: Completion event ratio") \
+ m(+1, u64 tx_completion_fact_max, "tx_completion_fact_max", "Maximum completion event ratio") \
m(+1, u64 hw_lro, "hw_lro", "set to enable hw_lro") \
m(+1, u64 cqe_zipping, "cqe_zipping", "0 : CQE zipping disabled")
@@ -496,6 +498,17 @@ struct mlx5e_sq {
/* dirtied @xmit */
u16 pc __aligned(MLX5E_CACHELINE_SIZE);
u16 bf_offset;
+ u16 cev_counter; /* completion event counter */
+ u16 cev_factor; /* completion event factor */
+ u32 cev_next_state; /* next completion event state */
+#define MLX5E_CEV_STATE_INITIAL 0 /* timer not started */
+#define MLX5E_CEV_STATE_SEND_NOPS 1 /* send NOPs */
+#define MLX5E_CEV_STATE_HOLD_NOPS 2 /* don't send NOPs yet */
+ struct callout cev_callout;
+ union {
+ u32 d32[2];
+ u64 d64;
+ } doorbell;
struct mlx5e_sq_stats stats;
struct mlx5e_cq cq;
@@ -743,8 +756,7 @@ int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
static inline void
-mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5e_tx_wqe *wqe, int bf_sz)
+mlx5e_tx_notify_hw(struct mlx5e_sq *sq, u32 *wqe, int bf_sz)
{
u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
@@ -760,13 +772,13 @@ mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
wmb();
if (bf_sz) {
- __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+ __iowrite64_copy(sq->uar_bf_map + ofst, wqe, bf_sz);
/* flush the write-combining mapped buffer */
wmb();
} else {
- mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+ mlx5_write64(wqe, sq->uar_map + ofst, NULL);
}
sq->bf_offset ^= sq->bf_buf_size;
@@ -786,7 +798,8 @@ void mlx5e_create_ethtool(struct mlx5e_priv *);
void mlx5e_create_stats(struct sysctl_ctx_list *,
struct sysctl_oid_list *, const char *,
const char **, unsigned, u64 *);
-void mlx5e_send_nop(struct mlx5e_sq *, u32, bool);
+void mlx5e_send_nop(struct mlx5e_sq *, u32);
+void mlx5e_sq_cev_timeout(void *);
int mlx5e_refresh_channel_params(struct mlx5e_priv *);
#endif /* _MLX5_EN_H_ */
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c b/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
index f7993e9..45500d7 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
@@ -48,6 +48,45 @@ mlx5e_create_stats(struct sysctl_ctx_list *ctx,
}
}
+static void
+mlx5e_ethtool_sync_tx_completion_fact(struct mlx5e_priv *priv)
+{
+ /*
+ * Limit the maximum distance between completion events to
+ * half of the currently set TX queue size.
+ *
+ * The maximum number of queue entries a single IP packet can
+ * consume is given by MLX5_SEND_WQE_MAX_WQEBBS.
+ *
+ * The worst-case maximum is therefore:
+ */
+ uint64_t max = priv->params_ethtool.tx_queue_size /
+ (2 * MLX5_SEND_WQE_MAX_WQEBBS);
+
+ /*
+ * Update the maximum completion factor value in case the
+ * tx_queue_size field changed. Ensure we don't overflow
+ * 16-bits.
+ */
+ if (max < 1)
+ max = 1;
+ else if (max > 65535)
+ max = 65535;
+ priv->params_ethtool.tx_completion_fact_max = max;
+
+ /*
+ * Verify that the current TX completion factor is within the
+ * given limits:
+ */
+ if (priv->params_ethtool.tx_completion_fact < 1)
+ priv->params_ethtool.tx_completion_fact = 1;
+ else if (priv->params_ethtool.tx_completion_fact > max)
+ priv->params_ethtool.tx_completion_fact = max;
+}
+
+#define MLX5_PARAM_OFFSET(n) \
+ __offsetof(struct mlx5e_priv, params_ethtool.n)
+
static int
mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
{
@@ -74,129 +113,222 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
error = ENXIO;
goto done;
}
- /* import RX coal time */
- if (priv->params_ethtool.rx_coalesce_usecs < 1)
- priv->params_ethtool.rx_coalesce_usecs = 0;
- else if (priv->params_ethtool.rx_coalesce_usecs >
- MLX5E_FLD_MAX(cqc, cq_period)) {
- priv->params_ethtool.rx_coalesce_usecs =
- MLX5E_FLD_MAX(cqc, cq_period);
- }
- priv->params.rx_cq_moderation_usec = priv->params_ethtool.rx_coalesce_usecs;
-
- /* import RX coal pkts */
- if (priv->params_ethtool.rx_coalesce_pkts < 1)
- priv->params_ethtool.rx_coalesce_pkts = 0;
- else if (priv->params_ethtool.rx_coalesce_pkts >
- MLX5E_FLD_MAX(cqc, cq_max_count)) {
- priv->params_ethtool.rx_coalesce_pkts =
- MLX5E_FLD_MAX(cqc, cq_max_count);
- }
- priv->params.rx_cq_moderation_pkts = priv->params_ethtool.rx_coalesce_pkts;
-
- /* import TX coal time */
- if (priv->params_ethtool.tx_coalesce_usecs < 1)
- priv->params_ethtool.tx_coalesce_usecs = 0;
- else if (priv->params_ethtool.tx_coalesce_usecs >
- MLX5E_FLD_MAX(cqc, cq_period)) {
- priv->params_ethtool.tx_coalesce_usecs =
- MLX5E_FLD_MAX(cqc, cq_period);
- }
- priv->params.tx_cq_moderation_usec = priv->params_ethtool.tx_coalesce_usecs;
-
- /* import TX coal pkts */
- if (priv->params_ethtool.tx_coalesce_pkts < 1)
- priv->params_ethtool.tx_coalesce_pkts = 0;
- else if (priv->params_ethtool.tx_coalesce_pkts >
- MLX5E_FLD_MAX(cqc, cq_max_count)) {
- priv->params_ethtool.tx_coalesce_pkts = MLX5E_FLD_MAX(cqc, cq_max_count);
- }
- priv->params.tx_cq_moderation_pkts = priv->params_ethtool.tx_coalesce_pkts;
-
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened) {
- u64 *xarg = priv->params_ethtool.arg + arg2;
-
- if (xarg == &priv->params_ethtool.tx_coalesce_pkts ||
- xarg == &priv->params_ethtool.rx_coalesce_pkts ||
- xarg == &priv->params_ethtool.tx_coalesce_usecs ||
- xarg == &priv->params_ethtool.rx_coalesce_usecs) {
- /* avoid downing and upping the network interface */
+
+ switch (MLX5_PARAM_OFFSET(arg[arg2])) {
+ case MLX5_PARAM_OFFSET(rx_coalesce_usecs):
+ /* import RX coal time */
+ if (priv->params_ethtool.rx_coalesce_usecs < 1)
+ priv->params_ethtool.rx_coalesce_usecs = 0;
+ else if (priv->params_ethtool.rx_coalesce_usecs >
+ MLX5E_FLD_MAX(cqc, cq_period)) {
+ priv->params_ethtool.rx_coalesce_usecs =
+ MLX5E_FLD_MAX(cqc, cq_period);
+ }
+ priv->params.rx_cq_moderation_usec =
+ priv->params_ethtool.rx_coalesce_usecs;
+
+ /* avoid downing and upping the network interface */
+ if (was_opened)
error = mlx5e_refresh_channel_params(priv);
- goto done;
+ break;
+
+ case MLX5_PARAM_OFFSET(rx_coalesce_pkts):
+ /* import RX coal pkts */
+ if (priv->params_ethtool.rx_coalesce_pkts < 1)
+ priv->params_ethtool.rx_coalesce_pkts = 0;
+ else if (priv->params_ethtool.rx_coalesce_pkts >
+ MLX5E_FLD_MAX(cqc, cq_max_count)) {
+ priv->params_ethtool.rx_coalesce_pkts =
+ MLX5E_FLD_MAX(cqc, cq_max_count);
}
- mlx5e_close_locked(priv->ifp);
- }
- /* import TX queue size */
- if (priv->params_ethtool.tx_queue_size <
- (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
- priv->params_ethtool.tx_queue_size =
- (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
- } else if (priv->params_ethtool.tx_queue_size >
- priv->params_ethtool.tx_queue_size_max) {
+ priv->params.rx_cq_moderation_pkts =
+ priv->params_ethtool.rx_coalesce_pkts;
+
+ /* avoid downing and upping the network interface */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_coalesce_usecs):
+ /* import TX coal time */
+ if (priv->params_ethtool.tx_coalesce_usecs < 1)
+ priv->params_ethtool.tx_coalesce_usecs = 0;
+ else if (priv->params_ethtool.tx_coalesce_usecs >
+ MLX5E_FLD_MAX(cqc, cq_period)) {
+ priv->params_ethtool.tx_coalesce_usecs =
+ MLX5E_FLD_MAX(cqc, cq_period);
+ }
+ priv->params.tx_cq_moderation_usec =
+ priv->params_ethtool.tx_coalesce_usecs;
+
+ /* avoid downing and upping the network interface */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_coalesce_pkts):
+ /* import TX coal pkts */
+ if (priv->params_ethtool.tx_coalesce_pkts < 1)
+ priv->params_ethtool.tx_coalesce_pkts = 0;
+ else if (priv->params_ethtool.tx_coalesce_pkts >
+ MLX5E_FLD_MAX(cqc, cq_max_count)) {
+ priv->params_ethtool.tx_coalesce_pkts =
+ MLX5E_FLD_MAX(cqc, cq_max_count);
+ }
+ priv->params.tx_cq_moderation_pkts =
+ priv->params_ethtool.tx_coalesce_pkts;
+
+ /* avoid downing and upping the network interface */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_queue_size):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import TX queue size */
+ if (priv->params_ethtool.tx_queue_size <
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+ priv->params_ethtool.tx_queue_size =
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ } else if (priv->params_ethtool.tx_queue_size >
+ priv->params_ethtool.tx_queue_size_max) {
+ priv->params_ethtool.tx_queue_size =
+ priv->params_ethtool.tx_queue_size_max;
+ }
+ /* store actual TX queue size */
+ priv->params.log_sq_size =
+ order_base_2(priv->params_ethtool.tx_queue_size);
priv->params_ethtool.tx_queue_size =
- priv->params_ethtool.tx_queue_size_max;
- }
- priv->params.log_sq_size =
- order_base_2(priv->params_ethtool.tx_queue_size);
+ 1 << priv->params.log_sq_size;
- /* import RX queue size */
- if (priv->params_ethtool.rx_queue_size <
- (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
- priv->params_ethtool.rx_queue_size =
- (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
- } else if (priv->params_ethtool.rx_queue_size >
- priv->params_ethtool.rx_queue_size_max) {
+ /* verify TX completion factor */
+ mlx5e_ethtool_sync_tx_completion_fact(priv);
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(rx_queue_size):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import RX queue size */
+ if (priv->params_ethtool.rx_queue_size <
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ priv->params_ethtool.rx_queue_size =
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ } else if (priv->params_ethtool.rx_queue_size >
+ priv->params_ethtool.rx_queue_size_max) {
+ priv->params_ethtool.rx_queue_size =
+ priv->params_ethtool.rx_queue_size_max;
+ }
+ /* store actual RX queue size */
+ priv->params.log_rq_size =
+ order_base_2(priv->params_ethtool.rx_queue_size);
priv->params_ethtool.rx_queue_size =
- priv->params_ethtool.rx_queue_size_max;
- }
- priv->params.log_rq_size =
- order_base_2(priv->params_ethtool.rx_queue_size);
-
- priv->params.min_rx_wqes = min_t (u16,
- priv->params_ethtool.rx_queue_size - 1,
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
-
- /* import number of channels */
- if (priv->params_ethtool.channels < 1)
- priv->params_ethtool.channels = 1;
- else if (priv->params_ethtool.channels >
- (u64) priv->mdev->priv.eq_table.num_comp_vectors) {
- priv->params_ethtool.channels =
- (u64) priv->mdev->priv.eq_table.num_comp_vectors;
- }
- priv->params.num_channels = priv->params_ethtool.channels;
-
- /* import RX mode */
- if (priv->params_ethtool.rx_coalesce_mode != 0)
- priv->params_ethtool.rx_coalesce_mode = 1;
- priv->params.rx_cq_moderation_mode = priv->params_ethtool.rx_coalesce_mode;
-
- /* import TX mode */
- if (priv->params_ethtool.tx_coalesce_mode != 0)
- priv->params_ethtool.tx_coalesce_mode = 1;
- priv->params.tx_cq_moderation_mode = priv->params_ethtool.tx_coalesce_mode;
-
- /* we always agree to turn off HW LRO - but not always to turn on */
- if (priv->params_ethtool.hw_lro != 0) {
- if ((priv->ifp->if_capenable & IFCAP_LRO) &&
- MLX5_CAP_ETH(priv->mdev, lro_cap)) {
- priv->params.hw_lro_en = 1;
- priv->params_ethtool.hw_lro = 1;
+ 1 << priv->params.log_rq_size;
+
+ /* update the minimum number of RX WQEs */
+ priv->params.min_rx_wqes = min(
+ priv->params_ethtool.rx_queue_size - 1,
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(channels):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import number of channels */
+ if (priv->params_ethtool.channels < 1)
+ priv->params_ethtool.channels = 1;
+ else if (priv->params_ethtool.channels >
+ (u64) priv->mdev->priv.eq_table.num_comp_vectors) {
+ priv->params_ethtool.channels =
+ (u64) priv->mdev->priv.eq_table.num_comp_vectors;
+ }
+ priv->params.num_channels = priv->params_ethtool.channels;
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(rx_coalesce_mode):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import RX coalesce mode */
+ if (priv->params_ethtool.rx_coalesce_mode != 0)
+ priv->params_ethtool.rx_coalesce_mode = 1;
+ priv->params.rx_cq_moderation_mode =
+ priv->params_ethtool.rx_coalesce_mode;
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_coalesce_mode):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import TX coalesce mode */
+ if (priv->params_ethtool.tx_coalesce_mode != 0)
+ priv->params_ethtool.tx_coalesce_mode = 1;
+ priv->params.tx_cq_moderation_mode =
+ priv->params_ethtool.tx_coalesce_mode;
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(hw_lro):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import HW LRO mode */
+ if (priv->params_ethtool.hw_lro != 0) {
+ if ((priv->ifp->if_capenable & IFCAP_LRO) &&
+ MLX5_CAP_ETH(priv->mdev, lro_cap)) {
+ priv->params.hw_lro_en = 1;
+ priv->params_ethtool.hw_lro = 1;
+ } else {
+ priv->params.hw_lro_en = 0;
+ priv->params_ethtool.hw_lro = 0;
+ error = EINVAL;
+
+ if_printf(priv->ifp, "Can't enable HW LRO: "
+ "The HW or SW LRO feature is disabled\n");
+ }
} else {
priv->params.hw_lro_en = 0;
- priv->params_ethtool.hw_lro = 0;
- error = EINVAL;
-
- if_printf(priv->ifp, "Can't enable HW LRO: "
- "The HW or SW LRO feature is disabled");
}
- } else {
- priv->params.hw_lro_en = 0;
- }
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(cqe_zipping):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
- if (&priv->params_ethtool.arg[arg2] ==
- &priv->params_ethtool.cqe_zipping) {
+ /* import CQE zipping mode */
if (priv->params_ethtool.cqe_zipping &&
MLX5_CAP_GEN(priv->mdev, cqe_compression)) {
priv->params.cqe_zipping_en = true;
@@ -205,9 +337,27 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
priv->params.cqe_zipping_en = false;
priv->params_ethtool.cqe_zipping = 0;
}
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_completion_fact):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* verify parameter */
+ mlx5e_ethtool_sync_tx_completion_fact(priv);
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ default:
+ break;
}
- if (was_opened)
- mlx5e_open_locked(priv->ifp);
done:
PRIV_UNLOCK(priv);
return (error);
@@ -475,6 +625,7 @@ mlx5e_create_ethtool(struct mlx5e_priv *priv)
priv->params_ethtool.tx_coalesce_pkts = priv->params.tx_cq_moderation_pkts;
priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
priv->params_ethtool.cqe_zipping = priv->params.cqe_zipping_en;
+ mlx5e_ethtool_sync_tx_completion_fact(priv);
/* create root node */
node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
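
tx_completion_fact is the TX interrupt moderation knob introduced by this patch: only every Nth send WQE asks for a completion event. The clamp computed in mlx5e_ethtool_sync_tx_completion_fact() keeps the distance between completion events at or below half the ring, measured in worst-case WQEs. Worked numbers, assuming MLX5_SEND_WQE_MAX_WQEBBS is 16 (an assumed typical value, not taken from this patch):

	/* Illustrative only. */
	uint64_t tx_queue_size = 1024;		/* entries in the SQ */
	uint64_t max_wqebbs = 16;		/* assumed MLX5_SEND_WQE_MAX_WQEBBS */
	uint64_t max_fact = tx_queue_size / (2 * max_wqebbs);	/* = 32 */
	/*
	 * tx_completion_fact is then clamped to [1, 32]; a larger factor
	 * could let more than half the ring be in flight without a single
	 * completion event requested, stalling reclamation of the queue.
	 */
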
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index a76d32e..d71cbb3 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -850,7 +850,6 @@ mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq *rq)
{
int err;
- int i;
err = mlx5e_create_rq(c, param, rq);
if (err)
@@ -866,12 +865,6 @@ mlx5e_open_rq(struct mlx5e_channel *c,
c->rq.enabled = 1;
- /*
- * Test send queues, which will trigger
- * "mlx5e_post_rx_wqes()":
- */
- for (i = 0; i != c->num_tc; i++)
- mlx5e_send_nop(&c->sq[i], 1, true);
return (0);
err_disable_rq:
@@ -1185,24 +1178,89 @@ err_destroy_sq:
}
static void
-mlx5e_close_sq(struct mlx5e_sq *sq)
+mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
+{
+ /* fill up remainder with NOPs */
+ while (sq->cev_counter != 0) {
+ while (!mlx5e_sq_has_room_for(sq, 1)) {
+ if (can_sleep != 0) {
+ mtx_unlock(&sq->lock);
+ msleep(4);
+ mtx_lock(&sq->lock);
+ } else {
+ goto done;
+ }
+ }
+ /* send a single NOP */
+ mlx5e_send_nop(sq, 1);
+ wmb();
+ }
+done:
+ /* Check if we need to write the doorbell */
+ if (likely(sq->doorbell.d64 != 0)) {
+ mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
+ sq->doorbell.d64 = 0;
+ }
+ return;
+}
+
+void
+mlx5e_sq_cev_timeout(void *arg)
{
+ struct mlx5e_sq *sq = arg;
- /* ensure hw is notified of all pending wqes */
- if (mlx5e_sq_has_room_for(sq, 1))
- mlx5e_send_nop(sq, 1, true);
+ mtx_assert(&sq->lock, MA_OWNED);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ /* check next state */
+ switch (sq->cev_next_state) {
+ case MLX5E_CEV_STATE_SEND_NOPS:
+ /* fill TX ring with NOPs, if any */
+ mlx5e_sq_send_nops_locked(sq, 0);
+
+ /* check if completed */
+ if (sq->cev_counter == 0) {
+ sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
+ return;
+ }
+ break;
+ default:
+ /* send NOPs on next timeout */
+ sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
+ break;
+ }
+
+ /* restart timer */
+ callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}
static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{
+
+ mtx_lock(&sq->lock);
+ /* tear down the completion event timer, if any */
+ sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
+ callout_stop(&sq->cev_callout);
+
+ /* send dummy NOPs in order to flush the transmit ring */
+ mlx5e_sq_send_nops_locked(sq, 1);
+ mtx_unlock(&sq->lock);
+
+ /* make sure it is safe to free the callout */
+ callout_drain(&sq->cev_callout);
+
+ /* error out remaining requests */
+ mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+
/* wait till SQ is empty */
+ mtx_lock(&sq->lock);
while (sq->cc != sq->pc) {
+ mtx_unlock(&sq->lock);
msleep(4);
sq->cq.mcq.comp(&sq->cq.mcq);
+ mtx_lock(&sq->lock);
}
+ mtx_unlock(&sq->lock);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
@@ -1412,24 +1470,13 @@ mlx5e_open_sqs(struct mlx5e_channel *c,
return (0);
err_close_sqs:
- for (tc--; tc >= 0; tc--) {
- mlx5e_close_sq(&c->sq[tc]);
+ for (tc--; tc >= 0; tc--)
mlx5e_close_sq_wait(&c->sq[tc]);
- }
return (err);
}
static void
-mlx5e_close_sqs(struct mlx5e_channel *c)
-{
- int tc;
-
- for (tc = 0; tc < c->num_tc; tc++)
- mlx5e_close_sq(&c->sq[tc]);
-}
-
-static void
mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
int tc;
@@ -1446,9 +1493,19 @@ mlx5e_chan_mtx_init(struct mlx5e_channel *c)
mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
for (tc = 0; tc < c->num_tc; tc++) {
- mtx_init(&c->sq[tc].lock, "mlx5tx", MTX_NETWORK_LOCK, MTX_DEF);
- mtx_init(&c->sq[tc].comp_lock, "mlx5comp", MTX_NETWORK_LOCK,
+ struct mlx5e_sq *sq = c->sq + tc;
+
+ mtx_init(&sq->lock, "mlx5tx", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&sq->comp_lock, "mlx5comp", MTX_NETWORK_LOCK,
MTX_DEF);
+
+ callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
+
+ sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
+
+ /* ensure the TX completion event factor is not zero */
+ if (sq->cev_factor == 0)
+ sq->cev_factor = 1;
}
}
@@ -1529,7 +1586,6 @@ mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return (0);
err_close_sqs:
- mlx5e_close_sqs(c);
mlx5e_close_sqs_wait(c);
err_close_rx_cq:
@@ -1554,7 +1610,6 @@ mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
if (c == NULL)
return;
mlx5e_close_rq(&c->rq);
- mlx5e_close_sqs(c);
}
static void
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
index 483a7e1..29c8b4b 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
@@ -28,8 +28,20 @@
#include "en.h"
#include <machine/atomic.h>
+static inline bool
+mlx5e_do_send_cqe(struct mlx5e_sq *sq)
+{
+ sq->cev_counter++;
+ /* interleave the CQEs */
+ if (sq->cev_counter >= sq->cev_factor) {
+ sq->cev_counter = 0;
+ return (1);
+ }
+ return (0);
+}
+
void
-mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt, bool notify_hw)
+mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
u16 pi = sq->pc & sq->wq.sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
@@ -38,14 +50,18 @@ mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt, bool notify_hw)
wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ if (mlx5e_do_send_cqe(sq))
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ else
+ wqe->ctrl.fm_ce_se = 0;
+
+ /* Copy data for doorbell */
+ memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
sq->mbuf[pi].mbuf = NULL;
sq->mbuf[pi].num_bytes = 0;
sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
sq->pc += sq->mbuf[pi].num_wqebbs;
- if (notify_hw)
- mlx5e_tx_notify_hw(sq, wqe, 0);
}
#if (__FreeBSD_version >= 1100000)
@@ -206,7 +222,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
pi = ((~sq->pc) & sq->wq.sz_m1);
if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
/* Send one multi NOP message instead of many */
- mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS, false);
+ mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
pi = ((~sq->pc) & sq->wq.sz_m1);
if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
m_freem(mb);
@@ -340,7 +356,13 @@ skip_dma:
wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ if (mlx5e_do_send_cqe(sq))
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ else
+ wqe->ctrl.fm_ce_se = 0;
+
+ /* Copy data for doorbell */
+ memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
/* Store pointer to mbuf */
sq->mbuf[pi].mbuf = mb;
@@ -351,8 +373,6 @@ skip_dma:
if (mb != NULL)
bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map, BUS_DMASYNC_PREWRITE);
- mlx5e_tx_notify_hw(sq, wqe, 0);
-
sq->stats.packets++;
return (0);
@@ -374,9 +394,10 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
*/
sqcc = sq->cc;
- while (budget--) {
+ while (budget > 0) {
struct mlx5_cqe64 *cqe;
struct mbuf *mb;
+ u16 x;
u16 ci;
cqe = mlx5e_get_cqe(&sq->cq);
@@ -385,24 +406,29 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
mlx5_cqwq_pop(&sq->cq.wq);
- ci = sqcc & sq->wq.sz_m1;
- mb = sq->mbuf[ci].mbuf;
- sq->mbuf[ci].mbuf = NULL; /* Safety clear */
+ /* update budget according to the event factor */
+ budget -= sq->cev_factor;
- if (mb == NULL) {
- if (sq->mbuf[ci].num_bytes == 0) {
- /* NOP */
- sq->stats.nop++;
- }
- } else {
- bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);
+ for (x = 0; x != sq->cev_factor; x++) {
+ ci = sqcc & sq->wq.sz_m1;
+ mb = sq->mbuf[ci].mbuf;
+ sq->mbuf[ci].mbuf = NULL; /* Safety clear */
- /* Free transmitted mbuf */
- m_freem(mb);
+ if (mb == NULL) {
+ if (sq->mbuf[ci].num_bytes == 0) {
+ /* NOP */
+ sq->stats.nop++;
+ }
+ } else {
+ bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);
+
+ /* Free transmitted mbuf */
+ m_freem(mb);
+ }
+ sqcc += sq->mbuf[ci].num_wqebbs;
}
- sqcc += sq->mbuf[ci].num_wqebbs;
}
mlx5_cqwq_update_db_record(&sq->cq.wq);
@@ -450,6 +476,23 @@ mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
}
+ /* Check if we need to write the doorbell */
+ if (likely(sq->doorbell.d64 != 0)) {
+ mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
+ sq->doorbell.d64 = 0;
+ }
+ /*
+ * Check if we need to start the event timer which flushes the
+ * transmit ring on timeout:
+ */
+ if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
+ sq->cev_factor != 1)) {
+ /* start the timer */
+ mlx5e_sq_cev_timeout(sq);
+ } else {
+ /* don't send NOPs yet */
+ sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
+ }
return (err);
}
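
Two consequences of the completion factor follow from the mlx5_en_tx.c changes above: a CQE is requested only every cev_factor WQEs, so the completion poller must retire cev_factor ring entries per CQE, and the doorbell is staged in sq->doorbell and rung once per transmit batch rather than per packet. A condensed sketch of both halves (fragment; sq, wqe and the u16 ring indices x, ci, sqcc are as in the driver):

	/* Transmit side: request a CQE only on every cev_factor-th WQE. */
	if (++sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	} else
		wqe->ctrl.fm_ce_se = 0;
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
	/* ...the doorbell is written once later, when the batch ends... */

	/* Completion side: one CQE acknowledges cev_factor entries. */
	for (x = 0; x != sq->cev_factor; x++) {
		ci = sqcc & sq->wq.sz_m1;
		/* reclaim sq->mbuf[ci] (unmap, m_freem) */
		sqcc += sq->mbuf[ci].num_wqebbs;
	}
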
diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c
index 100d7b5..f930734 100644
--- a/sys/dev/mwl/if_mwl.c
+++ b/sys/dev/mwl/if_mwl.c
@@ -4384,113 +4384,33 @@ mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
#define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
static void
-addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
-{
- c->ic_freq = freq;
- c->ic_flags = flags;
- c->ic_ieee = ieee;
- c->ic_minpower = 0;
- c->ic_maxpower = 2*txpow;
- c->ic_maxregpower = txpow;
-}
-
-static const struct ieee80211_channel *
-findchannel(const struct ieee80211_channel chans[], int nchans,
- int freq, int flags)
-{
- const struct ieee80211_channel *c;
- int i;
-
- for (i = 0; i < nchans; i++) {
- c = &chans[i];
- if (c->ic_freq == freq && c->ic_flags == flags)
- return c;
- }
- return NULL;
-}
-
-static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
const MWL_HAL_CHANNELINFO *ci, int flags)
{
- struct ieee80211_channel *c;
- const struct ieee80211_channel *extc;
- const struct mwl_hal_channel *hc;
- int i;
+ int i, error;
- c = &chans[*nchans];
-
- flags &= ~IEEE80211_CHAN_HT;
for (i = 0; i < ci->nchannels; i++) {
- /*
- * Each entry defines an HT40 channel pair; find the
- * extension channel above and the insert the pair.
- */
- hc = &ci->channels[i];
- extc = findchannel(chans, *nchans, hc->freq+20,
- flags | IEEE80211_CHAN_HT20);
- if (extc != NULL) {
- if (*nchans >= maxchans)
- break;
- addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
- hc->ieee, hc->maxTxPow);
- c->ic_extieee = extc->ic_ieee;
- c++, (*nchans)++;
- if (*nchans >= maxchans)
- break;
- addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
- extc->ic_ieee, hc->maxTxPow);
- c->ic_extieee = hc->ieee;
- c++, (*nchans)++;
- }
+ const struct mwl_hal_channel *hc = &ci->channels[i];
+
+ error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
+ hc->ieee, hc->maxTxPow, flags);
+ if (error != 0 && error != ENOENT)
+ break;
}
}
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
- const MWL_HAL_CHANNELINFO *ci, int flags)
+ const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
{
- struct ieee80211_channel *c;
- int i;
+ int i, error;
- c = &chans[*nchans];
-
- for (i = 0; i < ci->nchannels; i++) {
- const struct mwl_hal_channel *hc;
+ error = 0;
+ for (i = 0; i < ci->nchannels && error == 0; i++) {
+ const struct mwl_hal_channel *hc = &ci->channels[i];
- hc = &ci->channels[i];
- if (*nchans >= maxchans)
- break;
- addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
- c++, (*nchans)++;
- if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
- /* g channel have a separate b-only entry */
- if (*nchans >= maxchans)
- break;
- c[0] = c[-1];
- c[-1].ic_flags = IEEE80211_CHAN_B;
- c++, (*nchans)++;
- }
- if (flags == IEEE80211_CHAN_HTG) {
- /* HT g channel have a separate g-only entry */
- if (*nchans >= maxchans)
- break;
- c[-1].ic_flags = IEEE80211_CHAN_G;
- c[0] = c[-1];
- c[0].ic_flags &= ~IEEE80211_CHAN_HT;
- c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
- c++, (*nchans)++;
- }
- if (flags == IEEE80211_CHAN_HTA) {
- /* HT a channel have a separate a-only entry */
- if (*nchans >= maxchans)
- break;
- c[-1].ic_flags = IEEE80211_CHAN_A;
- c[0] = c[-1];
- c[0].ic_flags &= ~IEEE80211_CHAN_HT;
- c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
- c++, (*nchans)++;
- }
+ error = ieee80211_add_channel(chans, maxchans, nchans,
+ hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
}
}
@@ -4499,6 +4419,7 @@ getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
struct ieee80211_channel chans[])
{
const MWL_HAL_CHANNELINFO *ci;
+ uint8_t bands[IEEE80211_MODE_BYTES];
/*
* Use the channel info from the hal to craft the
@@ -4508,11 +4429,20 @@ getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
*/
*nchans = 0;
if (mwl_hal_getchannelinfo(sc->sc_mh,
- MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
- addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
+ MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
+ memset(bands, 0, sizeof(bands));
+ setbit(bands, IEEE80211_MODE_11B);
+ setbit(bands, IEEE80211_MODE_11G);
+ setbit(bands, IEEE80211_MODE_11NG);
+ addchannels(chans, maxchans, nchans, ci, bands);
+ }
if (mwl_hal_getchannelinfo(sc->sc_mh,
- MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
- addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
+ MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
+ memset(bands, 0, sizeof(bands));
+ setbit(bands, IEEE80211_MODE_11A);
+ setbit(bands, IEEE80211_MODE_11NA);
+ addchannels(chans, maxchans, nchans, ci, bands);
+ }
if (mwl_hal_getchannelinfo(sc->sc_mh,
MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
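
The mwl(4) conversion above leans on net80211's channel helpers instead of cloning ieee80211_channel entries by hand: a band bitmask tells ieee80211_add_channel() which per-band variants to emit for a frequency, and ieee80211_add_channel_ht40() locates the extension channel on its own. A minimal usage sketch, with chans/maxchans/nchans as passed into addchannels() and an illustrative channel (1 / 2412 MHz, 30 dBm):

	uint8_t bands[IEEE80211_MODE_BYTES];
	int error;

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	setbit(bands, IEEE80211_MODE_11NG);
	/* One call emits the 11b, 11g and HT20 entries for the channel. */
	error = ieee80211_add_channel(chans, maxchans, nchans,
	    1, 2412, 30, 0, bands);
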
diff --git a/sys/dev/nvram2env/nvram2env.c b/sys/dev/nvram2env/nvram2env.c
index 4c0ce1e..a942348 100644
--- a/sys/dev/nvram2env/nvram2env.c
+++ b/sys/dev/nvram2env/nvram2env.c
@@ -193,10 +193,12 @@ static uint32_t read_4(struct nvram2env_softc * sc, int offset)
static int
nvram2env_attach(device_t dev)
{
- struct nvram2env_softc * sc = device_get_softc(dev);
- struct nvram * nv;
+ struct nvram2env_softc *sc;
+ struct nvram *nv;
char *pair, *value, *assign;
- uint32_t sig, size, i;
+ uint32_t sig, size, i, *tmp;
+
+ sc = device_get_softc(dev);
if (sc->bst == 0 || sc->addr == 0)
return (ENXIO);
@@ -217,16 +219,22 @@ nvram2env_attach(device_t dev)
if (sig == sc->sig || (sc->flags & NVRAM_FLAGS_UBOOT))
{
- /* align and shift size to 32bit size*/
+ /* align size to a 32-bit boundary */
size += 3;
- size >>= 2;
+ size &= ~3;
- nv = malloc(size<<2, M_DEVBUF, M_WAITOK | M_ZERO);
+ nv = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
if (!nv)
return (ENOMEM);
+ /* point tmp at the beginning of the NVRAM buffer */
+ tmp = (uint32_t *) nv;
- for (i = 0; i < size; i ++)
- ((uint32_t *)nv)[i] = read_4(sc, i<<2);
+ /* use read_4 to swap bytes if required */
+ for (i = 0; i < size; i += 4) {
+ *tmp = read_4(sc, i);
+ tmp++;
+ }
+ /* tmp now points to the end of the NVRAM data */
if (sc->flags & NVRAM_FLAGS_BROADCOM) {
device_printf(dev, "sig = %#x\n", nv->sig);
@@ -246,49 +254,47 @@ nvram2env_attach(device_t dev)
else
pair = (char*)nv+4;
- for ( ;
- (u_int32_t)pair < ((u_int32_t)nv + size - 4);
- pair = pair + strlen(pair) + 1 + strlen(value) + 1 ) {
+ /* iterate over the buffer; tmp marks the end of the NVRAM data */
+ for ( ; pair < (char*)tmp;
+ pair += strlen(pair) + strlen(value) + 2 ) {
- if (pair && strlen(pair)) {
+ if (!pair || (strlen(pair) == 0))
+ break;
-#if 0
- printf("ENV: %s\n", pair);
-#endif
- /* hint.nvram.0. */
- assign = strchr(pair,'=');
- assign[0] = '\0';
- value = assign+1;
+ /* hint.nvram.0. */
+ assign = strchr(pair,'=');
+ assign[0] = '\0';
+ value = assign+1;
#if 1
- if (bootverbose)
- printf("ENV: %s=%s\n", pair, value);
+ if (bootverbose)
+ printf("ENV: %s=%s\n", pair, value);
+#else
+ printf("ENV: %s\n", pair);
#endif
- kern_setenv(pair, value);
-
- if (strcasecmp(pair, "WAN_MAC_ADDR") == 0) {
- /* Alias for MAC address of eth0 */
- if (bootverbose)
- printf("ENV: aliasing "
- "WAN_MAC_ADDR to ethaddr"
- " = %s\n", value);
- kern_setenv("ethaddr", value);
- }
- else if (strcasecmp(pair, "LAN_MAC_ADDR") == 0){
- /* Alias for MAC address of eth1 */
- if (bootverbose)
- printf("ENV: aliasing "
- "LAN_MAC_ADDR to eth1addr"
- " = %s\n", value);
- kern_setenv("eth1addr", value);
- }
-
- if (strcmp(pair, "bootverbose") == 0)
- bootverbose = strtoul(value, 0, 0);
- if (strcmp(pair, "boothowto" ) == 0)
- boothowto = strtoul(value, 0, 0);
+ kern_setenv(pair, value);
+
+ if (strcasecmp(pair, "WAN_MAC_ADDR") == 0) {
+ /* Alias for MAC address of eth0 */
+ if (bootverbose)
+ printf("ENV: aliasing "
+ "WAN_MAC_ADDR to ethaddr"
+ " = %s\n", value);
+ kern_setenv("ethaddr", value);
}
- else
- break;
+ else if (strcasecmp(pair, "LAN_MAC_ADDR") == 0){
+ /* Alias for MAC address of eth1 */
+ if (bootverbose)
+ printf("ENV: aliasing "
+ "LAN_MAC_ADDR to eth1addr"
+ " = %s\n", value);
+ kern_setenv("eth1addr", value);
+ }
+
+ if (strcmp(pair, "bootverbose") == 0)
+ bootverbose = strtoul(value, 0, 0);
+ if (strcmp(pair, "boothowto" ) == 0)
+ boothowto = strtoul(value, 0, 0);
+
}
free(nv, M_DEVBUF);
}
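
For reference, the environment block nvram2env walks is a sequence of packed, NUL-terminated "name=value" strings, ended by an empty string. A generic sketch of the walk (not the driver code; start/end bound the copied buffer):

	char *p, *eq;
	size_t len;

	for (p = start; p < end && *p != '\0'; p += len + 1) {
		len = strlen(p);	/* length of the full "name=value" entry */
		eq = strchr(p, '=');
		if (eq == NULL)
			break;		/* malformed entry, stop parsing */
		*eq = '\0';
		kern_setenv(p, eq + 1);	/* name, value */
	}
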
diff --git a/sys/dev/pccbb/pccbb_pci.c b/sys/dev/pccbb/pccbb_pci.c
index 3ccec52..7bf653a 100644
--- a/sys/dev/pccbb/pccbb_pci.c
+++ b/sys/dev/pccbb/pccbb_pci.c
@@ -435,6 +435,22 @@ err:
return (ENOMEM);
}
+static int
+cbb_pci_detach(device_t brdev)
+{
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ struct cbb_softc *sc = device_get_softc(brdev);
+#endif
+ int error;
+
+ error = cbb_detach(brdev);
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ if (error == 0)
+ pcib_free_secbus(brdev, &sc->bus);
+#endif
+ return (error);
+}
+
static void
cbb_chipinit(struct cbb_softc *sc)
{
@@ -917,7 +933,7 @@ static device_method_t cbb_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, cbb_pci_probe),
DEVMETHOD(device_attach, cbb_pci_attach),
- DEVMETHOD(device_detach, cbb_detach),
+ DEVMETHOD(device_detach, cbb_pci_detach),
DEVMETHOD(device_shutdown, cbb_pci_shutdown),
DEVMETHOD(device_suspend, cbb_pci_suspend),
DEVMETHOD(device_resume, cbb_pci_resume),
diff --git a/sys/dev/pci/pci_host_generic.c b/sys/dev/pci/pci_host_generic.c
index 8cc0b79..bf58e96 100644
--- a/sys/dev/pci/pci_host_generic.c
+++ b/sys/dev/pci/pci_host_generic.c
@@ -501,7 +501,14 @@ static int
generic_pcie_release_resource(device_t dev, device_t child, int type,
int rid, struct resource *res)
{
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ struct generic_pcie_softc *sc;
+ if (type == PCI_RES_BUS) {
+ sc = device_get_softc(dev);
+ return (pci_domain_release_bus(sc->ecam, child, rid, res));
+ }
+#endif
/* For PCIe devices that do not have FDT nodes, use PCIB method */
if ((int)ofw_bus_get_node(child) <= 0) {
return (generic_pcie_release_resource_pcie(dev,
@@ -517,7 +524,15 @@ struct resource *
pci_host_generic_alloc_resource(device_t dev, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ struct generic_pcie_softc *sc;
+ if (type == PCI_RES_BUS) {
+ sc = device_get_softc(dev);
+ return (pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
+ count, flags));
+ }
+#endif
/* For PCIe devices that do not have FDT nodes, use PCIB method */
if ((int)ofw_bus_get_node(child) <= 0)
return (generic_pcie_alloc_resource_pcie(dev, child, type, rid,
@@ -579,6 +594,11 @@ generic_pcie_adjust_resource(device_t dev, device_t child, int type,
struct rman *rm;
sc = device_get_softc(dev);
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ if (type == PCI_RES_BUS)
+ return (pci_domain_adjust_bus(sc->ecam, child, res, start,
+ end));
+#endif
rm = generic_pcie_rman(sc, type);
if (rm != NULL)
diff --git a/sys/dev/pci/pci_pci.c b/sys/dev/pci/pci_pci.c
index afc0a08..06e9e1c 100644
--- a/sys/dev/pci/pci_pci.c
+++ b/sys/dev/pci/pci_pci.c
@@ -81,7 +81,7 @@ static device_method_t pcib_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, pcib_probe),
DEVMETHOD(device_attach, pcib_attach),
- DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_detach, pcib_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, pcib_suspend),
DEVMETHOD(device_resume, pcib_resume),
@@ -544,6 +544,42 @@ pcib_probe_windows(struct pcib_softc *sc)
}
}
+static void
+pcib_release_window(struct pcib_softc *sc, struct pcib_window *w, int type)
+{
+ device_t dev;
+ int error, i;
+
+ if (!w->valid)
+ return;
+
+ dev = sc->dev;
+ error = rman_fini(&w->rman);
+ if (error) {
+ device_printf(dev, "failed to release %s rman\n", w->name);
+ return;
+ }
+ free(__DECONST(char *, w->rman.rm_descr), M_DEVBUF);
+
+ for (i = 0; i < w->count; i++) {
+ error = bus_free_resource(dev, type, w->res[i]);
+ if (error)
+ device_printf(dev,
+ "failed to release %s resource: %d\n", w->name,
+ error);
+ }
+ free(w->res, M_DEVBUF);
+}
+
+static void
+pcib_free_windows(struct pcib_softc *sc)
+{
+
+ pcib_release_window(sc, &sc->pmem, SYS_RES_MEMORY);
+ pcib_release_window(sc, &sc->mem, SYS_RES_MEMORY);
+ pcib_release_window(sc, &sc->io, SYS_RES_IOPORT);
+}
+
#ifdef PCI_RES_BUS
/*
* Allocate a suitable secondary bus for this bridge if needed and
@@ -618,6 +654,24 @@ pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count)
}
}
+void
+pcib_free_secbus(device_t dev, struct pcib_secbus *bus)
+{
+ int error;
+
+ error = rman_fini(&bus->rman);
+ if (error) {
+ device_printf(dev, "failed to release bus number rman\n");
+ return;
+ }
+ free(__DECONST(char *, bus->rman.rm_descr), M_DEVBUF);
+
+ error = bus_free_resource(dev, PCI_RES_BUS, bus->res);
+ if (error)
+ device_printf(dev,
+ "failed to release bus numbers resource: %d\n", error);
+}
+
static struct resource *
pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
@@ -896,7 +950,8 @@ pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask)
if (new == ctl)
return;
pcie_write_config(dev, PCIER_SLOT_CTL, new, 2);
- if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS)) {
+ if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) &&
+ (ctl & new) & PCIEM_SLOT_CTL_CCIE) {
sc->flags |= PCIB_HOTPLUG_CMD_PENDING;
if (!cold)
callout_reset(&sc->pcie_cc_timer, hz,
@@ -917,6 +972,7 @@ pcib_pcie_hotplug_command_completed(struct pcib_softc *sc)
return;
callout_stop(&sc->pcie_cc_timer);
sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
+ wakeup(sc);
}
/*
@@ -1153,16 +1209,22 @@ pcib_pcie_cc_timeout(void *arg)
{
struct pcib_softc *sc;
device_t dev;
+ uint16_t sta;
sc = arg;
dev = sc->dev;
mtx_assert(&Giant, MA_OWNED);
- if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) {
+ sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
+ if (!(sta & PCIEM_SLOT_STA_CC)) {
device_printf(dev,
"Hotplug Command Timed Out - forcing detach\n");
sc->flags &= ~(PCIB_HOTPLUG_CMD_PENDING | PCIB_DETACH_PENDING);
sc->flags |= PCIB_DETACHING;
pcib_pcie_hotplug_update(sc, 0, 0, true);
+ } else {
+ device_printf(dev,
+ "Missed HotPlug interrupt waiting for Command Completion\n");
+ pcib_pcie_intr(sc);
}
}
@@ -1242,6 +1304,22 @@ pcib_alloc_pcie_irq(struct pcib_softc *sc)
return (0);
}
+static int
+pcib_release_pcie_irq(struct pcib_softc *sc)
+{
+ device_t dev;
+ int error;
+
+ dev = sc->dev;
+ error = bus_teardown_intr(dev, sc->pcie_irq, sc->pcie_ihand);
+ if (error)
+ return (error);
+ error = bus_free_resource(dev, SYS_RES_IRQ, sc->pcie_irq);
+ if (error)
+ return (error);
+ return (pci_release_msi(dev));
+}
+
static void
pcib_setup_hotplug(struct pcib_softc *sc)
{
@@ -1261,6 +1339,9 @@ pcib_setup_hotplug(struct pcib_softc *sc)
sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
+ /* Clear any events previously pending. */
+ pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2);
+
/* Enable HotPlug events. */
mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
@@ -1285,6 +1366,49 @@ pcib_setup_hotplug(struct pcib_softc *sc)
pcib_pcie_hotplug_update(sc, val, mask, false);
}
+
+static int
+pcib_detach_hotplug(struct pcib_softc *sc)
+{
+ uint16_t mask, val;
+ int error;
+
+ /* Disable the card in the slot and force it to detach. */
+ if (sc->flags & PCIB_DETACH_PENDING) {
+ sc->flags &= ~PCIB_DETACH_PENDING;
+ callout_stop(&sc->pcie_ab_timer);
+ }
+ sc->flags |= PCIB_DETACHING;
+
+ if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) {
+ callout_stop(&sc->pcie_cc_timer);
+ tsleep(sc, 0, "hpcmd", hz);
+ sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
+ }
+
+ /* Disable HotPlug events. */
+ mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
+ PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
+ PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE;
+ val = 0;
+
+ /* Turn the attention indicator off. */
+ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) {
+ mask |= PCIEM_SLOT_CTL_AIC;
+ val |= PCIEM_SLOT_CTL_AI_OFF;
+ }
+
+ pcib_pcie_hotplug_update(sc, val, mask, false);
+
+ error = pcib_release_pcie_irq(sc);
+ if (error)
+ return (error);
+ taskqueue_drain(taskqueue_thread, &sc->pcie_hp_task);
+ callout_drain(&sc->pcie_ab_timer);
+ callout_drain(&sc->pcie_cc_timer);
+ callout_drain(&sc->pcie_dll_timer);
+ return (0);
+}
#endif
/*
@@ -1571,6 +1695,39 @@ pcib_attach(device_t dev)
}
int
+pcib_detach(device_t dev)
+{
+#if defined(PCI_HP) || defined(NEW_PCIB)
+ struct pcib_softc *sc;
+#endif
+ int error;
+
+#if defined(PCI_HP) || defined(NEW_PCIB)
+ sc = device_get_softc(dev);
+#endif
+ error = bus_generic_detach(dev);
+ if (error)
+ return (error);
+#ifdef PCI_HP
+ if (sc->flags & PCIB_HOTPLUG) {
+ error = pcib_detach_hotplug(sc);
+ if (error)
+ return (error);
+ }
+#endif
+ error = device_delete_children(dev);
+ if (error)
+ return (error);
+#ifdef NEW_PCIB
+ pcib_free_windows(sc);
+#ifdef PCI_RES_BUS
+ pcib_free_secbus(dev, &sc->bus);
+#endif
+#endif
+ return (0);
+}
+
+int
pcib_suspend(device_t dev)
{
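
The wakeup(sc) added to pcib_pcie_hotplug_command_completed() exists for the new detach path: if a Slot Control write is still awaiting its Command Completed event, pcib_detach_hotplug() parks on the softc for up to a second before disabling HotPlug events. A sketch of the pairing (fragment; sc and its flags as in struct pcib_softc):

	/* Interrupt side: the pending command has completed. */
	sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
	wakeup(sc);

	/* Detach side: wait (bounded) for that completion before teardown. */
	if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) {
		callout_stop(&sc->pcie_cc_timer);
		tsleep(sc, 0, "hpcmd", hz);	/* at most one second */
		sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
	}
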
diff --git a/sys/dev/pci/pcib_private.h b/sys/dev/pci/pcib_private.h
index 2d805a9..65aec8d 100644
--- a/sys/dev/pci/pcib_private.h
+++ b/sys/dev/pci/pcib_private.h
@@ -158,6 +158,7 @@ int pci_domain_release_bus(int domain, device_t dev, int rid,
struct resource *pcib_alloc_subbus(struct pcib_secbus *bus, device_t child,
int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
u_int flags);
+void pcib_free_secbus(device_t dev, struct pcib_secbus *bus);
void pcib_setup_secbus(device_t dev, struct pcib_secbus *bus,
int min_count);
#endif
@@ -169,6 +170,7 @@ void pcib_bridge_init(device_t dev);
const char *pcib_child_name(device_t child);
#endif
int pcib_child_present(device_t dev, device_t child);
+int pcib_detach(device_t dev);
int pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result);
int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value);
struct resource *pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
diff --git a/sys/dev/sfxge/common/efsys.h b/sys/dev/sfxge/common/efsys.h
index 21c56c9..717f415 100644
--- a/sys/dev/sfxge/common/efsys.h
+++ b/sys/dev/sfxge/common/efsys.h
@@ -1110,22 +1110,6 @@ typedef struct efsys_lock_s {
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
-/* PREEMPT */
-
-#define EFSYS_PREEMPT_DISABLE(_state) \
- do { \
- (_state) = (_state); \
- critical_enter(); \
- _NOTE(CONSTANTCONDITION) \
- } while (B_FALSE)
-
-#define EFSYS_PREEMPT_ENABLE(_state) \
- do { \
- (_state) = (_state); \
- critical_exit(_state); \
- _NOTE(CONSTANTCONDITION) \
- } while (B_FALSE)
-
/* STAT */
typedef uint64_t efsys_stat_t;
diff --git a/sys/dev/siba/siba_bwn.c b/sys/dev/siba/siba_bwn.c
index 81f8a8e..0508b6c 100644
--- a/sys/dev/siba/siba_bwn.c
+++ b/sys/dev/siba/siba_bwn.c
@@ -93,9 +93,9 @@ static const struct siba_dev {
{ PCI_VENDOR_BROADCOM, 0x4324,
"Broadcom BCM4309 802.11a/b/g Wireless" },
{ PCI_VENDOR_BROADCOM, 0x4325, "Broadcom BCM4306 802.11b/g Wireless" },
- { PCI_VENDOR_BROADCOM, 0x4328, "Broadcom BCM4321 802.11a/b/g Wireless" },
+ { PCI_VENDOR_BROADCOM, 0x4328, "Broadcom BCM4321 802.11a/b/g/n Wireless" },
{ PCI_VENDOR_BROADCOM, 0x4329, "Unknown" },
- { PCI_VENDOR_BROADCOM, 0x432b, "Unknown" }
+ { PCI_VENDOR_BROADCOM, 0x432b, "Broadcom BCM4322 802.11a/b/g/n Wireless" }
};
int siba_core_attach(struct siba_softc *);
diff --git a/sys/dev/urtwn/if_urtwn.c b/sys/dev/urtwn/if_urtwn.c
index efbdaba..221852f 100644
--- a/sys/dev/urtwn/if_urtwn.c
+++ b/sys/dev/urtwn/if_urtwn.c
@@ -2838,26 +2838,24 @@ urtwn_tx_data(struct urtwn_softc *sc, struct ieee80211_node *ni,
struct ieee80211_channel *chan;
struct ieee80211_frame *wh;
struct r92c_tx_desc *txd;
- uint8_t macid, raid, rate, ridx, subtype, type, tid, qsel;
+ uint8_t macid, raid, rate, ridx, type, tid, qos, qsel;
int hasqos, ismcast;
URTWN_ASSERT_LOCKED(sc);
- /*
- * Software crypto.
- */
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
- subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
hasqos = IEEE80211_QOS_HAS_SEQ(wh);
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
/* Select TX ring for this frame. */
if (hasqos) {
- tid = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
- tid &= IEEE80211_QOS_TID;
- } else
+ qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
+ tid = qos & IEEE80211_QOS_TID;
+ } else {
+ qos = 0;
tid = 0;
+ }
chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ?
ni->ni_chan : ic->ic_curchan;
@@ -2923,6 +2921,14 @@ urtwn_tx_data(struct urtwn_softc *sc, struct ieee80211_node *ni,
txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);
if (!ismcast) {
+ /* Unicast frame, check if an ACK is expected. */
+ if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
+ IEEE80211_QOS_ACKPOLICY_NOACK) {
+ txd->txdw5 |= htole32(R92C_TXDW5_RTY_LMT_ENA);
+ txd->txdw5 |= htole32(SM(R92C_TXDW5_RTY_LMT,
+ tp->maxretry));
+ }
+
if (sc->chip & URTWN_CHIP_88E) {
struct urtwn_node *un = URTWN_NODE(ni);
macid = un->id;
@@ -3102,6 +3108,11 @@ urtwn_tx_raw(struct urtwn_softc *sc, struct ieee80211_node *ni,
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);
+ if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) {
+ txd->txdw5 |= htole32(R92C_TXDW5_RTY_LMT_ENA);
+ txd->txdw5 |= htole32(SM(R92C_TXDW5_RTY_LMT,
+ params->ibp_try0));
+ }
if (params->ibp_flags & IEEE80211_BPF_RTS)
txd->txdw4 |= htole32(R92C_TXDW4_RTSEN);
if (params->ibp_flags & IEEE80211_BPF_CTS)
diff --git a/sys/dev/urtwn/if_urtwnreg.h b/sys/dev/urtwn/if_urtwnreg.h
index 45ab56d..7b618aa 100644
--- a/sys/dev/urtwn/if_urtwnreg.h
+++ b/sys/dev/urtwn/if_urtwnreg.h
@@ -1198,6 +1198,9 @@ struct r92c_tx_desc {
#define R92C_TXDW5_DATARATE_M 0x0000003f
#define R92C_TXDW5_DATARATE_S 0
#define R92C_TXDW5_SGI 0x00000040
+#define R92C_TXDW5_RTY_LMT_ENA 0x00020000
+#define R92C_TXDW5_RTY_LMT_M 0x00fc0000
+#define R92C_TXDW5_RTY_LMT_S 18
#define R92C_TXDW5_AGGNUM_M 0xff000000
#define R92C_TXDW5_AGGNUM_S 24
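
Illustration only, not part of the patch: how the new retry-limit field definitions combine into TX descriptor word 5. The driver does this with its SM() shift/mask helper plus htole32(); the standalone helper below spells the packing out explicitly (endianness handling omitted).

#include <stdint.h>

#define R92C_TXDW5_RTY_LMT_ENA	0x00020000
#define R92C_TXDW5_RTY_LMT_M	0x00fc0000
#define R92C_TXDW5_RTY_LMT_S	18

static inline uint32_t
txdw5_set_retry_limit(uint32_t txdw5, unsigned int limit)
{
	txdw5 |= R92C_TXDW5_RTY_LMT_ENA;
	txdw5 |= (limit << R92C_TXDW5_RTY_LMT_S) & R92C_TXDW5_RTY_LMT_M;
	return (txdw5);
}
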
diff --git a/sys/dev/vnic/mrml_bridge.c b/sys/dev/vnic/mrml_bridge.c
index cab8da5..7f4dc12 100644
--- a/sys/dev/vnic/mrml_bridge.c
+++ b/sys/dev/vnic/mrml_bridge.c
@@ -85,6 +85,7 @@ static devclass_t mrmlbus_fdt_devclass;
EARLY_DRIVER_MODULE(mrmlbus, pcib, mrmlbus_fdt_driver, mrmlbus_fdt_devclass, 0, 0,
BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+MODULE_VERSION(mrmlbus, 1);
static int mrmlb_ofw_fill_ranges(phandle_t, struct simplebus_softc *);
static int mrmlb_ofw_bus_attach(device_t);
diff --git a/sys/dev/vnic/nic_main.c b/sys/dev/vnic/nic_main.c
index df6079e..9391e6a 100644
--- a/sys/dev/vnic/nic_main.c
+++ b/sys/dev/vnic/nic_main.c
@@ -137,18 +137,19 @@ static device_method_t nicpf_methods[] = {
DEVMETHOD_END,
};
-static driver_t nicpf_driver = {
+static driver_t vnicpf_driver = {
"vnicpf",
nicpf_methods,
sizeof(struct nicpf),
};
-static devclass_t nicpf_devclass;
+static devclass_t vnicpf_devclass;
-DRIVER_MODULE(nicpf, pci, nicpf_driver, nicpf_devclass, 0, 0);
-MODULE_DEPEND(nicpf, pci, 1, 1, 1);
-MODULE_DEPEND(nicpf, ether, 1, 1, 1);
-MODULE_DEPEND(nicpf, thunder_bgx, 1, 1, 1);
+DRIVER_MODULE(vnicpf, pci, vnicpf_driver, vnicpf_devclass, 0, 0);
+MODULE_VERSION(vnicpf, 1);
+MODULE_DEPEND(vnicpf, pci, 1, 1, 1);
+MODULE_DEPEND(vnicpf, ether, 1, 1, 1);
+MODULE_DEPEND(vnicpf, thunder_bgx, 1, 1, 1);
static int nicpf_alloc_res(struct nicpf *);
static void nicpf_free_res(struct nicpf *);
@@ -246,7 +247,9 @@ static int
nicpf_detach(device_t dev)
{
struct nicpf *nic;
+ int err;
+ err = 0;
nic = device_get_softc(dev);
callout_drain(&nic->check_link);
@@ -256,7 +259,12 @@ nicpf_detach(device_t dev)
nicpf_free_res(nic);
pci_disable_busmaster(dev);
- return (0);
+#ifdef PCI_IOV
+ err = pci_iov_detach(dev);
+ if (err != 0)
+ device_printf(dev, "SR-IOV in use. Detach first.\n");
+#endif
+ return (err);
}
/*
@@ -1054,6 +1062,9 @@ nic_disable_msix(struct nicpf *nic)
nic->msix_enabled = 0;
nic->num_vec = 0;
}
+
+ bus_release_resource(nic->dev, SYS_RES_MEMORY,
+ rman_get_rid(nic->msix_table_res), nic->msix_table_res);
}
static void
@@ -1070,7 +1081,7 @@ nic_free_all_interrupts(struct nicpf *nic)
nic->msix_entries[irq].handle);
}
- bus_release_resource(nic->dev, SYS_RES_IRQ, irq,
+ bus_release_resource(nic->dev, SYS_RES_IRQ, irq + 1,
nic->msix_entries[irq].irq_res);
}
}
diff --git a/sys/dev/vnic/nicvf_main.c b/sys/dev/vnic/nicvf_main.c
index 6f025d3..9e9975a 100644
--- a/sys/dev/vnic/nicvf_main.c
+++ b/sys/dev/vnic/nicvf_main.c
@@ -129,10 +129,11 @@ static driver_t nicvf_driver = {
static devclass_t nicvf_devclass;
-DRIVER_MODULE(nicvf, pci, nicvf_driver, nicvf_devclass, 0, 0);
-MODULE_DEPEND(nicvf, pci, 1, 1, 1);
-MODULE_DEPEND(nicvf, ether, 1, 1, 1);
-MODULE_DEPEND(nicvf, vnic_pf, 1, 1, 1);
+DRIVER_MODULE(vnicvf, pci, nicvf_driver, nicvf_devclass, 0, 0);
+MODULE_VERSION(vnicvf, 1);
+MODULE_DEPEND(vnicvf, pci, 1, 1, 1);
+MODULE_DEPEND(vnicvf, ether, 1, 1, 1);
+MODULE_DEPEND(vnicvf, vnicpf, 1, 1, 1);
static int nicvf_allocate_misc_interrupt(struct nicvf *);
static int nicvf_enable_misc_interrupt(struct nicvf *);
diff --git a/sys/dev/vnic/thunder_bgx.c b/sys/dev/vnic/thunder_bgx.c
index 7a3a941..ef0ca54 100644
--- a/sys/dev/vnic/thunder_bgx.c
+++ b/sys/dev/vnic/thunder_bgx.c
@@ -109,9 +109,10 @@ static driver_t thunder_bgx_driver = {
static devclass_t thunder_bgx_devclass;
DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, thunder_bgx_devclass, 0, 0);
+MODULE_VERSION(thunder_bgx, 1);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
-MODULE_DEPEND(thunder_bgx, octeon_mdio, 1, 1, 1);
+MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1);
static int
thunder_bgx_probe(device_t dev)
@@ -135,12 +136,16 @@ static int
thunder_bgx_attach(device_t dev)
{
struct bgx *bgx;
- uint8_t lmac;
+ uint8_t lmacid;
int err;
int rid;
+ struct lmac *lmac;
bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
bgx->dev = dev;
+
+ lmac = device_get_softc(dev);
+ lmac->bgx = bgx;
/* Enable bus mastering */
pci_enable_busmaster(dev);
/* Allocate resources - configuration registers */
@@ -167,11 +172,11 @@ thunder_bgx_attach(device_t dev)
bgx_init_hw(bgx);
/* Enable all LMACs */
- for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
- err = bgx_lmac_enable(bgx, lmac);
+ for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) {
+ err = bgx_lmac_enable(bgx, lmacid);
if (err) {
device_printf(dev, "BGX%d failed to enable lmac%d\n",
- bgx->bgx_id, lmac);
+ bgx->bgx_id, lmacid);
goto err_free_res;
}
}
@@ -202,6 +207,12 @@ thunder_bgx_detach(device_t dev)
for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
bgx_lmac_disable(bgx, lmacid);
+ bgx_vnic[bgx->bgx_id] = NULL;
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rman_get_rid(bgx->reg_base), bgx->reg_base);
+ free(bgx, M_BGX);
+ pci_disable_busmaster(dev);
+
return (0);
}
diff --git a/sys/dev/vnic/thunder_mdio.c b/sys/dev/vnic/thunder_mdio.c
index 8157623..3a4fc9c 100644
--- a/sys/dev/vnic/thunder_mdio.c
+++ b/sys/dev/vnic/thunder_mdio.c
@@ -122,8 +122,10 @@ DEFINE_CLASS_0(thunder_mdio, thunder_mdio_driver, thunder_mdio_methods,
sizeof(struct thunder_mdio_softc));
DRIVER_MODULE(miibus, thunder_mdio, miibus_driver, miibus_devclass, 0, 0);
+MODULE_VERSION(thunder_mdio, 1);
MODULE_DEPEND(thunder_mdio, ether, 1, 1, 1);
MODULE_DEPEND(thunder_mdio, miibus, 1, 1, 1);
+MODULE_DEPEND(thunder_mdio, mrmlbus, 1, 1, 1);
MALLOC_DEFINE(M_THUNDER_MDIO, "ThunderX MDIO",
"Cavium ThunderX MDIO dynamic memory");
diff --git a/sys/dev/wi/if_wi.c b/sys/dev/wi/if_wi.c
index e2ea3c9..e310257 100644
--- a/sys/dev/wi/if_wi.c
+++ b/sys/dev/wi/if_wi.c
@@ -155,9 +155,12 @@ static int wi_mwrite_bap(struct wi_softc *, int, int, struct mbuf *, int);
static int wi_read_rid(struct wi_softc *, int, void *, int *);
static int wi_write_rid(struct wi_softc *, int, const void *, int);
static int wi_write_appie(struct wi_softc *, int, const struct ieee80211_appie *);
+static u_int16_t wi_read_chanmask(struct wi_softc *);
static void wi_scan_start(struct ieee80211com *);
static void wi_scan_end(struct ieee80211com *);
+static void wi_getradiocaps(struct ieee80211com *, int, int *,
+ struct ieee80211_channel[]);
static void wi_set_channel(struct ieee80211com *);
static __inline int
@@ -335,23 +338,9 @@ wi_attach(device_t dev)
* Query the card for available channels and setup the
* channel table. We assume these are all 11b channels.
*/
- buflen = sizeof(val);
- if (wi_read_rid(sc, WI_RID_CHANNEL_LIST, &val, &buflen) != 0)
- val = htole16(0x1fff); /* assume 1-13 */
- KASSERT(val != 0, ("wi_attach: no available channels listed!"));
-
- val <<= 1; /* shift for base 1 indices */
- for (i = 1; i < 16; i++) {
- struct ieee80211_channel *c;
-
- if (!isset((u_int8_t*)&val, i))
- continue;
- c = &ic->ic_channels[ic->ic_nchans++];
- c->ic_freq = ieee80211_ieee2mhz(i, IEEE80211_CHAN_B);
- c->ic_flags = IEEE80211_CHAN_B;
- c->ic_ieee = i;
- /* XXX txpowers? */
- }
+ sc->sc_chanmask = wi_read_chanmask(sc);
+ wi_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
+ ic->ic_channels);
/*
* Set flags based on firmware version.
@@ -439,6 +428,7 @@ wi_attach(device_t dev)
ic->ic_raw_xmit = wi_raw_xmit;
ic->ic_scan_start = wi_scan_start;
ic->ic_scan_end = wi_scan_end;
+ ic->ic_getradiocaps = wi_getradiocaps;
ic->ic_set_channel = wi_set_channel;
ic->ic_vap_create = wi_vap_create;
ic->ic_vap_delete = wi_vap_delete;
@@ -697,6 +687,26 @@ wi_stop(struct wi_softc *sc, int disable)
}
static void
+wi_getradiocaps(struct ieee80211com *ic,
+ int maxchans, int *nchans, struct ieee80211_channel chans[])
+{
+ struct wi_softc *sc = ic->ic_softc;
+ u_int8_t bands[IEEE80211_MODE_BYTES];
+ int i;
+
+ memset(bands, 0, sizeof(bands));
+ setbit(bands, IEEE80211_MODE_11B);
+
+ for (i = 1; i < 16; i++) {
+ if (sc->sc_chanmask & (1 << i)) {
+ /* XXX txpowers? */
+ ieee80211_add_channel(chans, maxchans, nchans,
+ i, 0, 0, 0, bands);
+ }
+ }
+}
+
+static void
wi_set_channel(struct ieee80211com *ic)
{
struct wi_softc *sc = ic->ic_softc;
@@ -1988,6 +1998,22 @@ wi_write_appie(struct wi_softc *sc, int rid, const struct ieee80211_appie *ie)
return wi_write_rid(sc, rid, buf, ie->ie_len + sizeof(uint16_t));
}
+static u_int16_t
+wi_read_chanmask(struct wi_softc *sc)
+{
+ u_int16_t val;
+ int buflen;
+
+ buflen = sizeof(val);
+ if (wi_read_rid(sc, WI_RID_CHANNEL_LIST, &val, &buflen) != 0)
+ val = htole16(0x1fff); /* assume 1-13 */
+ KASSERT(val != 0, ("%s: no available channels listed!", __func__));
+
+ val <<= 1; /* shift for base 1 indices */
+
+ return (val);
+}
+
int
wi_alloc(device_t dev, int rid)
{
diff --git a/sys/dev/wi/if_wivar.h b/sys/dev/wi/if_wivar.h
index 8cc320d..55151fe 100644
--- a/sys/dev/wi/if_wivar.h
+++ b/sys/dev/wi/if_wivar.h
@@ -114,6 +114,7 @@ struct wi_softc {
u_int16_t sc_portnum;
u_int16_t sc_encryption;
u_int16_t sc_monitor_port;
+ u_int16_t sc_chanmask;
/* RSSI interpretation */
u_int16_t sc_min_rssi; /* clamp sc_min_rssi < RSSI */
diff --git a/sys/fs/cd9660/cd9660_vfsops.c b/sys/fs/cd9660/cd9660_vfsops.c
index 649b01a..a558464 100644
--- a/sys/fs/cd9660/cd9660_vfsops.c
+++ b/sys/fs/cd9660/cd9660_vfsops.c
@@ -226,11 +226,9 @@ iso_mountfs(devvp, mp)
dev = devvp->v_rdev;
dev_ref(dev);
- DROP_GIANT();
g_topology_lock();
error = g_vfs_open(devvp, &cp, "cd9660", 0);
g_topology_unlock();
- PICKUP_GIANT();
VOP_UNLOCK(devvp, 0);
if (error)
goto out;
@@ -481,11 +479,9 @@ out:
if (supbp != NULL)
brelse(supbp);
if (cp != NULL) {
- DROP_GIANT();
g_topology_lock();
g_vfs_close(cp);
g_topology_unlock();
- PICKUP_GIANT();
}
if (isomp) {
free(isomp, M_ISOFSMNT);
@@ -519,11 +515,9 @@ cd9660_unmount(mp, mntflags)
if (isomp->im_l2d)
cd9660_iconv->close(isomp->im_l2d);
}
- DROP_GIANT();
g_topology_lock();
g_vfs_close(isomp->im_cp);
g_topology_unlock();
- PICKUP_GIANT();
vrele(isomp->im_devvp);
dev_rel(isomp->im_dev);
free(isomp, M_ISOFSMNT);
diff --git a/sys/fs/devfs/devfs_devs.c b/sys/fs/devfs/devfs_devs.c
index 2c11fa4..6572639 100644
--- a/sys/fs/devfs/devfs_devs.c
+++ b/sys/fs/devfs/devfs_devs.c
@@ -127,16 +127,11 @@ devfs_alloc(int flags)
return (NULL);
cdp->cdp_dirents = &cdp->cdp_dirent0;
- cdp->cdp_dirent0 = NULL;
- cdp->cdp_maxdirent = 0;
- cdp->cdp_inode = 0;
cdev = &cdp->cdp_c;
-
LIST_INIT(&cdev->si_children);
vfs_timestamp(&ts);
cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;
- cdev->si_cred = NULL;
return (cdev);
}
diff --git a/sys/fs/ext2fs/ext2_alloc.c b/sys/fs/ext2fs/ext2_alloc.c
index c3bd29b..35e24c3 100644
--- a/sys/fs/ext2fs/ext2_alloc.c
+++ b/sys/fs/ext2fs/ext2_alloc.c
@@ -408,7 +408,8 @@ ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp)
/*
* Set up a new generation number for this inode.
*/
- ip->i_gen = arc4random();
+ while (ip->i_gen == 0 || ++ip->i_gen == 0)
+ ip->i_gen = arc4random();
vfs_timestamp(&ts);
ip->i_birthtime = ts.tv_sec;
diff --git a/sys/fs/ext2fs/ext2_vfsops.c b/sys/fs/ext2fs/ext2_vfsops.c
index 9f2b94c..9f73357 100644
--- a/sys/fs/ext2fs/ext2_vfsops.c
+++ b/sys/fs/ext2fs/ext2_vfsops.c
@@ -998,7 +998,8 @@ ext2_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
* already have one. This should only happen on old filesystems.
*/
if (ip->i_gen == 0) {
- ip->i_gen = random() + 1;
+ while (ip->i_gen == 0)
+ ip->i_gen = arc4random();
if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
ip->i_flag |= IN_MODIFIED;
}
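
Both ext2fs hunks enforce the same invariant: an inode generation number must never end up as zero, and arc4random() can legitimately return zero. A standalone userland restatement of that invariant (assuming only arc4random(3) from libc):

#include <stdint.h>
#include <stdlib.h>

static uint32_t
new_inode_gen(void)
{
	uint32_t gen;

	do {
		gen = arc4random();
	} while (gen == 0);	/* zero is reserved/invalid */
	return (gen);
}
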
diff --git a/sys/fs/msdosfs/msdosfs_vfsops.c b/sys/fs/msdosfs/msdosfs_vfsops.c
index d4b6a02..63d60dd 100644
--- a/sys/fs/msdosfs/msdosfs_vfsops.c
+++ b/sys/fs/msdosfs/msdosfs_vfsops.c
@@ -292,11 +292,9 @@ msdosfs_mount(struct mount *mp)
}
/* Downgrade the device from rw to ro. */
- DROP_GIANT();
g_topology_lock();
error = g_access(pmp->pm_cp, 0, -1, 0);
g_topology_unlock();
- PICKUP_GIANT();
if (error) {
(void)markvoldirty(pmp, 1);
return (error);
@@ -328,11 +326,9 @@ msdosfs_mount(struct mount *mp)
return (error);
}
VOP_UNLOCK(devvp, 0);
- DROP_GIANT();
g_topology_lock();
error = g_access(pmp->pm_cp, 0, 1, 0);
g_topology_unlock();
- PICKUP_GIANT();
if (error)
return (error);
@@ -401,8 +397,6 @@ msdosfs_mount(struct mount *mp)
return error;
}
- if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
- devvp->v_rdev->si_mountpt = mp;
vfs_mountedfrom(mp, from);
#ifdef MSDOSFS_DEBUG
printf("msdosfs_mount(): mp %p, pmp %p, inusemap %p\n", mp, pmp, pmp->pm_inusemap);
@@ -431,15 +425,21 @@ mountmsdosfs(struct vnode *devvp, struct mount *mp)
ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
dev = devvp->v_rdev;
- dev_ref(dev);
- DROP_GIANT();
+ if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
+ (uintptr_t)mp) == 0) {
+ VOP_UNLOCK(devvp, 0);
+ return (EBUSY);
+ }
g_topology_lock();
error = g_vfs_open(devvp, &cp, "msdosfs", ronly ? 0 : 1);
g_topology_unlock();
- PICKUP_GIANT();
+ if (error != 0) {
+ atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
+ VOP_UNLOCK(devvp, 0);
+ return (error);
+ }
+ dev_ref(dev);
VOP_UNLOCK(devvp, 0);
- if (error)
- goto error_exit;
bo = &devvp->v_bufobj;
@@ -770,11 +770,9 @@ error_exit:
if (bp)
brelse(bp);
if (cp != NULL) {
- DROP_GIANT();
g_topology_lock();
g_vfs_close(cp);
g_topology_unlock();
- PICKUP_GIANT();
}
if (pmp) {
lockdestroy(&pmp->pm_fatlock);
@@ -783,6 +781,7 @@ error_exit:
free(pmp, M_MSDOSFSMNT);
mp->mnt_data = NULL;
}
+ atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
dev_rel(dev);
return (error);
}
@@ -846,13 +845,10 @@ msdosfs_unmount(struct mount *mp, int mntflags)
BO_UNLOCK(bo);
}
#endif
- DROP_GIANT();
- if (pmp->pm_devvp->v_type == VCHR && pmp->pm_devvp->v_rdev != NULL)
- pmp->pm_devvp->v_rdev->si_mountpt = NULL;
g_topology_lock();
g_vfs_close(pmp->pm_cp);
g_topology_unlock();
- PICKUP_GIANT();
+ atomic_store_rel_ptr((uintptr_t *)&pmp->pm_dev->si_mountpt, 0);
vrele(pmp->pm_devvp);
dev_rel(pmp->pm_dev);
free(pmp->pm_inusemap, M_MSDOSFSFAT);
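
The msdosfs change replaces the old si_mountpt assignment with an atomic claim/release of the device, which also rejects a second mount of the same cdev. A reduced sketch of that discipline, not taken from the patch (kernel headers assumed as listed, error handling trimmed):

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <machine/atomic.h>

static int
claim_mountpt(struct cdev *dev, struct mount *mp)
{
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0)
		return (EBUSY);		/* already mounted elsewhere */
	return (0);
}

static void
release_mountpt(struct cdev *dev)
{
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
}
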
diff --git a/sys/fs/udf/udf_vfsops.c b/sys/fs/udf/udf_vfsops.c
index 8c44f48..a2fcac7 100644
--- a/sys/fs/udf/udf_vfsops.c
+++ b/sys/fs/udf/udf_vfsops.c
@@ -325,11 +325,9 @@ udf_mountfs(struct vnode *devvp, struct mount *mp)
dev = devvp->v_rdev;
dev_ref(dev);
- DROP_GIANT();
g_topology_lock();
error = g_vfs_open(devvp, &cp, "udf", 0);
g_topology_unlock();
- PICKUP_GIANT();
VOP_UNLOCK(devvp, 0);
if (error)
goto bail;
@@ -500,11 +498,9 @@ bail:
if (bp != NULL)
brelse(bp);
if (cp != NULL) {
- DROP_GIANT();
g_topology_lock();
g_vfs_close(cp);
g_topology_unlock();
- PICKUP_GIANT();
}
dev_rel(dev);
return error;
@@ -533,11 +529,9 @@ udf_unmount(struct mount *mp, int mntflags)
#endif
}
- DROP_GIANT();
g_topology_lock();
g_vfs_close(udfmp->im_cp);
g_topology_unlock();
- PICKUP_GIANT();
vrele(udfmp->im_devvp);
dev_rel(udfmp->im_dev);
diff --git a/sys/geom/eli/g_eli.c b/sys/geom/eli/g_eli.c
index 403d0b6..6d734ae 100644
--- a/sys/geom/eli/g_eli.c
+++ b/sys/geom/eli/g_eli.c
@@ -309,6 +309,7 @@ g_eli_start(struct bio *bp)
case BIO_WRITE:
case BIO_GETATTR:
case BIO_FLUSH:
+ case BIO_ZONE:
break;
case BIO_DELETE:
/*
@@ -348,6 +349,7 @@ g_eli_start(struct bio *bp)
case BIO_GETATTR:
case BIO_FLUSH:
case BIO_DELETE:
+ case BIO_ZONE:
cbp->bio_done = g_std_done;
cp = LIST_FIRST(&sc->sc_geom->consumer);
cbp->bio_to = cp->provider;
@@ -1229,7 +1231,6 @@ g_eli_shutdown_pre_sync(void *arg, int howto)
int error;
mp = arg;
- DROP_GIANT();
g_topology_lock();
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
sc = gp->softc;
@@ -1245,7 +1246,6 @@ g_eli_shutdown_pre_sync(void *arg, int howto)
}
}
g_topology_unlock();
- PICKUP_GIANT();
}
static void
diff --git a/sys/geom/geom.h b/sys/geom/geom.h
index bf70d0b..b261693 100644
--- a/sys/geom/geom.h
+++ b/sys/geom/geom.h
@@ -56,6 +56,7 @@ struct bio;
struct sbuf;
struct gctl_req;
struct g_configargs;
+struct disk_zone_args;
typedef int g_config_t (struct g_configargs *ca);
typedef void g_ctl_req_t (struct gctl_req *, struct g_class *cp, char const *verb);
@@ -318,6 +319,7 @@ struct bio * g_duplicate_bio(struct bio *);
void g_destroy_bio(struct bio *);
void g_io_deliver(struct bio *bp, int error);
int g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr);
+int g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp);
int g_io_flush(struct g_consumer *cp);
int g_register_classifier(struct g_classifier_hook *hook);
void g_unregister_classifier(struct g_classifier_hook *hook);
@@ -369,7 +371,6 @@ g_free(void *ptr)
#define g_topology_lock() \
do { \
- mtx_assert(&Giant, MA_NOTOWNED); \
sx_xlock(&topology_lock); \
} while (0)
diff --git a/sys/geom/geom_dev.c b/sys/geom/geom_dev.c
index 5ba9191..005cc3c 100644
--- a/sys/geom/geom_dev.c
+++ b/sys/geom/geom_dev.c
@@ -549,6 +549,42 @@ g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
break;
}
+ case DIOCZONECMD: {
+ struct disk_zone_args *zone_args =(struct disk_zone_args *)data;
+ struct disk_zone_rep_entry *new_entries, *old_entries;
+ struct disk_zone_report *rep;
+ size_t alloc_size;
+
+ old_entries = NULL;
+ new_entries = NULL;
+ rep = NULL;
+ alloc_size = 0;
+
+ if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) {
+
+ rep = &zone_args->zone_params.report;
+ alloc_size = rep->entries_allocated *
+ sizeof(struct disk_zone_rep_entry);
+ if (alloc_size != 0)
+ new_entries = g_malloc(alloc_size,
+ M_WAITOK| M_ZERO);
+ old_entries = rep->entries;
+ rep->entries = new_entries;
+ }
+ error = g_io_zonecmd(zone_args, cp);
+ if ((zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
+ && (alloc_size != 0)
+ && (error == 0)) {
+ error = copyout(new_entries, old_entries, alloc_size);
+ }
+ if ((old_entries != NULL)
+ && (rep != NULL))
+ rep->entries = old_entries;
+
+ if (new_entries != NULL)
+ g_free(new_entries);
+ break;
+ }
default:
if (cp->provider->geom->ioctl != NULL) {
error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);
@@ -574,6 +610,9 @@ g_dev_done(struct bio *bp2)
bp->bio_error = bp2->bio_error;
bp->bio_completed = bp2->bio_completed;
bp->bio_resid = bp->bio_length - bp2->bio_completed;
+ if (bp2->bio_cmd == BIO_ZONE)
+ bcopy(&bp2->bio_zone, &bp->bio_zone, sizeof(bp->bio_zone));
+
if (bp2->bio_error != 0) {
g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
bp2, bp2->bio_error);
@@ -608,7 +647,8 @@ g_dev_strategy(struct bio *bp)
KASSERT(bp->bio_cmd == BIO_READ ||
bp->bio_cmd == BIO_WRITE ||
bp->bio_cmd == BIO_DELETE ||
- bp->bio_cmd == BIO_FLUSH,
+ bp->bio_cmd == BIO_FLUSH ||
+ bp->bio_cmd == BIO_ZONE,
("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
dev = bp->bio_dev;
cp = dev->si_drv2;
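
A hypothetical userland caller of the new DIOCZONECMD path, for orientation only. The zone_cmd/report field names come from the hunk above; the exact headers (<sys/disk.h>, <sys/disk_zone.h>) and any result fields beyond the ones shown are assumptions, not taken from this diff.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/disk.h>
#include <sys/disk_zone.h>

#include <err.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>

int
main(int argc, char **argv)
{
	struct disk_zone_args za;
	struct disk_zone_rep_entry *entries;
	int fd;

	if (argc != 2)
		errx(1, "usage: %s /dev/<disk>", argv[0]);
	if ((fd = open(argv[1], O_RDONLY)) == -1)
		err(1, "open");

	if ((entries = calloc(64, sizeof(*entries))) == NULL)
		err(1, "calloc");
	memset(&za, 0, sizeof(za));
	za.zone_cmd = DISK_ZONE_REPORT_ZONES;
	za.zone_params.report.entries_allocated = 64;
	za.zone_params.report.entries = entries;

	/* On success the kernel copies the zone entries back out. */
	if (ioctl(fd, DIOCZONECMD, &za) == -1)
		err(1, "DIOCZONECMD");
	return (0);
}
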
diff --git a/sys/geom/geom_disk.c b/sys/geom/geom_disk.c
index 1580030..ce4e079 100644
--- a/sys/geom/geom_disk.c
+++ b/sys/geom/geom_disk.c
@@ -226,7 +226,11 @@ g_disk_done(struct bio *bp)
if (bp2->bio_error == 0)
bp2->bio_error = bp->bio_error;
bp2->bio_completed += bp->bio_completed;
+
switch (bp->bio_cmd) {
+ case BIO_ZONE:
+ bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone));
+ /*FALLTHROUGH*/
case BIO_READ:
case BIO_WRITE:
case BIO_DELETE:
@@ -515,6 +519,16 @@ g_disk_start(struct bio *bp)
error = EOPNOTSUPP;
break;
}
+ /*FALLTHROUGH*/
+ case BIO_ZONE:
+ if (bp->bio_cmd == BIO_ZONE) {
+ if (!(dp->d_flags & DISKFLAG_CANZONE)) {
+ error = EOPNOTSUPP;
+ break;
+ }
+ g_trace(G_T_BIO, "g_disk_zone(%s)",
+ bp->bio_to->name);
+ }
bp2 = g_clone_bio(bp);
if (bp2 == NULL) {
g_io_deliver(bp, ENOMEM);
diff --git a/sys/geom/geom_disk.h b/sys/geom/geom_disk.h
index 4cf53c4..97faeed 100644
--- a/sys/geom/geom_disk.h
+++ b/sys/geom/geom_disk.h
@@ -109,6 +109,7 @@ struct disk {
#define DISKFLAG_CANFLUSHCACHE 0x8
#define DISKFLAG_UNMAPPED_BIO 0x10
#define DISKFLAG_DIRECT_COMPLETION 0x20
+#define DISKFLAG_CANZONE 0x80
struct disk *disk_alloc(void);
void disk_create(struct disk *disk, int version);
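
Sketch (not from the patch) of how a disk driver opts in to the new zone plumbing: advertise DISKFLAG_CANZONE on the struct disk it publishes, then service BIO_ZONE bios in its strategy routine. All fields other than d_flags are elided here.

#include <sys/param.h>
#include <geom/geom_disk.h>

static struct disk *
mydisk_announce(void *softc)
{
	struct disk *dp;

	dp = disk_alloc();
	dp->d_flags |= DISKFLAG_CANZONE;	/* zone commands supported */
	dp->d_drv1 = softc;
	/* d_name, d_strategy, d_mediasize, ... as usual */
	disk_create(dp, DISK_VERSION);
	return (dp);
}
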
diff --git a/sys/geom/geom_event.c b/sys/geom/geom_event.c
index 2ded638..3c2ee49 100644
--- a/sys/geom/geom_event.c
+++ b/sys/geom/geom_event.c
@@ -83,7 +83,6 @@ g_waitidle(void)
{
g_topology_assert_not();
- mtx_assert(&Giant, MA_NOTOWNED);
mtx_lock(&g_eventlock);
while (!TAILQ_EMPTY(&g_events))
diff --git a/sys/geom/geom_io.c b/sys/geom/geom_io.c
index 8270274..401c20f 100644
--- a/sys/geom/geom_io.c
+++ b/sys/geom/geom_io.c
@@ -218,6 +218,9 @@ g_clone_bio(struct bio *bp)
bp2->bio_ma_n = bp->bio_ma_n;
bp2->bio_ma_offset = bp->bio_ma_offset;
bp2->bio_attribute = bp->bio_attribute;
+ if (bp->bio_cmd == BIO_ZONE)
+ bcopy(&bp->bio_zone, &bp2->bio_zone,
+ sizeof(bp->bio_zone));
/* Inherit classification info from the parent */
bp2->bio_classifier1 = bp->bio_classifier1;
bp2->bio_classifier2 = bp->bio_classifier2;
@@ -305,6 +308,34 @@ g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
}
int
+g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
+{
+ struct bio *bp;
+ int error;
+
+ g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
+ bp = g_alloc_bio();
+ bp->bio_cmd = BIO_ZONE;
+ bp->bio_done = NULL;
+ /*
+ * XXX KDM need to handle report zone data.
+ */
+ bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
+ if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
+ bp->bio_length =
+ zone_args->zone_params.report.entries_allocated *
+ sizeof(struct disk_zone_rep_entry);
+ else
+ bp->bio_length = 0;
+
+ g_io_request(bp, cp);
+ error = biowait(bp, "gzone");
+ bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
+ g_destroy_bio(bp);
+ return (error);
+}
+
+int
g_io_flush(struct g_consumer *cp)
{
struct bio *bp;
@@ -349,6 +380,14 @@ g_io_check(struct bio *bp)
if (cp->acw == 0)
return (EPERM);
break;
+ case BIO_ZONE:
+ if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
+ (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
+ if (cp->acr == 0)
+ return (EPERM);
+ } else if (cp->acw == 0)
+ return (EPERM);
+ break;
default:
return (EPERM);
}
@@ -988,6 +1027,35 @@ g_print_bio(struct bio *bp)
cmd = "FLUSH";
printf("%s[%s]", pname, cmd);
return;
+ case BIO_ZONE: {
+ char *subcmd = NULL;
+ cmd = "ZONE";
+ switch (bp->bio_zone.zone_cmd) {
+ case DISK_ZONE_OPEN:
+ subcmd = "OPEN";
+ break;
+ case DISK_ZONE_CLOSE:
+ subcmd = "CLOSE";
+ break;
+ case DISK_ZONE_FINISH:
+ subcmd = "FINISH";
+ break;
+ case DISK_ZONE_RWP:
+ subcmd = "RWP";
+ break;
+ case DISK_ZONE_REPORT_ZONES:
+ subcmd = "REPORT ZONES";
+ break;
+ case DISK_ZONE_GET_PARAMS:
+ subcmd = "GET PARAMS";
+ break;
+ default:
+ subcmd = "UNKNOWN";
+ break;
+ }
+ printf("%s[%s,%s]", pname, cmd, subcmd);
+ return;
+ }
case BIO_READ:
cmd = "READ";
break;
diff --git a/sys/geom/geom_kern.c b/sys/geom/geom_kern.c
index dbced0f..9f3f120 100644
--- a/sys/geom/geom_kern.c
+++ b/sys/geom/geom_kern.c
@@ -90,7 +90,6 @@ static void
g_up_procbody(void *arg)
{
- mtx_assert(&Giant, MA_NOTOWNED);
thread_lock(g_up_td);
sched_prio(g_up_td, PRIBIO);
thread_unlock(g_up_td);
@@ -103,7 +102,6 @@ static void
g_down_procbody(void *arg)
{
- mtx_assert(&Giant, MA_NOTOWNED);
thread_lock(g_down_td);
sched_prio(g_down_td, PRIBIO);
thread_unlock(g_down_td);
@@ -116,7 +114,6 @@ static void
g_event_procbody(void *arg)
{
- mtx_assert(&Giant, MA_NOTOWNED);
thread_lock(g_event_td);
sched_prio(g_event_td, PRIBIO);
thread_unlock(g_event_td);
@@ -147,14 +144,12 @@ g_init(void)
g_io_init();
g_event_init();
g_ctl_init();
- mtx_lock(&Giant);
kproc_kthread_add(g_event_procbody, NULL, &g_proc, &g_event_td,
RFHIGHPID, 0, "geom", "g_event");
kproc_kthread_add(g_up_procbody, NULL, &g_proc, &g_up_td,
RFHIGHPID, 0, "geom", "g_up");
kproc_kthread_add(g_down_procbody, NULL, &g_proc, &g_down_td,
RFHIGHPID, 0, "geom", "g_down");
- mtx_unlock(&Giant);
EVENTHANDLER_REGISTER(shutdown_pre_sync, geom_shutdown, NULL,
SHUTDOWN_PRI_FIRST);
}
diff --git a/sys/geom/geom_mbr.c b/sys/geom/geom_mbr.c
index 86ee860..a811e35 100644
--- a/sys/geom/geom_mbr.c
+++ b/sys/geom/geom_mbr.c
@@ -190,7 +190,6 @@ g_mbr_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thr
case DIOCSMBR: {
if (!(fflag & FWRITE))
return (EPERM);
- DROP_GIANT();
g_topology_lock();
cp = LIST_FIRST(&gp->consumer);
if (cp->acw == 0) {
@@ -205,7 +204,6 @@ g_mbr_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thr
if (opened)
g_access(cp, 0, -1 , 0);
g_topology_unlock();
- PICKUP_GIANT();
return(error);
}
default:
diff --git a/sys/geom/geom_pc98.c b/sys/geom/geom_pc98.c
index 42c9962..f4435cb 100644
--- a/sys/geom/geom_pc98.c
+++ b/sys/geom/geom_pc98.c
@@ -176,7 +176,6 @@ g_pc98_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct th
case DIOCSPC98: {
if (!(fflag & FWRITE))
return (EPERM);
- DROP_GIANT();
g_topology_lock();
cp = LIST_FIRST(&gp->consumer);
if (cp->acw == 0) {
@@ -191,7 +190,6 @@ g_pc98_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct th
if (opened)
g_access(cp, 0, -1 , 0);
g_topology_unlock();
- PICKUP_GIANT();
return(error);
}
default:
diff --git a/sys/geom/geom_subr.c b/sys/geom/geom_subr.c
index 54a99bf..d912838 100644
--- a/sys/geom/geom_subr.c
+++ b/sys/geom/geom_subr.c
@@ -247,9 +247,7 @@ g_modevent(module_t mod, int type, void *data)
break;
case MOD_UNLOAD:
g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name);
- DROP_GIANT();
error = g_unload_class(mp);
- PICKUP_GIANT();
if (error == 0) {
KASSERT(LIST_EMPTY(&mp->geom),
("Unloaded class (%s) still has geom", mp->name));
@@ -1471,6 +1469,7 @@ db_print_bio_cmd(struct bio *bp)
case BIO_CMD0: db_printf("BIO_CMD0"); break;
case BIO_CMD1: db_printf("BIO_CMD1"); break;
case BIO_CMD2: db_printf("BIO_CMD2"); break;
+ case BIO_ZONE: db_printf("BIO_ZONE"); break;
default: db_printf("UNKNOWN"); break;
}
db_printf("\n");
diff --git a/sys/geom/journal/g_journal.c b/sys/geom/journal/g_journal.c
index 871bd8e4..0678003 100644
--- a/sys/geom/journal/g_journal.c
+++ b/sys/geom/journal/g_journal.c
@@ -2697,7 +2697,6 @@ g_journal_shutdown(void *arg, int howto __unused)
if (panicstr != NULL)
return;
mp = arg;
- DROP_GIANT();
g_topology_lock();
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
if (gp->softc == NULL)
@@ -2706,7 +2705,6 @@ g_journal_shutdown(void *arg, int howto __unused)
g_journal_destroy(gp->softc);
}
g_topology_unlock();
- PICKUP_GIANT();
}
/*
@@ -2725,7 +2723,6 @@ g_journal_lowmem(void *arg, int howto __unused)
g_journal_stats_low_mem++;
mp = arg;
- DROP_GIANT();
g_topology_lock();
LIST_FOREACH(gp, &mp->geom, geom) {
sc = gp->softc;
@@ -2756,7 +2753,6 @@ g_journal_lowmem(void *arg, int howto __unused)
break;
}
g_topology_unlock();
- PICKUP_GIANT();
}
static void g_journal_switcher(void *arg);
@@ -2871,7 +2867,6 @@ g_journal_do_switch(struct g_class *classp)
char *mountpoint;
int error, save;
- DROP_GIANT();
g_topology_lock();
LIST_FOREACH(gp, &classp->geom, geom) {
sc = gp->softc;
@@ -2886,7 +2881,6 @@ g_journal_do_switch(struct g_class *classp)
mtx_unlock(&sc->sc_mtx);
}
g_topology_unlock();
- PICKUP_GIANT();
mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
@@ -2901,11 +2895,9 @@ g_journal_do_switch(struct g_class *classp)
continue;
/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */
- DROP_GIANT();
g_topology_lock();
sc = g_journal_find_device(classp, mp->mnt_gjprovider);
g_topology_unlock();
- PICKUP_GIANT();
if (sc == NULL) {
GJ_DEBUG(0, "Cannot find journal geom for %s.",
@@ -2984,7 +2976,6 @@ next:
sc = NULL;
for (;;) {
- DROP_GIANT();
g_topology_lock();
LIST_FOREACH(gp, &g_journal_class.geom, geom) {
sc = gp->softc;
@@ -3000,7 +2991,6 @@ next:
sc = NULL;
}
g_topology_unlock();
- PICKUP_GIANT();
if (sc == NULL)
break;
mtx_assert(&sc->sc_mtx, MA_OWNED);
diff --git a/sys/geom/mirror/g_mirror.c b/sys/geom/mirror/g_mirror.c
index 91f1367..379f615 100644
--- a/sys/geom/mirror/g_mirror.c
+++ b/sys/geom/mirror/g_mirror.c
@@ -3310,7 +3310,6 @@ g_mirror_shutdown_post_sync(void *arg, int howto)
int error;
mp = arg;
- DROP_GIANT();
g_topology_lock();
g_mirror_shutdown = 1;
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
@@ -3329,7 +3328,6 @@ g_mirror_shutdown_post_sync(void *arg, int howto)
g_topology_lock();
}
g_topology_unlock();
- PICKUP_GIANT();
}
static void
diff --git a/sys/geom/mountver/g_mountver.c b/sys/geom/mountver/g_mountver.c
index eafccc8..61375ef 100644
--- a/sys/geom/mountver/g_mountver.c
+++ b/sys/geom/mountver/g_mountver.c
@@ -611,12 +611,10 @@ g_mountver_shutdown_pre_sync(void *arg, int howto)
struct g_geom *gp, *gp2;
mp = arg;
- DROP_GIANT();
g_topology_lock();
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2)
g_mountver_destroy(gp, 1);
g_topology_unlock();
- PICKUP_GIANT();
}
static void
diff --git a/sys/geom/raid/g_raid.c b/sys/geom/raid/g_raid.c
index 4885319..e590e35 100644
--- a/sys/geom/raid/g_raid.c
+++ b/sys/geom/raid/g_raid.c
@@ -2462,7 +2462,6 @@ g_raid_shutdown_post_sync(void *arg, int howto)
struct g_raid_volume *vol;
mp = arg;
- DROP_GIANT();
g_topology_lock();
g_raid_shutdown = 1;
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
@@ -2477,7 +2476,6 @@ g_raid_shutdown_post_sync(void *arg, int howto)
g_topology_lock();
}
g_topology_unlock();
- PICKUP_GIANT();
}
static void
diff --git a/sys/geom/raid3/g_raid3.c b/sys/geom/raid3/g_raid3.c
index a2ffe53..9b3c483 100644
--- a/sys/geom/raid3/g_raid3.c
+++ b/sys/geom/raid3/g_raid3.c
@@ -3543,7 +3543,6 @@ g_raid3_shutdown_post_sync(void *arg, int howto)
int error;
mp = arg;
- DROP_GIANT();
g_topology_lock();
g_raid3_shutdown = 1;
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
@@ -3562,7 +3561,6 @@ g_raid3_shutdown_post_sync(void *arg, int howto)
g_topology_lock();
}
g_topology_unlock();
- PICKUP_GIANT();
}
static void
diff --git a/sys/gnu/dev/bwn/phy_n/if_bwn_phy_n_core.c b/sys/gnu/dev/bwn/phy_n/if_bwn_phy_n_core.c
index 1169585..cfc3104 100644
--- a/sys/gnu/dev/bwn/phy_n/if_bwn_phy_n_core.c
+++ b/sys/gnu/dev/bwn/phy_n/if_bwn_phy_n_core.c
@@ -6635,16 +6635,12 @@ bwn_nphy_op_prepare_structs(struct bwn_mac *mac)
if (mac->mac_phy.rev >= 2 &&
(siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_TXPWRCTRL_EN)) {
nphy->txpwrctrl = true;
-#ifdef CONFIG_BWN_SSB
- if (dev->dev->bus_type == BWN_BUS_SSB &&
- dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) {
- struct pci_dev *pdev =
- dev->dev->sdev->bus->host_pci;
- if (pdev->device == 0x4328 ||
- pdev->device == 0x432a)
+ if (bwn_is_bus_siba(mac) &&
+ (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCI)) {
+ if ((siba_get_pci_device(sc->sc_dev) == 0x4328) ||
+ (siba_get_pci_device(sc->sc_dev) == 0x432a))
nphy->pwg_gain_5ghz = true;
}
-#endif
} else if (siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_5G_PWRGAIN) {
nphy->pwg_gain_5ghz = true;
}
diff --git a/sys/i386/i386/sys_machdep.c b/sys/i386/i386/sys_machdep.c
index 4f78e29..9c8d94b 100644
--- a/sys/i386/i386/sys_machdep.c
+++ b/sys/i386/i386/sys_machdep.c
@@ -315,8 +315,9 @@ i386_set_ioperm(td, uap)
struct thread *td;
struct i386_ioperm_args *uap;
{
- int i, error;
char *iomap;
+ u_int i;
+ int error;
if ((error = priv_check(td, PRIV_IO)) != 0)
return (error);
@@ -334,7 +335,8 @@ i386_set_ioperm(td, uap)
return (error);
iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;
- if (uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
+ if (uap->start > uap->start + uap->length ||
+ uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
return (EINVAL);
for (i = uap->start; i < uap->start + uap->length; i++) {
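
The extra comparison added above is an unsigned-overflow guard: if start + length wraps modulo the word size, the sum is strictly smaller than start, so the request is rejected before the (now u_int) loop runs. A standalone demonstration with example values:

#include <assert.h>
#include <limits.h>

int
main(void)
{
	unsigned int start = UINT_MAX - 4;
	unsigned int length = 16;

	/* Wraps: start + length == 11, which is < start. */
	assert(start > start + length);
	return (0);
}
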
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index fe6297a..65f3a0a 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -451,13 +451,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
break;
default:
- if (td->td_proc->p_sysent->sv_errsize) {
- if (error >= td->td_proc->p_sysent->sv_errsize)
- error = -1; /* XXX */
- else
- error = td->td_proc->p_sysent->sv_errtbl[error];
- }
- td->td_frame->tf_eax = error;
+ td->td_frame->tf_eax = SV_ABI_ERRNO(td->td_proc, error);
td->td_frame->tf_eflags |= PSL_C;
break;
}
diff --git a/sys/i386/linux/linux_proto.h b/sys/i386/linux/linux_proto.h
index 7e260d8..87926f4 100644
--- a/sys/i386/linux/linux_proto.h
+++ b/sys/i386/linux/linux_proto.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/i386/linux/syscalls.master 297061 2016-03-20 13:21:20Z dchagin
+ * created from FreeBSD: head/sys/i386/linux/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
#ifndef _LINUX_SYSPROTO_H_
@@ -478,16 +478,16 @@ struct linux_sysctl_args {
};
struct linux_sched_setparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_setscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
char policy_l_[PADL_(l_int)]; l_int policy; char policy_r_[PADR_(l_int)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
diff --git a/sys/i386/linux/linux_syscall.h b/sys/i386/linux/linux_syscall.h
index 918f838..d028629 100644
--- a/sys/i386/linux/linux_syscall.h
+++ b/sys/i386/linux/linux_syscall.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/i386/linux/syscalls.master 297061 2016-03-20 13:21:20Z dchagin
+ * created from FreeBSD: head/sys/i386/linux/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
#define LINUX_SYS_linux_exit 1
diff --git a/sys/i386/linux/linux_syscalls.c b/sys/i386/linux/linux_syscalls.c
index 4a5cd4c..cdde808 100644
--- a/sys/i386/linux/linux_syscalls.c
+++ b/sys/i386/linux/linux_syscalls.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/i386/linux/syscalls.master 297061 2016-03-20 13:21:20Z dchagin
+ * created from FreeBSD: head/sys/i386/linux/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
const char *linux_syscallnames[] = {
diff --git a/sys/i386/linux/linux_sysent.c b/sys/i386/linux/linux_sysent.c
index 181141d..7268964 100644
--- a/sys/i386/linux/linux_sysent.c
+++ b/sys/i386/linux/linux_sysent.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/i386/linux/syscalls.master 297061 2016-03-20 13:21:20Z dchagin
+ * created from FreeBSD: head/sys/i386/linux/syscalls.master 300359 2016-05-21 08:01:14Z dchagin
*/
#include <sys/param.h>
diff --git a/sys/i386/linux/linux_systrace_args.c b/sys/i386/linux/linux_systrace_args.c
index 7feba47..f02f34f 100644
--- a/sys/i386/linux/linux_systrace_args.c
+++ b/sys/i386/linux/linux_systrace_args.c
@@ -1085,7 +1085,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 154: {
struct linux_sched_setparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1093,7 +1093,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 155: {
struct linux_sched_getparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1102,7 +1102,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
struct linux_sched_setscheduler_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
iarg[1] = p->policy; /* l_int */
- uarg[2] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[2] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 3;
break;
}
@@ -4072,7 +4072,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -4085,7 +4085,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -4101,7 +4101,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_int";
break;
case 2:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
diff --git a/sys/i386/linux/syscalls.master b/sys/i386/linux/syscalls.master
index 1032ef0..7ec3154 100644
--- a/sys/i386/linux/syscalls.master
+++ b/sys/i386/linux/syscalls.master
@@ -270,12 +270,12 @@
152 AUE_MLOCKALL NOPROTO { int mlockall(int how); }
153 AUE_MUNLOCKALL NOPROTO { int munlockall(void); }
154 AUE_SCHED_SETPARAM STD { int linux_sched_setparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
155 AUE_SCHED_GETPARAM STD { int linux_sched_getparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
156 AUE_SCHED_SETSCHEDULER STD { int linux_sched_setscheduler( \
l_pid_t pid, l_int policy, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
157 AUE_SCHED_GETSCHEDULER STD { int linux_sched_getscheduler( \
l_pid_t pid); }
158 AUE_NULL NOPROTO { int sched_yield(void); }
diff --git a/sys/kern/bus_if.m b/sys/kern/bus_if.m
index 8592e9f..2b75438 100644
--- a/sys/kern/bus_if.m
+++ b/sys/kern/bus_if.m
@@ -287,8 +287,9 @@ METHOD struct resource * alloc_resource {
* @brief Activate a resource
*
* Activate a resource previously allocated with
- * BUS_ALLOC_RESOURCE(). This may for instance map a memory region
- * into the kernel's virtual address space.
+ * BUS_ALLOC_RESOURCE(). This may enable decoding of this resource in a
+ * device for instance. It will also establish a mapping for the resource
+ * unless RF_UNMAPPED was set when allocating the resource.
*
* @param _dev the parent device of @p _child
* @param _child the device which allocated the resource
@@ -304,12 +305,58 @@ METHOD int activate_resource {
struct resource *_r;
};
+
+/**
+ * @brief Map a resource
+ *
+ * Allocate a mapping for a range of an active resource. The mapping
+ * is described by a struct resource_map object. This may for instance
+ * map a memory region into the kernel's virtual address space.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which allocated the resource
+ * @param _type the type of resource
+ * @param _r the resource to map
+ * @param _args optional attributes of the mapping
+ * @param _map the mapping
+ */
+METHOD int map_resource {
+ device_t _dev;
+ device_t _child;
+ int _type;
+ struct resource *_r;
+ struct resource_map_request *_args;
+ struct resource_map *_map;
+} DEFAULT bus_generic_map_resource;
+
+
+/**
+ * @brief Unmap a resource
+ *
+ * Release a mapping previously allocated with
+ * BUS_MAP_RESOURCE(). This may for instance unmap a memory region
+ * from the kernel's virtual address space.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which allocated the resource
+ * @param _type the type of resource
+ * @param _r the resource
+ * @param _map the mapping to release
+ */
+METHOD int unmap_resource {
+ device_t _dev;
+ device_t _child;
+ int _type;
+ struct resource *_r;
+ struct resource_map *_map;
+} DEFAULT bus_generic_unmap_resource;
+
+
/**
* @brief Deactivate a resource
*
* Deactivate a resource previously allocated with
- * BUS_ALLOC_RESOURCE(). This may for instance unmap a memory region
- * from the kernel's virtual address space.
+ * BUS_ALLOC_RESOURCE().
*
* @param _dev the parent device of @p _child
* @param _child the device which allocated the resource
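
Hypothetical driver fragment showing the intended flow for the new methods: allocate a memory BAR with RF_UNMAPPED, then create an explicit mapping with a requested memory attribute. resource_init_map_request() is assumed to be the public wrapper around the *_impl helper added in subr_bus.c, and VM_MEMATTR_WRITE_COMBINING is an assumed attribute value; neither appears verbatim in this diff.

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/errno.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <vm/vm.h>

static int
mydrv_map_bar(device_t dev, int rid, struct resource **res,
    struct resource_map *map)
{
	struct resource_map_request req;

	*res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | RF_UNMAPPED);
	if (*res == NULL)
		return (ENXIO);

	resource_init_map_request(&req);
	req.memattr = VM_MEMATTR_WRITE_COMBINING;
	return (bus_map_resource(dev, SYS_RES_MEMORY, *res, &req, map));
}
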
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index e7c81d6..e6bc60c 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -110,9 +110,6 @@ MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
-MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
-MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
-
static struct malloc_type *kmemstatistics;
static int kmemcount;
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index b90caab..55df6a0 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -327,7 +327,7 @@ pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
if (sbt == 0)
sbt = tick_sbt;
- if (cold || kdb_active) {
+ if (cold || kdb_active || SCHEDULER_STOPPED()) {
/*
* We delay one second at a time to avoid overflowing the
* system specific DELAY() function(s):
diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c
index e882553..f491643 100644
--- a/sys/kern/subr_bus.c
+++ b/sys/kern/subr_bus.c
@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <machine/stdarg.h>
#include <vm/uma.h>
+#include <vm/vm.h>
SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW, NULL, NULL);
SYSCTL_ROOT_NODE(OID_AUTO, dev, CTLFLAG_RW, NULL, NULL);
@@ -3050,6 +3051,15 @@ device_set_unit(device_t dev, int unit)
* Some useful method implementations to make life easier for bus drivers.
*/
+void
+resource_init_map_request_impl(struct resource_map_request *args, size_t sz)
+{
+
+ bzero(args, sz);
+ args->size = sz;
+ args->memattr = VM_MEMATTR_UNCACHEABLE;
+}
+
/**
* @brief Initialise a resource list.
*
@@ -4060,6 +4070,40 @@ bus_generic_deactivate_resource(device_t dev, device_t child, int type,
}
/**
+ * @brief Helper function for implementing BUS_MAP_RESOURCE().
+ *
+ * This simple implementation of BUS_MAP_RESOURCE() simply calls the
+ * BUS_MAP_RESOURCE() method of the parent of @p dev.
+ */
+int
+bus_generic_map_resource(device_t dev, device_t child, int type,
+ struct resource *r, struct resource_map_request *args,
+ struct resource_map *map)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_MAP_RESOURCE(dev->parent, child, type, r, args,
+ map));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_UNMAP_RESOURCE().
+ *
+ * This simple implementation of BUS_UNMAP_RESOURCE() simply calls the
+ * BUS_UNMAP_RESOURCE() method of the parent of @p dev.
+ */
+int
+bus_generic_unmap_resource(device_t dev, device_t child, int type,
+ struct resource *r, struct resource_map *map)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_UNMAP_RESOURCE(dev->parent, child, type, r, map));
+ return (EINVAL);
+}
+
+/**
* @brief Helper function for implementing BUS_BIND_INTR().
*
* This simple implementation of BUS_BIND_INTR() simply calls the
@@ -4421,6 +4465,36 @@ bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r)
}
/**
+ * @brief Wrapper function for BUS_MAP_RESOURCE().
+ *
+ * This function simply calls the BUS_MAP_RESOURCE() method of the
+ * parent of @p dev.
+ */
+int
+bus_map_resource(device_t dev, int type, struct resource *r,
+ struct resource_map_request *args, struct resource_map *map)
+{
+ if (dev->parent == NULL)
+ return (EINVAL);
+ return (BUS_MAP_RESOURCE(dev->parent, dev, type, r, args, map));
+}
+
+/**
+ * @brief Wrapper function for BUS_UNMAP_RESOURCE().
+ *
+ * This function simply calls the BUS_UNMAP_RESOURCE() method of the
+ * parent of @p dev.
+ */
+int
+bus_unmap_resource(device_t dev, int type, struct resource *r,
+ struct resource_map *map)
+{
+ if (dev->parent == NULL)
+ return (EINVAL);
+ return (BUS_UNMAP_RESOURCE(dev->parent, dev, type, r, map));
+}
+
+/**
* @brief Wrapper function for BUS_RELEASE_RESOURCE().
*
* This function simply calls the BUS_RELEASE_RESOURCE() method of the
diff --git a/sys/kern/subr_devstat.c b/sys/kern/subr_devstat.c
index c291342..84a98d0 100644
--- a/sys/kern/subr_devstat.c
+++ b/sys/kern/subr_devstat.c
@@ -354,7 +354,9 @@ devstat_end_transaction_bio_bt(struct devstat *ds, struct bio *bp,
if (bp->bio_cmd == BIO_DELETE)
flg = DEVSTAT_FREE;
- else if (bp->bio_cmd == BIO_READ)
+ else if ((bp->bio_cmd == BIO_READ)
+ || ((bp->bio_cmd == BIO_ZONE)
+ && (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)))
flg = DEVSTAT_READ;
else if (bp->bio_cmd == BIO_WRITE)
flg = DEVSTAT_WRITE;
diff --git a/sys/kern/subr_intr.c b/sys/kern/subr_intr.c
index bb9fbb5..cb80880 100644
--- a/sys/kern/subr_intr.c
+++ b/sys/kern/subr_intr.c
@@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
#include "opt_acpi.h"
#include "opt_ddb.h"
+#include "opt_hwpmc_hooks.h"
#include "opt_platform.h"
#include <sys/param.h>
@@ -53,6 +54,10 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>
+#ifdef HWPMC_HOOKS
+#include <sys/pmckern.h>
+#endif
+
#include <machine/atomic.h>
#include <machine/intr.h>
#include <machine/cpu.h>
@@ -311,6 +316,10 @@ intr_irq_handler(struct trapframe *tf)
irq_root_filter(irq_root_arg);
td->td_intr_frame = oldframe;
critical_exit();
+#ifdef HWPMC_HOOKS
+ if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
+ pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
+#endif
}
/*
diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c
index d1c68b45..cc0bcdc 100644
--- a/sys/kern/subr_rman.c
+++ b/sys/kern/subr_rman.c
@@ -897,6 +897,27 @@ rman_get_bushandle(struct resource *r)
}
void
+rman_set_mapping(struct resource *r, struct resource_map *map)
+{
+
+ KASSERT(rman_get_size(r) == map->r_size,
+ ("rman_set_mapping: size mismatch"));
+ rman_set_bustag(r, map->r_bustag);
+ rman_set_bushandle(r, map->r_bushandle);
+ rman_set_virtual(r, map->r_vaddr);
+}
+
+void
+rman_get_mapping(struct resource *r, struct resource_map *map)
+{
+
+ map->r_bustag = rman_get_bustag(r);
+ map->r_bushandle = rman_get_bushandle(r);
+ map->r_size = rman_get_size(r);
+ map->r_vaddr = rman_get_virtual(r);
+}
+
+void
rman_set_rid(struct resource *r, int rid)
{
diff --git a/sys/kern/subr_sglist.c b/sys/kern/subr_sglist.c
index df88a26..0d371a4 100644
--- a/sys/kern/subr_sglist.c
+++ b/sys/kern/subr_sglist.c
@@ -192,6 +192,31 @@ sglist_count(void *buf, size_t len)
}
/*
+ * Determine the number of scatter/gather list elements needed to
+ * describe a buffer backed by an array of VM pages.
+ */
+int
+sglist_count_vmpages(vm_page_t *m, size_t pgoff, size_t len)
+{
+ vm_paddr_t lastaddr, paddr;
+ int i, nsegs;
+
+ if (len == 0)
+ return (0);
+
+ len += pgoff;
+ nsegs = 1;
+ lastaddr = VM_PAGE_TO_PHYS(m[0]);
+ for (i = 1; len > PAGE_SIZE; len -= PAGE_SIZE, i++) {
+ paddr = VM_PAGE_TO_PHYS(m[i]);
+ if (lastaddr + PAGE_SIZE != paddr)
+ nsegs++;
+ lastaddr = paddr;
+ }
+ return (nsegs);
+}
+
+/*
* Allocate a scatter/gather list along with 'nsegs' segments. The
* 'mflags' parameters are the same as passed to malloc(9). The caller
* should use sglist_free() to free this list.
@@ -252,33 +277,14 @@ sglist_append(struct sglist *sg, void *buf, size_t len)
int
sglist_append_bio(struct sglist *sg, struct bio *bp)
{
- struct sgsave save;
- vm_paddr_t paddr;
- size_t len, tlen;
- int error, i, ma_offs;
+ int error;
- if ((bp->bio_flags & BIO_UNMAPPED) == 0) {
+ if ((bp->bio_flags & BIO_UNMAPPED) == 0)
error = sglist_append(sg, bp->bio_data, bp->bio_bcount);
- return (error);
- }
-
- if (sg->sg_maxseg == 0)
- return (EINVAL);
-
- SGLIST_SAVE(sg, save);
- tlen = bp->bio_bcount;
- ma_offs = bp->bio_ma_offset;
- for (i = 0; tlen > 0; i++, tlen -= len) {
- len = min(PAGE_SIZE - ma_offs, tlen);
- paddr = VM_PAGE_TO_PHYS(bp->bio_ma[i]) + ma_offs;
- error = sglist_append_phys(sg, paddr, len);
- if (error) {
- SGLIST_RESTORE(sg, save);
- return (error);
- }
- ma_offs = 0;
- }
- return (0);
+ else
+ error = sglist_append_vmpages(sg, bp->bio_ma,
+ bp->bio_ma_offset, bp->bio_bcount);
+ return (error);
}
/*
@@ -341,6 +347,51 @@ sglist_append_mbuf(struct sglist *sg, struct mbuf *m0)
}
/*
+ * Append the segments that describe a buffer spanning an array of VM
+ * pages. The buffer begins at an offset of 'pgoff' in the first
+ * page.
+ */
+int
+sglist_append_vmpages(struct sglist *sg, vm_page_t *m, size_t pgoff,
+ size_t len)
+{
+ struct sgsave save;
+ struct sglist_seg *ss;
+ vm_paddr_t paddr;
+ size_t seglen;
+ int error, i;
+
+ if (sg->sg_maxseg == 0)
+ return (EINVAL);
+ if (len == 0)
+ return (0);
+
+ SGLIST_SAVE(sg, save);
+ i = 0;
+ if (sg->sg_nseg == 0) {
+ seglen = min(PAGE_SIZE - pgoff, len);
+ sg->sg_segs[0].ss_paddr = VM_PAGE_TO_PHYS(m[0]) + pgoff;
+ sg->sg_segs[0].ss_len = seglen;
+ sg->sg_nseg = 1;
+ pgoff = 0;
+ len -= seglen;
+ i++;
+ }
+ ss = &sg->sg_segs[sg->sg_nseg - 1];
+ for (; len > 0; i++, len -= seglen) {
+ seglen = min(PAGE_SIZE - pgoff, len);
+ paddr = VM_PAGE_TO_PHYS(m[i]) + pgoff;
+ error = _sglist_append_range(sg, &ss, paddr, seglen);
+ if (error) {
+ SGLIST_RESTORE(sg, save);
+ return (error);
+ }
+ pgoff = 0;
+ }
+ return (0);
+}
+
+/*
* Append the segments that describe a single user address range to a
* scatter/gather list. If there are insufficient segments, then this
* fails with EFBIG.
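
Illustrative caller (not in the patch) for the two new VM-page helpers: size the list with sglist_count_vmpages(), then fill it with sglist_append_vmpages(). sglist_alloc()/sglist_free() are the existing allocation routines referenced in the surrounding comments.

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/sglist.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

static struct sglist *
sglist_from_pages(vm_page_t *pages, size_t pgoff, size_t len)
{
	struct sglist *sg;
	int nsegs;

	nsegs = sglist_count_vmpages(pages, pgoff, len);
	sg = sglist_alloc(nsegs, M_WAITOK);
	if (sglist_append_vmpages(sg, pages, pgoff, len) != 0) {
		sglist_free(sg);
		return (NULL);
	}
	return (sg);
}
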
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
index b370b37..00cb46f 100644
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -68,7 +68,6 @@ struct taskqueue {
TAILQ_HEAD(, taskqueue_busy) tq_active;
struct mtx tq_mutex;
struct thread **tq_threads;
- struct thread *tq_curthread;
int tq_tcount;
int tq_spin;
int tq_flags;
@@ -128,16 +127,17 @@ _taskqueue_create(const char *name, int mflags,
int mtxflags, const char *mtxname __unused)
{
struct taskqueue *queue;
- char *tq_name = NULL;
+ char *tq_name;
- if (name != NULL)
- tq_name = strndup(name, 32, M_TASKQUEUE);
- if (tq_name == NULL)
- tq_name = "taskqueue";
+ tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
+ if (!tq_name)
+ return (NULL);
+
+ snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");
queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
if (!queue)
- return NULL;
+ return (NULL);
STAILQ_INIT(&queue->tq_queue);
TAILQ_INIT(&queue->tq_active);
@@ -153,7 +153,7 @@ _taskqueue_create(const char *name, int mflags,
queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);
- return queue;
+ return (queue);
}
struct taskqueue *
@@ -221,7 +221,7 @@ taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
* Count multiple enqueues.
*/
if (task->ta_pending) {
- if (task->ta_pending < UCHAR_MAX)
+ if (task->ta_pending < USHRT_MAX)
task->ta_pending++;
TQ_UNLOCK(queue);
return (0);
@@ -464,8 +464,7 @@ taskqueue_run_locked(struct taskqueue *queue)
TQ_LOCK(queue);
tb.tb_running = NULL;
- if ((task->ta_flags & TASK_SKIP_WAKEUP) == 0)
- wakeup(task);
+ wakeup(task);
TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
tb_first = TAILQ_FIRST(&queue->tq_active);
@@ -480,9 +479,7 @@ taskqueue_run(struct taskqueue *queue)
{
TQ_LOCK(queue);
- queue->tq_curthread = curthread;
taskqueue_run_locked(queue);
- queue->tq_curthread = NULL;
TQ_UNLOCK(queue);
}
@@ -715,7 +712,6 @@ taskqueue_thread_loop(void *arg)
tq = *tqp;
taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
TQ_LOCK(tq);
- tq->tq_curthread = curthread;
while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
/* XXX ? */
taskqueue_run_locked(tq);
@@ -729,7 +725,6 @@ taskqueue_thread_loop(void *arg)
TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
}
taskqueue_run_locked(tq);
- tq->tq_curthread = NULL;
/*
* This thread is on its way out, so just drop the lock temporarily
* in order to call the shutdown callback. This allows the callback
@@ -753,8 +748,7 @@ taskqueue_thread_enqueue(void *context)
tqp = context;
tq = *tqp;
- if (tq->tq_curthread != curthread)
- wakeup_one(tq);
+ wakeup_one(tq);
}
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
diff --git a/sys/kern/sys_socket.c b/sys/kern/sys_socket.c
index e6416b9..4c314a1 100644
--- a/sys/kern/sys_socket.c
+++ b/sys/kern/sys_socket.c
@@ -605,7 +605,6 @@ retry:
cnt -= uio.uio_resid;
td->td_ucred = td_savedcred;
- /* XXX: Not sure if this is needed? */
if (cnt != 0 && (error == ERESTART || error == EINTR ||
error == EWOULDBLOCK))
error = 0;
@@ -633,7 +632,10 @@ retry:
TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
}
} else {
- aio_complete(job, cnt, error);
+ if (error)
+ aio_complete(job, -1, error);
+ else
+ aio_complete(job, cnt, 0);
SOCKBUF_LOCK(sb);
}
}
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index 70cc8fd..8117418 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -550,7 +550,7 @@ SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
/*
* When an attempt at a new connection is noted on a socket which accepts
* connections, sonewconn is called. If the connection is possible (subject
- * to space constraints, etc.) then we allocate a new structure, propoerly
+ * to space constraints, etc.) then we allocate a new structure, properly
* linked into the data structure of the original socket, and return this.
* Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
*
@@ -2711,7 +2711,7 @@ sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
* that we always tell the user precisely how much we copied, rather
* than something useful like the total amount we had available for
* her. Note that this interface is not idempotent; the entire
- * answer must generated ahead of time.
+ * answer must be generated ahead of time.
*/
valsize = min(len, sopt->sopt_valsize);
sopt->sopt_valsize = valsize;
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index 20df141..64adc37 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -806,7 +806,10 @@ aio_process_rw(struct kaiocb *job)
cnt -= auio.uio_resid;
td->td_ucred = td_savedcred;
- aio_complete(job, cnt, error);
+ if (error)
+ aio_complete(job, -1, error);
+ else
+ aio_complete(job, cnt, 0);
}
static void
@@ -824,7 +827,10 @@ aio_process_sync(struct kaiocb *job)
if (fp->f_vnode != NULL)
error = aio_fsync_vnode(td, fp->f_vnode);
td->td_ucred = td_savedcred;
- aio_complete(job, 0, error);
+ if (error)
+ aio_complete(job, -1, error);
+ else
+ aio_complete(job, 0, 0);
}
static void
@@ -839,7 +845,10 @@ aio_process_mlock(struct kaiocb *job)
aio_switch_vmspace(job);
error = vm_mlock(job->userproc, job->cred,
__DEVOLATILE(void *, cb->aio_buf), cb->aio_nbytes);
- aio_complete(job, 0, error);
+ if (error)
+ aio_complete(job, -1, error);
+ else
+ aio_complete(job, 0, 0);
}
static void
@@ -2323,7 +2332,10 @@ aio_physwakeup(struct bio *bp)
else
job->inputcharge += nblks;
- aio_complete(job, nbytes, error);
+ if (error)
+ aio_complete(job, -1, error);
+ else
+ aio_complete(job, nbytes, 0);
g_destroy_bio(bp);
}
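
Each of the aio hunks above follows the same completion convention: on failure report a status of -1 together with the error, on success report the byte count (or zero) with no error. A minimal sketch of that convention as a local helper; the helper name is hypothetical and the long/int types are taken from the call sites above.

static void
example_aio_done(struct kaiocb *job, long cnt, int error)
{
        /* Failure: status -1 plus the errno; success: the count, no error. */
        if (error)
                aio_complete(job, -1, error);
        else
                aio_complete(job, cnt, 0);
}
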
diff --git a/sys/mips/conf/BCM b/sys/mips/conf/BCM
index 788bbd8..e2215f7 100644
--- a/sys/mips/conf/BCM
+++ b/sys/mips/conf/BCM
@@ -5,6 +5,8 @@
# used in COTS hardware including the ASUS RT-N12, RT-N16, RT-N53.
#
+#NO_UNIVERSE
+
ident BCM
hints "BCM.hints"
diff --git a/sys/mips/include/clock.h b/sys/mips/include/clock.h
index 15acef8..0622efb 100644
--- a/sys/mips/include/clock.h
+++ b/sys/mips/include/clock.h
@@ -17,8 +17,6 @@
extern int cpu_clock;
-extern uint32_t clockintr(uint32_t, struct trapframe *);
-
#define wall_cmos_clock 0
#define adjkerntz 0
diff --git a/sys/modules/bhnd/bhndb/Makefile b/sys/modules/bhnd/bhndb/Makefile
index 7188009..0535864 100644
--- a/sys/modules/bhnd/bhndb/Makefile
+++ b/sys/modules/bhnd/bhndb/Makefile
@@ -4,6 +4,7 @@
KMOD= bhndb
SRCS= bhndb.c bhndb_subr.c bhndb_hwdata.c \
+ bhnd_bhndb.c \
bhndb_bus_if.c bhndb_bus_if.h \
bhndb_if.c bhndb_if.h
SRCS+= bhnd_bus_if.h \
diff --git a/sys/modules/hyperv/vmbus/Makefile b/sys/modules/hyperv/vmbus/Makefile
index 8187146..cfbd2cf 100644
--- a/sys/modules/hyperv/vmbus/Makefile
+++ b/sys/modules/hyperv/vmbus/Makefile
@@ -11,7 +11,8 @@ SRCS= hv_channel.c \
hv_hv.c \
hv_ring_buffer.c \
hv_vmbus_drv_freebsd.c \
- hv_vmbus_priv.h
+ hv_vmbus_priv.h \
+ hyperv_busdma.c
SRCS+= acpi_if.h bus_if.h device_if.h opt_acpi.h
# XXX: for assym.s
diff --git a/sys/modules/vnic/Makefile b/sys/modules/vnic/Makefile
new file mode 100644
index 0000000..69d1590
--- /dev/null
+++ b/sys/modules/vnic/Makefile
@@ -0,0 +1,10 @@
+# $FreeBSD$
+
+SYSDIR?=${.CURDIR}/../..
+.include "${SYSDIR}/conf/kern.opts.mk"
+
+CFLAGS+= -DFDT
+
+SUBDIR = mrmlbus thunder_mdio thunder_bgx vnicpf vnicvf
+
+.include <bsd.subdir.mk>
diff --git a/sys/modules/vnic/mrmlbus/Makefile b/sys/modules/vnic/mrmlbus/Makefile
new file mode 100644
index 0000000..1d1eb34
--- /dev/null
+++ b/sys/modules/vnic/mrmlbus/Makefile
@@ -0,0 +1,16 @@
+# $FreeBSD$
+
+SYSDIR?=${.CURDIR}/../../..
+.include "${SYSDIR}/conf/kern.opts.mk"
+
+S= ${.CURDIR}/../../..
+
+.PATH: $S/dev/vnic
+
+KMOD= mrmlbus
+SRCS= device_if.h bus_if.h opt_platform.h pci_if.h ofw_bus_if.h miibus_if.h lmac_if.h
+SRCS+= mrml_bridge.c
+
+CFLAGS+= -DFDT
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/vnic/thunder_bgx/Makefile b/sys/modules/vnic/thunder_bgx/Makefile
new file mode 100644
index 0000000..4926e7a
--- /dev/null
+++ b/sys/modules/vnic/thunder_bgx/Makefile
@@ -0,0 +1,16 @@
+# $FreeBSD$
+
+SYSDIR?=${.CURDIR}/../../..
+.include "${SYSDIR}/conf/kern.opts.mk"
+
+S= ${.CURDIR}/../../..
+
+.PATH: $S/dev/vnic
+
+KMOD= thunder_bgx
+SRCS= thunder_bgx.c thunder_bgx_fdt.c
+SRCS+= opt_platform.h device_if.h bus_if.h pci_if.h lmac_if.h ofw_bus_if.h
+
+CFLAGS+= -DFDT
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/vnic/thunder_mdio/Makefile b/sys/modules/vnic/thunder_mdio/Makefile
new file mode 100644
index 0000000..78ca8f8
--- /dev/null
+++ b/sys/modules/vnic/thunder_mdio/Makefile
@@ -0,0 +1,16 @@
+# $FreeBSD$
+
+SYSDIR?=${.CURDIR}/../../..
+.include "${SYSDIR}/conf/kern.opts.mk"
+
+S= ${.CURDIR}/../../..
+
+.PATH: $S/dev/vnic
+
+KMOD= thunder_mdio
+SRCS= opt_platform.h device_if.h bus_if.h pci_if.h ofw_bus_if.h miibus_if.h lmac_if.h
+SRCS+= thunder_mdio.c thunder_mdio_fdt.c
+
+CFLAGS+= -DFDT
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/vnic/vnicpf/Makefile b/sys/modules/vnic/vnicpf/Makefile
new file mode 100644
index 0000000..206f75f
--- /dev/null
+++ b/sys/modules/vnic/vnicpf/Makefile
@@ -0,0 +1,16 @@
+# $FreeBSD$
+
+SYSDIR?=${.CURDIR}/../../..
+.include "${SYSDIR}/conf/kern.opts.mk"
+
+S= ${.CURDIR}/../../..
+
+.PATH: $S/dev/vnic
+
+KMOD= vnicpf
+SRCS= pci_iov_if.h opt_platform.h device_if.h bus_if.h pci_if.h ofw_bus_if.h miibus_if.h lmac_if.h
+SRCS+= nic_main.c
+
+CFLAGS+= -DFDT -DPCI_IOV
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/vnic/vnicvf/Makefile b/sys/modules/vnic/vnicvf/Makefile
new file mode 100644
index 0000000..9614cc0
--- /dev/null
+++ b/sys/modules/vnic/vnicvf/Makefile
@@ -0,0 +1,16 @@
+# $FreeBSD$
+
+SYSDIR?=${.CURDIR}/../../..
+.include "${SYSDIR}/conf/kern.opts.mk"
+
+S= ${.CURDIR}/../../..
+
+.PATH: $S/dev/vnic
+
+KMOD= vnicvf
+SRCS= nicvf_main.c nicvf_queues.c
+SRCS+= opt_platform.h ofw_bus_if.h lmac_if.h miibus_if.h pci_if.h bus_if.h device_if.h opt_inet.h opt_inet6.h
+
+CFLAGS+= -DFDT -DPCI_IOV
+
+.include <bsd.kmod.mk>
diff --git a/sys/net/if_tun.c b/sys/net/if_tun.c
index f5aba00..b924ca8 100644
--- a/sys/net/if_tun.c
+++ b/sys/net/if_tun.c
@@ -848,7 +848,7 @@ tunwrite(struct cdev *dev, struct uio *uio, int flag)
struct tun_softc *tp = dev->si_drv1;
struct ifnet *ifp = TUN2IFP(tp);
struct mbuf *m;
- uint32_t family;
+ uint32_t family, mru;
int isr;
TUNDEBUG(ifp, "tunwrite\n");
@@ -860,7 +860,10 @@ tunwrite(struct cdev *dev, struct uio *uio, int flag)
if (uio->uio_resid == 0)
return (0);
- if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
+ mru = TUNMRU;
+ if (tp->tun_flags & TUN_IFHEAD)
+ mru += sizeof(family);
+ if (uio->uio_resid < 0 || uio->uio_resid > mru) {
TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid);
return (EIO);
}
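
The mru adjustment above exists because a tun device in TUN_IFHEAD mode expects each packet written to it to be prefixed with a 4-byte address-family word, so a maximum-size packet plus that prefix must still fit. A userland-side sketch of the framing, assuming the prefix mode has already been enabled on the descriptor (e.g. via TUNSIFHEAD); the helper name is hypothetical.

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <arpa/inet.h>
#include <stdint.h>

static ssize_t
tun_write_af(int fd, void *pkt, size_t len)
{
        uint32_t af = htonl(AF_INET);           /* 4-byte family prefix */
        struct iovec iov[2] = {
                { &af, sizeof(af) },
                { pkt, len },
        };

        /* The driver now accepts up to TUNMRU + sizeof(af) bytes here. */
        return (writev(fd, iov, 2));
}
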
diff --git a/sys/net/mp_ring.c b/sys/net/mp_ring.c
index 1e17964..3ff272c 100644
--- a/sys/net/mp_ring.c
+++ b/sys/net/mp_ring.c
@@ -358,7 +358,7 @@ ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget)
/*
* Wait for other producers who got in ahead of us to enqueue their
* items, one producer at a time. It is our turn when the ring's
- * pidx_tail reaches the begining of our reservation (pidx_start).
+ * pidx_tail reaches the beginning of our reservation (pidx_start).
*/
while (ns.pidx_tail != pidx_start) {
cpu_spinwait();
@@ -432,7 +432,7 @@ ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget)
/*
* Wait for other producers who got in ahead of us to enqueue their
* items, one producer at a time. It is our turn when the ring's
- * pidx_tail reaches the begining of our reservation (pidx_start).
+ * pidx_tail reaches the beginning of our reservation (pidx_start).
*/
while (ns.pidx_tail != pidx_start) {
cpu_spinwait();
diff --git a/sys/net80211/ieee80211.c b/sys/net80211/ieee80211.c
index 453f119..39da95f 100644
--- a/sys/net80211/ieee80211.c
+++ b/sys/net80211/ieee80211.c
@@ -1207,7 +1207,7 @@ add_chanlist(struct ieee80211_channel chans[], int maxchans, int *nchans,
}
}
- return (error);
+ return (0);
}
int
diff --git a/sys/net80211/ieee80211_action.c b/sys/net80211/ieee80211_action.c
index 9c75989..a42b469 100644
--- a/sys/net80211/ieee80211_action.c
+++ b/sys/net80211/ieee80211_action.c
@@ -103,7 +103,6 @@ ieee80211_send_action_register(int cat, int act, ieee80211_send_action_func *f)
break;
meshaction_send_action[act] = f;
return 0;
- break;
case IEEE80211_ACTION_CAT_VENDOR:
if (act >= nitems(vendor_send_action))
break;
diff --git a/sys/net80211/ieee80211_crypto_none.c b/sys/net80211/ieee80211_crypto_none.c
index fef4c09..fd5cb6a 100644
--- a/sys/net80211/ieee80211_crypto_none.c
+++ b/sys/net80211/ieee80211_crypto_none.c
@@ -101,7 +101,6 @@ none_encap(struct ieee80211_key *k, struct mbuf *m)
struct ieee80211vap *vap = k->wk_private;
#ifdef IEEE80211_DEBUG
struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
-#endif
uint8_t keyid;
keyid = ieee80211_crypto_get_keyid(vap, k);
@@ -112,6 +111,7 @@ none_encap(struct ieee80211_key *k, struct mbuf *m)
*/
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr1,
"key id %u is not set (encap)", keyid);
+#endif
vap->iv_stats.is_tx_badcipher++;
return 0;
}
diff --git a/sys/net80211/ieee80211_freebsd.c b/sys/net80211/ieee80211_freebsd.c
index 373b053..0333e9b 100644
--- a/sys/net80211/ieee80211_freebsd.c
+++ b/sys/net80211/ieee80211_freebsd.c
@@ -61,7 +61,7 @@ __FBSDID("$FreeBSD$");
SYSCTL_NODE(_net, OID_AUTO, wlan, CTLFLAG_RD, 0, "IEEE 80211 parameters");
#ifdef IEEE80211_DEBUG
-int ieee80211_debug = 0;
+static int ieee80211_debug = 0;
SYSCTL_INT(_net_wlan, OID_AUTO, debug, CTLFLAG_RW, &ieee80211_debug,
0, "debugging printfs");
#endif
diff --git a/sys/net80211/ieee80211_hostap.c b/sys/net80211/ieee80211_hostap.c
index 60535b2..5b7611e 100644
--- a/sys/net80211/ieee80211_hostap.c
+++ b/sys/net80211/ieee80211_hostap.c
@@ -1067,7 +1067,7 @@ hostap_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
*/
ni->ni_flags |= IEEE80211_NODE_AREF;
/*
- * Mark the node as requiring a valid associatio id
+ * Mark the node as requiring a valid association id
* before outbound traffic is permitted.
*/
ni->ni_flags |= IEEE80211_NODE_ASSOCID;
diff --git a/sys/net80211/ieee80211_ht.c b/sys/net80211/ieee80211_ht.c
index 0320c2d..7e3616d 100644
--- a/sys/net80211/ieee80211_ht.c
+++ b/sys/net80211/ieee80211_ht.c
@@ -1643,6 +1643,7 @@ ieee80211_setup_htrates(struct ieee80211_node *ni, const uint8_t *ie, int flags)
int i, maxequalmcs, maxunequalmcs;
maxequalmcs = ic->ic_txstream * 8 - 1;
+ maxunequalmcs = 0;
if (ic->ic_htcaps & IEEE80211_HTC_TXUNEQUAL) {
if (ic->ic_txstream >= 2)
maxunequalmcs = 38;
@@ -1650,8 +1651,7 @@ ieee80211_setup_htrates(struct ieee80211_node *ni, const uint8_t *ie, int flags)
maxunequalmcs = 52;
if (ic->ic_txstream >= 4)
maxunequalmcs = 76;
- } else
- maxunequalmcs = 0;
+ }
rs = &ni->ni_htrates;
memset(rs, 0, sizeof(*rs));
diff --git a/sys/net80211/ieee80211_hwmp.c b/sys/net80211/ieee80211_hwmp.c
index 192024e..332f140 100644
--- a/sys/net80211/ieee80211_hwmp.c
+++ b/sys/net80211/ieee80211_hwmp.c
@@ -944,7 +944,6 @@ hwmp_recv_preq(struct ieee80211vap *vap, struct ieee80211_node *ni,
struct ieee80211_hwmp_route *hrorig = NULL;
struct ieee80211_hwmp_route *hrtarg = NULL;
struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
- struct ieee80211_meshprep_ie prep;
ieee80211_hwmp_seq preqid; /* last seen preqid for orig */
uint32_t metric = 0;
@@ -1057,6 +1056,8 @@ hwmp_recv_preq(struct ieee80211vap *vap, struct ieee80211_node *ni,
IEEE80211_ADDR_EQ(vap->iv_myaddr, rttarg->rt_mesh_gate) &&
rttarg->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY &&
rttarg->rt_flags & IEEE80211_MESHRT_FLAGS_VALID)) {
+ struct ieee80211_meshprep_ie prep;
+
/*
* When we are the target we shall update our own HWMP seq
* number with max of (current and preq->seq) + 1
@@ -1139,6 +1140,8 @@ hwmp_recv_preq(struct ieee80211vap *vap, struct ieee80211_node *ni,
*/
if ((rtorig->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0 ||
(preq->preq_flags & IEEE80211_MESHPREQ_FLAGS_PP)) {
+ struct ieee80211_meshprep_ie prep;
+
prep.prep_flags = 0;
prep.prep_hopcount = 0;
prep.prep_ttl = ms->ms_ttl;
diff --git a/sys/net80211/ieee80211_ioctl.c b/sys/net80211/ieee80211_ioctl.c
index 3d93278..b65f23a 100644
--- a/sys/net80211/ieee80211_ioctl.c
+++ b/sys/net80211/ieee80211_ioctl.c
@@ -2486,6 +2486,11 @@ ieee80211_scanreq(struct ieee80211vap *vap, struct ieee80211_scan_req *sr)
* Otherwise just invoke the scan machinery directly.
*/
IEEE80211_LOCK(ic);
+ if (ic->ic_nrunning == 0) {
+ IEEE80211_UNLOCK(ic);
+ return ENXIO;
+ }
+
if (vap->iv_state == IEEE80211_S_INIT) {
/* NB: clobbers previous settings */
vap->iv_scanreq_flags = sr->sr_flags;
diff --git a/sys/net80211/ieee80211_mesh.c b/sys/net80211/ieee80211_mesh.c
index 7ba784b..c8e69b7 100644
--- a/sys/net80211/ieee80211_mesh.c
+++ b/sys/net80211/ieee80211_mesh.c
@@ -803,16 +803,15 @@ mesh_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
* Update bss node channel to reflect where
* we landed after CSA.
*/
- ieee80211_node_set_chan(vap->iv_bss,
+ ieee80211_node_set_chan(ni,
ieee80211_ht_adjust_channel(ic, ic->ic_curchan,
- ieee80211_htchanflags(vap->iv_bss->ni_chan)));
+ ieee80211_htchanflags(ni->ni_chan)));
/* XXX bypass debug msgs */
break;
case IEEE80211_S_SCAN:
case IEEE80211_S_RUN:
#ifdef IEEE80211_DEBUG
if (ieee80211_msg_debug(vap)) {
- struct ieee80211_node *ni = vap->iv_bss;
ieee80211_note(vap,
"synchronized with %s meshid ",
ether_sprintf(ni->ni_meshid));
@@ -827,7 +826,7 @@ mesh_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
default:
break;
}
- ieee80211_node_authorize(vap->iv_bss);
+ ieee80211_node_authorize(ni);
callout_reset(&ms->ms_cleantimer, ms->ms_ppath->mpp_inact,
mesh_rt_cleanup_cb, vap);
mesh_gatemode_setup(vap);
@@ -1510,10 +1509,11 @@ mesh_recv_group_data(struct ieee80211vap *vap, struct mbuf *m,
* will sent it on another port member.
*/
if (ms->ms_flags & IEEE80211_MESHFLAGS_GATE &&
- ms->ms_flags & IEEE80211_MESHFLAGS_FWD)
+ ms->ms_flags & IEEE80211_MESHFLAGS_FWD) {
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH,
MC01(mc)->mc_addr4, "%s",
"forward from MBSS to the DS");
+ }
}
}
return (0); /* process locally */
diff --git a/sys/net80211/ieee80211_phy.c b/sys/net80211/ieee80211_phy.c
index 13cecb6..644f52f 100644
--- a/sys/net80211/ieee80211_phy.c
+++ b/sys/net80211/ieee80211_phy.c
@@ -563,7 +563,6 @@ ieee80211_compute_duration(const struct ieee80211_rate_table *rt,
default:
panic("%s: unknown phy %u (rate %u)\n", __func__,
rt->info[rix].phy, rate);
- break;
}
return txTime;
}
diff --git a/sys/net80211/ieee80211_scan_sta.c b/sys/net80211/ieee80211_scan_sta.c
index 8d9946d..47b0573 100644
--- a/sys/net80211/ieee80211_scan_sta.c
+++ b/sys/net80211/ieee80211_scan_sta.c
@@ -129,11 +129,13 @@ static void sta_flush_table(struct sta_table *);
#define MATCH_NOTSEEN 0x00080 /* not seen in recent scans */
#define MATCH_RSSI 0x00100 /* rssi deemed too low to use */
#define MATCH_CC 0x00200 /* country code mismatch */
+#ifdef IEEE80211_SUPPORT_TDMA
#define MATCH_TDMA_NOIE 0x00400 /* no TDMA ie */
#define MATCH_TDMA_NOTMASTER 0x00800 /* not TDMA master */
#define MATCH_TDMA_NOSLOT 0x01000 /* all TDMA slots occupied */
#define MATCH_TDMA_LOCAL 0x02000 /* local address */
#define MATCH_TDMA_VERSION 0x04000 /* protocol version mismatch */
+#endif
#define MATCH_MESH_NOID 0x10000 /* no MESHID ie */
#define MATCH_MESHID 0x20000 /* meshid mismatch */
static int match_bss(struct ieee80211vap *,
@@ -1615,7 +1617,6 @@ notfound:
} else
chan = vap->iv_des_chan;
if (chan != NULL) {
- struct ieee80211com *ic = vap->iv_ic;
/*
* Create a HT capable IBSS; the per-node
* probe request/response will result in
diff --git a/sys/net80211/ieee80211_scan_sw.c b/sys/net80211/ieee80211_scan_sw.c
index f1415cb..836dd51 100644
--- a/sys/net80211/ieee80211_scan_sw.c
+++ b/sys/net80211/ieee80211_scan_sw.c
@@ -865,10 +865,10 @@ scan_done(struct ieee80211_scan_state *ss, int scandone)
*/
if (scandone) {
vap->iv_sta_ps(vap, 0);
- if (ss->ss_next >= ss->ss_last) {
- ieee80211_notify_scan_done(vap);
+ if (ss->ss_next >= ss->ss_last)
ic->ic_flags_ext &= ~IEEE80211_FEXT_BGSCAN;
- }
+
+ ieee80211_notify_scan_done(vap);
}
ss_priv->ss_iflags &= ~(ISCAN_CANCEL|ISCAN_ABORT);
ss_priv->ss_scanend = 0;
diff --git a/sys/net80211/ieee80211_sta.c b/sys/net80211/ieee80211_sta.c
index 90561b2..9100fb0 100644
--- a/sys/net80211/ieee80211_sta.c
+++ b/sys/net80211/ieee80211_sta.c
@@ -154,7 +154,6 @@ sta_beacon_miss(struct ieee80211vap *vap)
vap->iv_stats.is_beacon_miss++;
if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) {
#ifdef IEEE80211_SUPPORT_SUPERG
- struct ieee80211com *ic = vap->iv_ic;
/*
* If we receive a beacon miss interrupt when using
diff --git a/sys/net80211/ieee80211_superg.c b/sys/net80211/ieee80211_superg.c
index 278ba88..5565e5e 100644
--- a/sys/net80211/ieee80211_superg.c
+++ b/sys/net80211/ieee80211_superg.c
@@ -1044,7 +1044,6 @@ superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
default:
return ENOSYS;
}
- return 0;
}
IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index 330d305..c106996 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -185,7 +185,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");
-VNET_DEFINE(int, tcp_do_ecn) = 0;
+VNET_DEFINE(int, tcp_do_ecn) = 2;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_do_ecn), 0,
"TCP ECN support");
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index e2490ac..fdd1cfb 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -1117,7 +1117,7 @@ send:
* resend those bits a number of times as per
* RFC 3168.
*/
- if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
+ if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
if (tp->t_rxtshift >= 1) {
if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
flags |= TH_ECE|TH_CWR;
diff --git a/sys/netinet6/ip6_output.c b/sys/netinet6/ip6_output.c
index 1f780bd6..9017de7 100644
--- a/sys/netinet6/ip6_output.c
+++ b/sys/netinet6/ip6_output.c
@@ -135,6 +135,8 @@ struct ip6_exthdrs {
struct mbuf *ip6e_dest2;
};
+static MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
+
static int ip6_pcbopt(int, u_char *, int, struct ip6_pktopts **,
struct ucred *, int);
static int ip6_pcbopts(struct ip6_pktopts **, struct mbuf *,
@@ -323,12 +325,6 @@ ip6_output(struct mbuf *m0, struct ip6_pktopts *opt,
struct m_tag *fwd_tag = NULL;
uint32_t id;
- ip6 = mtod(m, struct ip6_hdr *);
- if (ip6 == NULL) {
- printf ("ip6 is NULL");
- goto bad;
- }
-
if (inp != NULL) {
M_SETFIB(m, inp->inp_inc.inc_fibnum);
if ((flags & IP_NODEFAULTFLOWID) == 0) {
@@ -410,7 +406,6 @@ ip6_output(struct mbuf *m0, struct ip6_pktopts *opt,
hdrsplit++;
}
- /* adjust pointer */
ip6 = mtod(m, struct ip6_hdr *);
/* adjust mbuf packet header length */
@@ -541,10 +536,6 @@ again:
else
ip6->ip6_hlim = V_ip6_defmcasthlim;
}
-
- /* adjust pointer */
- ip6 = mtod(m, struct ip6_hdr *);
-
/*
* Validate route against routing table additions;
* a better/more specific route might have been added.
@@ -803,6 +794,7 @@ again:
error = pfil_run_hooks(&V_inet6_pfil_hook, &m, ifp, PFIL_OUT, inp);
if (error != 0 || m == NULL)
goto done;
+ /* adjust pointer */
ip6 = mtod(m, struct ip6_hdr *);
needfiblookup = 0;
diff --git a/sys/netinet6/nd6.c b/sys/netinet6/nd6.c
index d18ebd8..f71cd6a 100644
--- a/sys/netinet6/nd6.c
+++ b/sys/netinet6/nd6.c
@@ -85,6 +85,8 @@ __FBSDID("$FreeBSD$");
#define SIN6(s) ((const struct sockaddr_in6 *)(s))
+MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
+
/* timer values */
VNET_DEFINE(int, nd6_prune) = 1; /* walk list every 1 seconds */
VNET_DEFINE(int, nd6_delay) = 5; /* delay first probe time 5 second */
diff --git a/sys/netinet6/nd6.h b/sys/netinet6/nd6.h
index 3b1aa96..0978f0d 100644
--- a/sys/netinet6/nd6.h
+++ b/sys/netinet6/nd6.h
@@ -316,6 +316,10 @@ struct nd_pfxrouter {
LIST_HEAD(nd_prhead, nd_prefix);
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_IP6NDP);
+#endif
+
/* nd6.c */
VNET_DECLARE(int, nd6_prune);
VNET_DECLARE(int, nd6_delay);
diff --git a/sys/netpfil/ipfw/ip_fw_dynamic.c b/sys/netpfil/ipfw/ip_fw_dynamic.c
index 23e950d..53df5e2 100644
--- a/sys/netpfil/ipfw/ip_fw_dynamic.c
+++ b/sys/netpfil/ipfw/ip_fw_dynamic.c
@@ -687,7 +687,7 @@ ipfw_install_state(struct ip_fw_chain *chain, struct ip_fw *rule,
ipfw_insn_limit *cmd, struct ip_fw_args *args, uint32_t tablearg)
{
ipfw_dyn_rule *q;
- int i, dir;
+ int i;
DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state", "");)
@@ -695,7 +695,7 @@ ipfw_install_state(struct ip_fw_chain *chain, struct ip_fw *rule,
IPFW_BUCK_LOCK(i);
- q = lookup_dyn_rule_locked(&args->f_id, i, &dir, NULL);
+ q = lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);
if (q != NULL) { /* should never occur */
DEB(
if (last_log != time_uptime) {
@@ -816,7 +816,7 @@ ipfw_install_state(struct ip_fw_chain *chain, struct ip_fw *rule,
return (1); /* Notify caller about failure */
}
- dyn_update_proto_state(q, &args->f_id, NULL, dir);
+ dyn_update_proto_state(q, &args->f_id, NULL, MATCH_FORWARD);
IPFW_BUCK_UNLOCK(i);
return (0);
}
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index a6ab515..8dd5766 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -5083,8 +5083,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
&nk->addr[pd2.didx], pd2.af) ||
nk->port[pd2.didx] != th.th_dport)
pf_change_icmp(pd2.dst, &th.th_dport,
- NULL, /* XXX Inbound NAT? */
- &nk->addr[pd2.didx],
+ saddr, &nk->addr[pd2.didx],
nk->port[pd2.didx], NULL,
pd2.ip_sum, icmpsum,
pd->ip_sum, 0, pd2.af);
@@ -5156,8 +5155,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
&nk->addr[pd2.didx], pd2.af) ||
nk->port[pd2.didx] != uh.uh_dport)
pf_change_icmp(pd2.dst, &uh.uh_dport,
- NULL, /* XXX Inbound NAT? */
- &nk->addr[pd2.didx],
+ saddr, &nk->addr[pd2.didx],
nk->port[pd2.didx], &uh.uh_sum,
pd2.ip_sum, icmpsum,
pd->ip_sum, 1, pd2.af);
@@ -5224,8 +5222,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
&nk->addr[pd2.didx], pd2.af) ||
nk->port[pd2.didx] != iih.icmp_id)
pf_change_icmp(pd2.dst, &iih.icmp_id,
- NULL, /* XXX Inbound NAT? */
- &nk->addr[pd2.didx],
+ saddr, &nk->addr[pd2.didx],
nk->port[pd2.didx], NULL,
pd2.ip_sum, icmpsum,
pd->ip_sum, 0, AF_INET);
@@ -5277,8 +5274,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
&nk->addr[pd2.didx], pd2.af) ||
nk->port[pd2.didx] != iih.icmp6_id)
pf_change_icmp(pd2.dst, &iih.icmp6_id,
- NULL, /* XXX Inbound NAT? */
- &nk->addr[pd2.didx],
+ saddr, &nk->addr[pd2.didx],
nk->port[pd2.didx], NULL,
pd2.ip_sum, icmpsum,
pd->ip_sum, 0, AF_INET6);
@@ -5317,8 +5313,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
if (PF_ANEQ(pd2.dst,
&nk->addr[pd2.didx], pd2.af))
- pf_change_icmp(pd2.src, NULL,
- NULL, /* XXX Inbound NAT? */
+ pf_change_icmp(pd2.dst, NULL, saddr,
&nk->addr[pd2.didx], 0, NULL,
pd2.ip_sum, icmpsum,
pd->ip_sum, 0, pd2.af);
diff --git a/sys/netpfil/pf/pf_norm.c b/sys/netpfil/pf/pf_norm.c
index a2841a2..dbc8818 100644
--- a/sys/netpfil/pf/pf_norm.c
+++ b/sys/netpfil/pf/pf_norm.c
@@ -374,7 +374,7 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
}
*(struct pf_fragment_cmp *)frag = *key;
- frag->fr_timeout = time_second;
+ frag->fr_timeout = time_uptime;
frag->fr_maxlen = frent->fe_len;
TAILQ_INIT(&frag->fr_queue);
diff --git a/sys/powerpc/powermac/powermac_thermal.h b/sys/powerpc/powermac/powermac_thermal.h
index 424c612..7231066 100644
--- a/sys/powerpc/powermac/powermac_thermal.h
+++ b/sys/powerpc/powermac/powermac_thermal.h
@@ -29,7 +29,7 @@
#ifndef _POWERPC_POWERMAC_POWERMAC_THERMAL_H
#define _POWERPC_POWERMAC_POWERMAC_THERMAL_H
-#define ZERO_C_TO_K 2732
+#define ZERO_C_TO_K 2731
struct pmac_fan {
int min_rpm, max_rpm, default_rpm;
diff --git a/sys/powerpc/powermac/smu.c b/sys/powerpc/powermac/smu.c
index cbc0e35..54f30ad 100644
--- a/sys/powerpc/powermac/smu.c
+++ b/sys/powerpc/powermac/smu.c
@@ -1110,7 +1110,7 @@ smu_sensor_read(struct smu_sensor *sens)
value <<= 1;
/* Convert from 16.16 fixed point degC into integer 0.1 K. */
- value = 10*(value >> 16) + ((10*(value & 0xffff)) >> 16) + 2732;
+ value = 10*(value >> 16) + ((10*(value & 0xffff)) >> 16) + 2731;
break;
case SMU_VOLTAGE_SENSOR:
value *= sc->sc_cpu_volt_scale;
@@ -1245,8 +1245,8 @@ smu_attach_sensors(device_t dev, phandle_t sensroot)
if (sens->type == SMU_TEMP_SENSOR) {
/* Make up some numbers */
- sens->therm.target_temp = 500 + 2732; /* 50 C */
- sens->therm.max_temp = 900 + 2732; /* 90 C */
+ sens->therm.target_temp = 500 + 2731; /* 50 C */
+ sens->therm.max_temp = 900 + 2731; /* 90 C */
sens->therm.read =
(int (*)(struct pmac_therm *))smu_sensor_read;
diff --git a/sys/powerpc/powermac/smusat.c b/sys/powerpc/powermac/smusat.c
index 2e37ae4..f8556ee 100644
--- a/sys/powerpc/powermac/smusat.c
+++ b/sys/powerpc/powermac/smusat.c
@@ -184,8 +184,8 @@ smusat_attach(device_t dev)
if (sens->type == SMU_TEMP_SENSOR) {
/* Make up some numbers */
- sens->therm.target_temp = 500 + 2732; /* 50 C */
- sens->therm.max_temp = 900 + 2732; /* 90 C */
+ sens->therm.target_temp = 500 + 2731; /* 50 C */
+ sens->therm.max_temp = 900 + 2731; /* 90 C */
sens->therm.read =
(int (*)(struct pmac_therm *))smusat_sensor_read;
pmac_thermal_sensor_register(&sens->therm);
@@ -248,7 +248,7 @@ smusat_sensor_read(struct smu_sensor *sens)
/* 16.16 */
value <<= 10;
/* From 16.16 to 0.1 C */
- value = 10*(value >> 16) + ((10*(value & 0xffff)) >> 16) + 2732;
+ value = 10*(value >> 16) + ((10*(value & 0xffff)) >> 16) + 2731;
break;
case SMU_VOLTAGE_SENSOR:
/* 16.16 */
diff --git a/sys/powerpc/powerpc/exec_machdep.c b/sys/powerpc/powerpc/exec_machdep.c
index c78899d..fc88f9d 100644
--- a/sys/powerpc/powerpc/exec_machdep.c
+++ b/sys/powerpc/powerpc/exec_machdep.c
@@ -908,11 +908,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
tf->srr0 -= 4;
break;
default:
- if (p->p_sysent->sv_errsize) {
- error = (error < p->p_sysent->sv_errsize) ?
- p->p_sysent->sv_errtbl[error] : -1;
- }
- tf->fixreg[FIRSTARG] = error;
+ tf->fixreg[FIRSTARG] = SV_ABI_ERRNO(p, error);
tf->cr |= 0x10000000; /* Set summary overflow */
break;
}
diff --git a/sys/sparc64/include/vm.h b/sys/sparc64/include/vm.h
index 7ec2d9e..2257e72 100644
--- a/sys/sparc64/include/vm.h
+++ b/sys/sparc64/include/vm.h
@@ -31,5 +31,6 @@
/* Memory attribute configuration is not (yet) implemented. */
#define VM_MEMATTR_DEFAULT 0
+#define VM_MEMATTR_UNCACHEABLE 0
#endif /* !_MACHINE_VM_H_ */
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index df8be0b..5807f29 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -167,13 +167,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
break;
default:
- if (td->td_proc->p_sysent->sv_errsize) {
- if (error >= td->td_proc->p_sysent->sv_errsize)
- error = -1; /* XXX */
- else
- error = td->td_proc->p_sysent->sv_errtbl[error];
- }
- td->td_frame->tf_out[0] = error;
+ td->td_frame->tf_out[0] = SV_ABI_ERRNO(td->td_proc, error);
td->td_frame->tf_tstate |= TSTATE_XCC_C;
break;
}
diff --git a/sys/sys/_task.h b/sys/sys/_task.h
index 4cfa171..ce89781 100644
--- a/sys/sys/_task.h
+++ b/sys/sys/_task.h
@@ -45,8 +45,7 @@ typedef void task_fn_t(void *context, int pending);
struct task {
STAILQ_ENTRY(task) ta_link; /* (q) link for queue */
- uint8_t ta_pending; /* (q) count times queued */
- uint8_t ta_flags; /* (q) flags */
+ uint16_t ta_pending; /* (q) count times queued */
u_short ta_priority; /* (c) Priority */
task_fn_t *ta_func; /* (c) task handler */
void *ta_context; /* (c) argument for handler */
diff --git a/sys/sys/ata.h b/sys/sys/ata.h
index 672ee41..7210414 100644
--- a/sys/sys/ata.h
+++ b/sys/sys/ata.h
@@ -105,6 +105,10 @@ struct ata_params {
/*069*/ u_int16_t support3;
#define ATA_SUPPORT_RZAT 0x0020
#define ATA_SUPPORT_DRAT 0x4000
+#define ATA_SUPPORT_ZONE_MASK 0x0003
+#define ATA_SUPPORT_ZONE_NR 0x0000
+#define ATA_SUPPORT_ZONE_HOST_AWARE 0x0001
+#define ATA_SUPPORT_ZONE_DEV_MANAGED 0x0002
u_int16_t reserved70;
/*071*/ u_int16_t rlsovlap; /* rel time (us) for overlap */
/*072*/ u_int16_t rlsservice; /* rel time (us) for service */
@@ -228,7 +232,14 @@ struct ata_params {
#define ATA_SUPPORT_RWLOGDMAEXT 0x0008
#define ATA_SUPPORT_MICROCODE3 0x0010
#define ATA_SUPPORT_FREEFALL 0x0020
+#define ATA_SUPPORT_SENSE_REPORT 0x0040
+#define ATA_SUPPORT_EPC 0x0080
/*120*/ u_int16_t enabled2;
+#define ATA_ENABLED_WRITEREADVERIFY 0x0002
+#define ATA_ENABLED_WRITEUNCORREXT 0x0004
+#define ATA_ENABLED_FREEFALL 0x0020
+#define ATA_ENABLED_SENSE_REPORT 0x0040
+#define ATA_ENABLED_EPC 0x0080
u_int16_t reserved121[6];
/*127*/ u_int16_t removable_status;
/*128*/ u_int16_t security_status;
@@ -298,8 +309,14 @@ struct ata_params {
#define ATA_MAX_28BIT_LBA 268435455UL
/* ATA Status Register */
-#define ATA_STATUS_ERROR 0x01
-#define ATA_STATUS_DEVICE_FAULT 0x20
+#define ATA_STATUS_ERROR 0x01
+#define ATA_STATUS_SENSE_AVAIL 0x02
+#define ATA_STATUS_ALIGN_ERR 0x04
+#define ATA_STATUS_DATA_REQ 0x08
+#define ATA_STATUS_DEF_WRITE_ERR 0x10
+#define ATA_STATUS_DEVICE_FAULT 0x20
+#define ATA_STATUS_DEVICE_READY 0x40
+#define ATA_STATUS_BUSY 0x80
/* ATA Error Register */
#define ATA_ERROR_ABORT 0x04
@@ -372,17 +389,32 @@ struct ata_params {
#define ATA_WU_PSEUDO 0x55 /* pseudo-uncorrectable error */
#define ATA_WU_FLAGGED 0xaa /* flagged-uncorrectable error */
#define ATA_READ_LOG_DMA_EXT 0x47 /* read log DMA ext - PIO Data-In */
+#define ATA_ZAC_MANAGEMENT_IN 0x4a /* ZAC management in */
+#define ATA_ZM_REPORT_ZONES 0x00 /* report zones */
#define ATA_READ_FPDMA_QUEUED 0x60 /* read DMA NCQ */
#define ATA_WRITE_FPDMA_QUEUED 0x61 /* write DMA NCQ */
#define ATA_NCQ_NON_DATA 0x63 /* NCQ non-data command */
+#define ATA_ABORT_NCQ_QUEUE 0x00 /* abort NCQ queue */
+#define ATA_DEADLINE_HANDLING 0x01 /* deadline handling */
+#define ATA_SET_FEATURES 0x05 /* set features */
+#define ATA_ZERO_EXT 0x06 /* zero ext */
+#define ATA_NCQ_ZAC_MGMT_OUT 0x07 /* NCQ ZAC mgmt out no data */
#define ATA_SEND_FPDMA_QUEUED 0x64 /* send DMA NCQ */
#define ATA_SFPDMA_DSM 0x00 /* Data set management */
#define ATA_SFPDMA_DSM_TRIM 0x01 /* Set trim bit in auxiliary */
#define ATA_SFPDMA_HYBRID_EVICT 0x01 /* Hybrid Evict */
#define ATA_SFPDMA_WLDMA 0x02 /* Write Log DMA EXT */
-#define ATA_RECV_FPDMA_QUEUED 0x65 /* receive DMA NCQ */
+#define ATA_SFPDMA_ZAC_MGMT_OUT 0x03 /* NCQ ZAC mgmt out w/data */
+#define ATA_RECV_FPDMA_QUEUED 0x65 /* receive DMA NCQ */
+#define ATA_RFPDMA_RL_DMA_EXT 0x00 /* Read Log DMA EXT */
+#define ATA_RFPDMA_ZAC_MGMT_IN 0x02 /* NCQ ZAC mgmt in w/data */
#define ATA_SEP_ATTN 0x67 /* SEP request */
#define ATA_SEEK 0x70 /* seek */
+#define ATA_ZAC_MANAGEMENT_OUT 0x9f /* ZAC management out */
+#define ATA_ZM_CLOSE_ZONE 0x01 /* close zone */
+#define ATA_ZM_FINISH_ZONE 0x02 /* finish zone */
+#define ATA_ZM_OPEN_ZONE 0x03 /* open zone */
+#define ATA_ZM_RWP 0x04 /* reset write pointer */
#define ATA_PACKET_CMD 0xa0 /* packet command */
#define ATA_ATAPI_IDENTIFY 0xa1 /* get ATAPI params*/
#define ATA_SERVICE 0xa2 /* service command */
@@ -409,18 +441,29 @@ struct ata_params {
#define ATA_FLUSHCACHE48 0xea /* flush cache to disk */
#define ATA_ATA_IDENTIFY 0xec /* get ATA params */
#define ATA_SETFEATURES 0xef /* features command */
-#define ATA_SF_SETXFER 0x03 /* set transfer mode */
#define ATA_SF_ENAB_WCACHE 0x02 /* enable write cache */
#define ATA_SF_DIS_WCACHE 0x82 /* disable write cache */
+#define ATA_SF_SETXFER 0x03 /* set transfer mode */
+#define ATA_SF_APM 0x05 /* Enable APM feature set */
#define ATA_SF_ENAB_PUIS 0x06 /* enable PUIS */
#define ATA_SF_DIS_PUIS 0x86 /* disable PUIS */
#define ATA_SF_PUIS_SPINUP 0x07 /* PUIS spin-up */
+#define ATA_SF_WRV 0x0b /* Enable Write-Read-Verify */
+#define ATA_SF_DLC 0x0c /* Enable device life control */
+#define ATA_SF_SATA 0x10 /* Enable use of SATA feature */
+#define ATA_SF_FFC 0x41 /* Free-fall Control */
+#define ATA_SF_MHIST 0x43 /* Set Max Host Sect. Times */
+#define ATA_SF_RATE 0x45 /* Set Rate Basis */
+#define ATA_SF_EPC 0x4A /* Extended Power Conditions */
#define ATA_SF_ENAB_RCACHE 0xaa /* enable readahead cache */
#define ATA_SF_DIS_RCACHE 0x55 /* disable readahead cache */
#define ATA_SF_ENAB_RELIRQ 0x5d /* enable release interrupt */
#define ATA_SF_DIS_RELIRQ 0xdd /* disable release interrupt */
#define ATA_SF_ENAB_SRVIRQ 0x5e /* enable service interrupt */
#define ATA_SF_DIS_SRVIRQ 0xde /* disable service interrupt */
+#define ATA_SF_LPSAERC 0x62 /* Long Phys Sect Align ErrRep*/
+#define ATA_SF_DSN 0x63 /* Device Stats Notification */
+#define ATA_CHECK_POWER_MODE 0xe5 /* Check Power Mode */
#define ATA_SECURITY_SET_PASSWORD 0xf1 /* set drive password */
#define ATA_SECURITY_UNLOCK 0xf2 /* unlock drive using passwd */
#define ATA_SECURITY_ERASE_PREPARE 0xf3 /* prepare to erase drive */
@@ -547,6 +590,333 @@ struct atapi_sense {
u_int8_t specific2; /* sense key specific */
} __packed;
+/*
+ * SET FEATURES subcommands
+ */
+
+/*
+ * SET FEATURES command
+ * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A)
+ * These values go in the LBA 3:0.
+ */
+#define ATA_SF_EPC_RESTORE 0x00 /* Restore Power Condition Settings */
+#define ATA_SF_EPC_GOTO 0x01 /* Go To Power Condition */
+#define ATA_SF_EPC_SET_TIMER 0x02 /* Set Power Condition Timer */
+#define ATA_SF_EPC_SET_STATE 0x03 /* Set Power Condition State */
+#define ATA_SF_EPC_ENABLE 0x04 /* Enable the EPC feature set */
+#define ATA_SF_EPC_DISABLE 0x05 /* Disable the EPC feature set */
+#define ATA_SF_EPC_SET_SOURCE 0x06 /* Set EPC Power Source */
+
+/*
+ * SET FEATURES command
+ * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A)
+ * Power Condition ID field
+ * These values go in the count register.
+ */
+#define ATA_EPC_STANDBY_Z 0x00 /* Substate of PM2:Standby */
+#define ATA_EPC_STANDBY_Y 0x01 /* Substate of PM2:Standby */
+#define ATA_EPC_IDLE_A 0x81 /* Substate of PM1:Idle */
+#define ATA_EPC_IDLE_B 0x82 /* Substate of PM1:Idle */
+#define ATA_EPC_IDLE_C 0x83 /* Substate of PM1:Idle */
+#define ATA_EPC_ALL 0xff /* All supported power conditions */
+
+/*
+ * SET FEATURES command
+ * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A)
+ * Restore Power Conditions Settings subcommand
+ * These values go in the LBA register.
+ */
+#define ATA_SF_EPC_RST_DFLT 0x40 /* 1=Rst from Default, 0= from Saved */
+#define ATA_SF_EPC_RST_SAVE 0x10 /* 1=Save on completion */
+
+/*
+ * SET FEATURES command
+ * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A)
+ * Go To Power Condition subcommand
+ * These values go in the LBA register.
+ */
+#define ATA_SF_EPC_GOTO_DELAY 0x02000000 /* Delayed entry bit */
+#define ATA_SF_EPC_GOTO_HOLD 0x01000000 /* Hold Power Cond bit */
+
+/*
+ * SET FEATURES command
+ * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A)
+ * Set Power Condition Timer subcommand
+ * These values go in the LBA register.
+ */
+#define ATA_SF_EPC_TIMER_MASK 0x00ffff00 /* Timer field */
+#define ATA_SF_EPC_TIMER_SHIFT 8
+#define ATA_SF_EPC_TIMER_SEC 0x00000080 /* Timer units, 1=sec, 0=.1s */
+#define ATA_SF_EPC_TIMER_EN 0x00000020 /* Enable/disable cond. */
+#define ATA_SF_EPC_TIMER_SAVE 0x00000010 /* Save settings on comp. */
+
+/*
+ * SET FEATURES command
+ * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A)
+ * Set Power Condition State subcommand
+ * These values go in the LBA register.
+ */
+#define ATA_SF_EPC_SETCON_EN 0x00000020 /* Enable power cond. */
+#define ATA_SF_EPC_SETCON_SAVE 0x00000010 /* Save settings on comp */
+
+/*
+ * SET FEATURES command
+ * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A)
+ * Set EPC Power Source subcommand
+ * These values go in the count register.
+ */
+#define ATA_SF_EPC_SRC_UNKNOWN 0x0000 /* Unknown source */
+#define ATA_SF_EPC_SRC_BAT 0x0001 /* battery source */
+#define ATA_SF_EPC_SRC_NOT_BAT 0x0002 /* not battery source */
+
+#define ATA_LOG_DIRECTORY 0x00 /* Directory of all logs */
+#define ATA_POWER_COND_LOG 0x08 /* Power Conditions Log */
+#define ATA_PCL_IDLE 0x00 /* Idle Power Conditions Page */
+#define ATA_PCL_STANDBY 0x01 /* Standby Power Conditions Page */
+#define ATA_IDENTIFY_DATA_LOG 0x30 /* Identify Device Data Log */
+#define ATA_IDL_PAGE_LIST 0x00 /* List of supported pages */
+#define ATA_IDL_IDENTIFY_DATA 0x01 /* Copy of Identify Device data */
+#define ATA_IDL_CAPACITY 0x02 /* Capacity */
+#define ATA_IDL_SUP_CAP 0x03 /* Supported Capabilities */
+#define ATA_IDL_CUR_SETTINGS 0x04 /* Current Settings */
+#define ATA_IDL_ATA_STRINGS 0x05 /* ATA Strings */
+#define ATA_IDL_SECURITY 0x06 /* Security */
+#define ATA_IDL_PARALLEL_ATA 0x07 /* Parallel ATA */
+#define ATA_IDL_SERIAL_ATA 0x08 /* Serial ATA */
+#define ATA_IDL_ZDI 0x09 /* Zoned Device Information */
+
+struct ata_gp_log_dir {
+ uint8_t header[2];
+#define ATA_GP_LOG_DIR_VERSION 0x0001
+ uint8_t num_pages[255*2]; /* Number of log pages at address */
+};
+
+/*
+ * ATA Power Conditions log descriptor
+ */
+struct ata_power_cond_log_desc {
+ uint8_t reserved1;
+ uint8_t flags;
+#define ATA_PCL_COND_SUPPORTED 0x80
+#define ATA_PCL_COND_SAVEABLE 0x40
+#define ATA_PCL_COND_CHANGEABLE 0x20
+#define ATA_PCL_DEFAULT_TIMER_EN 0x10
+#define ATA_PCL_SAVED_TIMER_EN 0x08
+#define ATA_PCL_CURRENT_TIMER_EN 0x04
+#define ATA_PCL_HOLD_PC_NOT_SUP 0x02
+ uint8_t reserved2[2];
+ uint8_t default_timer[4];
+ uint8_t saved_timer[4];
+ uint8_t current_timer[4];
+ uint8_t nom_time_to_active[4];
+ uint8_t min_timer[4];
+ uint8_t max_timer[4];
+ uint8_t num_transitions_to_pc[4];
+ uint8_t hours_in_pc[4];
+ uint8_t reserved3[28];
+};
+
+/*
+ * ATA Power Conditions Log (0x08), Idle power conditions page (0x00)
+ */
+struct ata_power_cond_log_idle {
+ struct ata_power_cond_log_desc idle_a_desc;
+ struct ata_power_cond_log_desc idle_b_desc;
+ struct ata_power_cond_log_desc idle_c_desc;
+ uint8_t reserved[320];
+};
+
+/*
+ * ATA Power Conditions Log (0x08), Standby power conditions page (0x01)
+ */
+struct ata_power_cond_log_standby {
+ uint8_t reserved[384];
+ struct ata_power_cond_log_desc standby_y_desc;
+ struct ata_power_cond_log_desc standby_z_desc;
+};
+
+/*
+ * ATA IDENTIFY DEVICE data log (0x30) page 0x00
+ * List of Supported IDENTIFY DEVICE data pages.
+ */
+struct ata_identify_log_pages {
+ uint8_t header[8];
+#define ATA_IDLOG_REVISION 0x0000000000000001
+ uint8_t entry_count;
+ uint8_t entries[503];
+};
+
+/*
+ * ATA IDENTIFY DEVICE data log (0x30)
+ * Capacity (Page 0x02).
+ */
+struct ata_identify_log_capacity {
+ uint8_t header[8];
+#define ATA_CAP_HEADER_VALID 0x8000000000000000
+#define ATA_CAP_PAGE_NUM_MASK 0x0000000000ff0000
+#define ATA_CAP_PAGE_NUM_SHIFT 16
+#define ATA_CAP_REV_MASK 0x00000000000000ff
+ uint8_t capacity[8];
+#define ATA_CAP_CAPACITY_VALID 0x8000000000000000
+#define ATA_CAP_ACCESSIBLE_CAP 0x0000ffffffffffff
+ uint8_t phys_logical_sect_size[8];
+#define ATA_CAP_PL_VALID 0x8000000000000000
+#define ATA_CAP_LTOP_REL_SUP 0x4000000000000000
+#define ATA_CAP_LOG_SECT_SUP 0x2000000000000000
+#define ATA_CAP_ALIGN_ERR_MASK 0x0000000000300000
+#define ATA_CAP_LTOP_MASK 0x00000000000f0000
+#define ATA_CAP_LOG_SECT_OFF 0x000000000000ffff
+ uint8_t logical_sect_size[8];
+#define ATA_CAP_LOG_SECT_VALID 0x8000000000000000
+#define ATA_CAP_LOG_SECT_SIZE 0x00000000ffffffff
+ uint8_t nominal_buffer_size[8];
+#define ATA_CAP_NOM_BUF_VALID 0x8000000000000000
+#define ATA_CAP_NOM_BUF_SIZE 0x7fffffffffffffff
+ uint8_t reserved[472];
+};
+
+/*
+ * ATA IDENTIFY DEVICE data log (0x30)
+ * Supported Capabilities (Page 0x03).
+ */
+
+struct ata_identify_log_sup_cap {
+ uint8_t header[8];
+#define ATA_SUP_CAP_HEADER_VALID 0x8000000000000000
+#define ATA_SUP_CAP_PAGE_NUM_MASK 0x0000000000ff0000
+#define ATA_SUP_CAP_PAGE_NUM_SHIFT 16
+#define ATA_SUP_CAP_REV_MASK 0x00000000000000ff
+ uint8_t sup_cap[8];
+#define ATA_SUP_CAP_VALID 0x8000000000000000
+#define ATA_SC_SET_SECT_CONFIG_SUP 0x0002000000000000 /* Set Sect Conf*/
+#define ATA_SC_ZERO_EXT_SUP 0x0001000000000000 /* Zero EXT */
+#define ATA_SC_SUCC_NCQ_SENSE_SUP 0x0000800000000000 /* Succ. NCQ Sns */
+#define ATA_SC_DLC_SUP 0x0000400000000000 /* DLC */
+#define ATA_SC_RQSN_DEV_FAULT_SUP 0x0000200000000000 /* Req Sns Dev Flt*/
+#define ATA_SC_DSN_SUP 0x0000100000000000 /* DSN */
+#define ATA_SC_LP_STANDBY_SUP 0x0000080000000000 /* LP Standby */
+#define ATA_SC_SET_EPC_PS_SUP 0x0000040000000000 /* Set EPC PS */
+#define ATA_SC_AMAX_ADDR_SUP 0x0000020000000000 /* AMAX Addr */
+#define ATA_SC_DRAT_SUP 0x0000008000000000 /* DRAT */
+#define ATA_SC_LPS_MISALGN_SUP 0x0000004000000000 /* LPS Misalign */
+#define ATA_SC_RB_DMA_SUP 0x0000001000000000 /* Read Buf DMA */
+#define ATA_SC_WB_DMA_SUP 0x0000000800000000 /* Write Buf DMA */
+#define ATA_SC_DNLD_MC_DMA_SUP 0x0000000200000000 /* DL MCode DMA */
+#define ATA_SC_28BIT_SUP 0x0000000100000000 /* 28-bit */
+#define ATA_SC_RZAT_SUP 0x0000000080000000 /* RZAT */
+#define ATA_SC_NOP_SUP 0x0000000020000000 /* NOP */
+#define ATA_SC_READ_BUFFER_SUP 0x0000000010000000 /* Read Buffer */
+#define ATA_SC_WRITE_BUFFER_SUP 0x0000000008000000 /* Write Buffer */
+#define ATA_SC_READ_LOOK_AHEAD_SUP 0x0000000002000000 /* Read Look-Ahead*/
+#define ATA_SC_VOLATILE_WC_SUP 0x0000000001000000 /* Volatile WC */
+#define ATA_SC_SMART_SUP 0x0000000000800000 /* SMART */
+#define ATA_SC_FLUSH_CACHE_EXT_SUP 0x0000000000400000 /* Flush Cache Ext */
+#define ATA_SC_48BIT_SUP 0x0000000000100000 /* 48-Bit */
+#define ATA_SC_SPINUP_SUP 0x0000000000040000 /* Spin-Up */
+#define ATA_SC_PUIS_SUP 0x0000000000020000 /* PUIS */
+#define ATA_SC_APM_SUP 0x0000000000010000 /* APM */
+#define ATA_SC_DL_MICROCODE_SUP 0x0000000000004000 /* DL Microcode */
+#define ATA_SC_UNLOAD_SUP 0x0000000000002000 /* Unload */
+#define ATA_SC_WRITE_FUA_EXT_SUP 0x0000000000001000 /* Write FUA EXT */
+#define ATA_SC_GPL_SUP 0x0000000000000800 /* GPL */
+#define ATA_SC_STREAMING_SUP 0x0000000000000400 /* Streaming */
+#define ATA_SC_SMART_SELFTEST_SUP 0x0000000000000100 /* SMART self-test */
+#define ATA_SC_SMART_ERR_LOG_SUP 0x0000000000000080 /* SMART Err Log */
+#define ATA_SC_EPC_SUP 0x0000000000000040 /* EPC */
+#define ATA_SC_SENSE_SUP 0x0000000000000020 /* Sense data */
+#define ATA_SC_FREEFALL_SUP 0x0000000000000010 /* Free-Fall */
+#define ATA_SC_DM_MODE3_SUP 0x0000000000000008 /* DM Mode 3 */
+#define ATA_SC_GPL_DMA_SUP 0x0000000000000004 /* GPL DMA */
+#define ATA_SC_WRITE_UNCOR_SUP 0x0000000000000002 /* Write uncorr. */
+#define ATA_SC_WRV_SUP 0x0000000000000001 /* WRV */
+ uint8_t download_code_cap[8];
+#define ATA_DL_CODE_VALID 0x8000000000000000
+#define ATA_DLC_DM_OFFSETS_DEFER_SUP 0x0000000400000000
+#define ATA_DLC_DM_IMMED_SUP 0x0000000200000000
+#define ATA_DLC_DM_OFF_IMMED_SUP 0x0000000100000000
+#define ATA_DLC_DM_MAX_XFER_SIZE_MASK 0x00000000ffff0000
+#define ATA_DLC_DM_MAX_XFER_SIZE_SHIFT 16
+#define ATA_DLC_DM_MIN_XFER_SIZE_MASK 0x000000000000ffff
+ uint8_t nom_media_rotation_rate[8];
+#define ATA_NOM_MEDIA_ROTATION_VALID 0x8000000000000000
+#define ATA_ROTATION_MASK 0x000000000000ffff
+ uint8_t form_factor[8];
+#define ATA_FORM_FACTOR_VALID 0x8000000000000000
+#define ATA_FF_MASK 0x000000000000000f
+#define ATA_FF_NOT_REPORTED 0x0000000000000000 /* Not reported */
+#define ATA_FF_525_IN 0x0000000000000001 /* 5.25 inch */
+#define ATA_FF_35_IN 0x0000000000000002 /* 3.5 inch */
+#define ATA_FF_25_IN 0x0000000000000003 /* 2.5 inch */
+#define ATA_FF_18_IN 0x0000000000000004 /* 1.8 inch */
+#define ATA_FF_LT_18_IN 0x0000000000000005 /* < 1.8 inch */
+#define ATA_FF_MSATA 0x0000000000000006 /* mSATA */
+#define ATA_FF_M2 0x0000000000000007 /* M.2 */
+#define ATA_FF_MICROSSD 0x0000000000000008 /* MicroSSD */
+#define ATA_FF_CFAST 0x0000000000000009 /* CFast */
+ uint8_t wrv_sec_cnt_mode3[8];
+#define ATA_WRV_MODE3_VALID 0x8000000000000000
+#define ATA_WRV_MODE3_COUNT 0x00000000ffffffff
+ uint8_t wrv_sec_cnt_mode2[8];
+#define ATA_WRV_MODE2_VALID 0x8000000000000000
+#define ATA_WRV_MODE2_COUNT 0x00000000ffffffff
+ uint8_t wwn[16];
+ /* XXX KDM need to figure out how to handle 128-bit fields */
+ uint8_t dsm[8];
+#define ATA_DSM_VALID 0x8000000000000000
+#define ATA_LB_MARKUP_SUP 0x000000000000ff00
+#define ATA_TRIM_SUP 0x0000000000000001
+ uint8_t util_per_unit_time[16];
+ /* XXX KDM need to figure out how to handle 128-bit fields */
+ uint8_t util_usage_rate_sup[8];
+#define ATA_UTIL_USAGE_RATE_VALID 0x8000000000000000
+#define ATA_SETTING_RATE_SUP 0x0000000000800000
+#define ATA_SINCE_POWERON_SUP 0x0000000000000100
+#define ATA_POH_RATE_SUP 0x0000000000000010
+#define ATA_DATE_TIME_RATE_SUP 0x0000000000000001
+ uint8_t zoned_cap[8];
+#define ATA_ZONED_VALID 0x8000000000000000
+#define ATA_ZONED_MASK 0x0000000000000003
+ uint8_t sup_zac_cap[8];
+#define ATA_SUP_ZAC_CAP_VALID 0x8000000000000000
+#define ATA_ND_RWP_SUP 0x0000000000000010 /* Reset Write Ptr*/
+#define ATA_ND_FINISH_ZONE_SUP 0x0000000000000008 /* Finish Zone */
+#define ATA_ND_CLOSE_ZONE_SUP 0x0000000000000004 /* Close Zone */
+#define ATA_ND_OPEN_ZONE_SUP 0x0000000000000002 /* Open Zone */
+#define ATA_REPORT_ZONES_SUP 0x0000000000000001 /* Report Zones */
+ uint8_t reserved[392];
+};
+
+/*
+ * ATA Identify Device Data Log Zoned Device Information Page (0x09).
+ * Current as of ZAC r04a, August 25, 2015.
+ */
+struct ata_zoned_info_log {
+ uint8_t header[8];
+#define ATA_ZDI_HEADER_VALID 0x8000000000000000
+#define ATA_ZDI_PAGE_NUM_MASK 0x0000000000ff0000
+#define ATA_ZDI_PAGE_NUM_SHIFT 16
+#define ATA_ZDI_REV_MASK 0x00000000000000ff
+ uint8_t zoned_cap[8];
+#define ATA_ZDI_CAP_VALID 0x8000000000000000
+#define ATA_ZDI_CAP_URSWRZ 0x0000000000000001
+ uint8_t zoned_settings[8];
+#define ATA_ZDI_SETTINGS_VALID 0x8000000000000000
+ uint8_t optimal_seq_zones[8];
+#define ATA_ZDI_OPT_SEQ_VALID 0x8000000000000000
+#define ATA_ZDI_OPT_SEQ_MASK 0x00000000ffffffff
+ uint8_t optimal_nonseq_zones[8];
+#define ATA_ZDI_OPT_NS_VALID 0x8000000000000000
+#define ATA_ZDI_OPT_NS_MASK 0x00000000ffffffff
+ uint8_t max_seq_req_zones[8];
+#define ATA_ZDI_MAX_SEQ_VALID 0x8000000000000000
+#define ATA_ZDI_MAX_SEQ_MASK 0x00000000ffffffff
+ uint8_t version_info[8];
+#define ATA_ZDI_VER_VALID 0x8000000000000000
+#define ATA_ZDI_VER_ZAC_SUP 0x0100000000000000
+#define ATA_ZDI_VER_ZAC_MASK 0x00000000000000ff
+ uint8_t reserved[456];
+};
+
struct ata_ioc_request {
union {
struct {
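
The new word-69 bits above let a consumer of the IDENTIFY data classify a drive's zoned capability without issuing any zone commands. A minimal sketch of that classification; the function name is hypothetical.

#include <sys/ata.h>

static const char *
ata_zone_mode_str(const struct ata_params *ident)
{
        /* support3 is IDENTIFY DEVICE word 69. */
        switch (ident->support3 & ATA_SUPPORT_ZONE_MASK) {
        case ATA_SUPPORT_ZONE_HOST_AWARE:
                return ("host aware");
        case ATA_SUPPORT_ZONE_DEV_MANAGED:
                return ("drive managed");
        case ATA_SUPPORT_ZONE_NR:
        default:
                return ("not reported");
        }
}
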
diff --git a/sys/sys/bio.h b/sys/sys/bio.h
index 37fefbf..01149dd 100644
--- a/sys/sys/bio.h
+++ b/sys/sys/bio.h
@@ -39,16 +39,18 @@
#define _SYS_BIO_H_
#include <sys/queue.h>
+#include <sys/disk_zone.h>
/* bio_cmd */
#define BIO_READ 0x01 /* Read I/O data */
#define BIO_WRITE 0x02 /* Write I/O data */
-#define BIO_DELETE 0x04 /* TRIM or free blocks, i.e. mark as unused */
-#define BIO_GETATTR 0x08 /* Get GEOM attributes of object */
-#define BIO_FLUSH 0x10 /* Commit outstanding I/O now */
-#define BIO_CMD0 0x20 /* Available for local hacks */
-#define BIO_CMD1 0x40 /* Available for local hacks */
-#define BIO_CMD2 0x80 /* Available for local hacks */
+#define BIO_DELETE 0x03 /* TRIM or free blocks, i.e. mark as unused */
+#define BIO_GETATTR 0x04 /* Get GEOM attributes of object */
+#define BIO_FLUSH 0x05 /* Commit outstanding I/O now */
+#define BIO_CMD0 0x06 /* Available for local hacks */
+#define BIO_CMD1 0x07 /* Available for local hacks */
+#define BIO_CMD2 0x08 /* Available for local hacks */
+#define BIO_ZONE 0x09 /* Zone command */
/* bio_flags */
#define BIO_ERROR 0x01 /* An error occurred processing this bio. */
@@ -98,6 +100,7 @@ struct bio {
void *bio_caller2; /* Private use by the consumer. */
TAILQ_ENTRY(bio) bio_queue; /* Disksort queue. */
const char *bio_attribute; /* Attribute for BIO_[GS]ETATTR */
+ struct disk_zone_args bio_zone;/* Used for BIO_ZONE */
struct g_consumer *bio_from; /* GEOM linkage */
struct g_provider *bio_to; /* GEOM linkage */
off_t bio_length; /* Like bio_bcount */
diff --git a/sys/sys/bus.h b/sys/sys/bus.h
index 9b989af..4e7d375 100644
--- a/sys/sys/bus.h
+++ b/sys/sys/bus.h
@@ -294,6 +294,31 @@ struct driver {
KOBJ_CLASS_FIELDS;
};
+/**
+ * @brief A resource mapping.
+ */
+struct resource_map {
+ bus_space_tag_t r_bustag;
+ bus_space_handle_t r_bushandle;
+ bus_size_t r_size;
+ void *r_vaddr;
+};
+
+/**
+ * @brief Optional properties of a resource mapping request.
+ */
+struct resource_map_request {
+ size_t size;
+ rman_res_t offset;
+ rman_res_t length;
+ vm_memattr_t memattr;
+};
+
+void resource_init_map_request_impl(struct resource_map_request *_args,
+ size_t _sz);
+#define resource_init_map_request(rmr) \
+ resource_init_map_request_impl((rmr), sizeof(*(rmr)))
+
/*
* Definitions for drivers which need to keep simple lists of resources
* for their child devices.
@@ -407,6 +432,10 @@ bus_space_tag_t
int bus_generic_get_domain(device_t dev, device_t child, int *domain);
struct resource_list *
bus_generic_get_resource_list (device_t, device_t);
+int bus_generic_map_resource(device_t dev, device_t child, int type,
+ struct resource *r,
+ struct resource_map_request *args,
+ struct resource_map *map);
void bus_generic_new_pass(device_t dev);
int bus_print_child_header(device_t dev, device_t child);
int bus_print_child_domain(device_t dev, device_t child);
@@ -440,6 +469,9 @@ int bus_generic_suspend(device_t dev);
int bus_generic_suspend_child(device_t dev, device_t child);
int bus_generic_teardown_intr(device_t dev, device_t child,
struct resource *irq, void *cookie);
+int bus_generic_unmap_resource(device_t dev, device_t child, int type,
+ struct resource *r,
+ struct resource_map *map);
int bus_generic_write_ivar(device_t dev, device_t child, int which,
uintptr_t value);
int bus_null_rescan(device_t dev);
@@ -469,6 +501,11 @@ int bus_activate_resource(device_t dev, int type, int rid,
struct resource *r);
int bus_deactivate_resource(device_t dev, int type, int rid,
struct resource *r);
+int bus_map_resource(device_t dev, int type, struct resource *r,
+ struct resource_map_request *args,
+ struct resource_map *map);
+int bus_unmap_resource(device_t dev, int type, struct resource *r,
+ struct resource_map *map);
int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize,
struct _cpuset *cpuset);
bus_dma_tag_t bus_get_dma_tag(device_t dev);
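
The resource_map additions above let a driver create, and later destroy, an explicit mapping of part of a resource with a memory attribute of its choosing. A minimal sketch of the intended call sequence for a memory resource that has already been allocated; the helper names, the 4 KB window and the VM_MEMATTR_UNCACHEABLE choice are illustrative assumptions, not part of this change.

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <vm/vm.h>

static int
example_map_window(device_t dev, struct resource *r, struct resource_map *map)
{
        struct resource_map_request req;

        /* Map only the first 4 KB of the resource, uncacheable. */
        resource_init_map_request(&req);
        req.offset = 0;
        req.length = 4096;
        req.memattr = VM_MEMATTR_UNCACHEABLE;

        return (bus_map_resource(dev, SYS_RES_MEMORY, r, &req, map));
}

static void
example_unmap_window(device_t dev, struct resource *r, struct resource_map *map)
{

        bus_unmap_resource(dev, SYS_RES_MEMORY, r, map);
}
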
diff --git a/sys/sys/disk.h b/sys/sys/disk.h
index 5735a4f..6b35d74 100644
--- a/sys/sys/disk.h
+++ b/sys/sys/disk.h
@@ -15,6 +15,7 @@
#include <sys/ioccom.h>
#include <sys/types.h>
+#include <sys/disk_zone.h>
#ifdef _KERNEL
@@ -136,4 +137,6 @@ struct diocgattr_arg {
};
#define DIOCGATTR _IOWR('d', 142, struct diocgattr_arg)
+#define DIOCZONECMD _IOWR('d', 143, struct disk_zone_args)
+
#endif /* _SYS_DISK_H_ */
diff --git a/sys/sys/disk_zone.h b/sys/sys/disk_zone.h
new file mode 100644
index 0000000..6f1fe5c
--- /dev/null
+++ b/sys/sys/disk_zone.h
@@ -0,0 +1,184 @@
+/*-
+ * Copyright (c) 2015 Spectra Logic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * Authors: Ken Merry (Spectra Logic Corporation)
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_DISK_ZONE_H_
+#define _SYS_DISK_ZONE_H_
+
+/*
+ * Interface for Zone-based disks. This allows managing devices that
+ * conform to the SCSI Zoned Block Commands (ZBC) and ATA Zoned ATA Command
+ * Set (ZAC) specifications. Devices using these command sets are
+ * currently (October 2015) hard drives using Shingled Magnetic Recording
+ * (SMR).
+ */
+
+/*
+ * There are currently three types of zoned devices:
+ *
+ * Drive Managed:
+ * Drive Managed drives look and act just like a standard random access
+ * block device, but underneath, the drive reads and writes the bulk of
+ * its capacity using SMR zones. Sequential writes will yield better
+ * performance, but writing sequentially is not required.
+ *
+ * Host Aware:
+ * Host Aware drives expose the underlying zone layout via SCSI or ATA
+ * commands and allow the host to manage the zone conditions. The host
+ * is not required to manage the zones on the drive, though. Sequential
+ * writes will yield better performance in Sequential Write Preferred
+ * zones, but the host can write randomly in those zones.
+ *
+ * Host Managed:
+ * Host Managed drives expose the underlying zone layout via SCSI or ATA
+ * commands. The host is required to access the zones according to the
+ * rules described by the zone layout. Any commands that violate the
+ * rules will be returned with an error.
+ */
+struct disk_zone_disk_params {
+ uint32_t zone_mode;
+#define DISK_ZONE_MODE_NONE 0x00
+#define DISK_ZONE_MODE_HOST_AWARE 0x01
+#define DISK_ZONE_MODE_DRIVE_MANAGED 0x02
+#define DISK_ZONE_MODE_HOST_MANAGED 0x04
+ uint64_t flags;
+#define DISK_ZONE_DISK_URSWRZ 0x001
+#define DISK_ZONE_OPT_SEQ_SET 0x002
+#define DISK_ZONE_OPT_NONSEQ_SET 0x004
+#define DISK_ZONE_MAX_SEQ_SET 0x008
+#define DISK_ZONE_RZ_SUP 0x010
+#define DISK_ZONE_OPEN_SUP 0x020
+#define DISK_ZONE_CLOSE_SUP 0x040
+#define DISK_ZONE_FINISH_SUP 0x080
+#define DISK_ZONE_RWP_SUP 0x100
+#define DISK_ZONE_CMD_SUP_MASK 0x1f0
+ uint64_t optimal_seq_zones;
+ uint64_t optimal_nonseq_zones;
+ uint64_t max_seq_zones;
+};
+
+/*
+ * Used for reset write pointer, open, close and finish.
+ */
+struct disk_zone_rwp {
+ uint64_t id;
+ uint8_t flags;
+#define DISK_ZONE_RWP_FLAG_NONE 0x00
+#define DISK_ZONE_RWP_FLAG_ALL 0x01
+};
+
+/*
+ * Report Zones header. All of these values are passed out.
+ */
+struct disk_zone_rep_header {
+ uint8_t same;
+#define DISK_ZONE_SAME_ALL_DIFFERENT 0x0 /* Lengths and types vary */
+#define DISK_ZONE_SAME_ALL_SAME 0x1 /* Lengths and types the same */
+#define DISK_ZONE_SAME_LAST_DIFFERENT 0x2 /* Types same, last len varies */
+#define DISK_ZONE_SAME_TYPES_DIFFERENT 0x3 /* Types vary, length the same */
+ uint64_t maximum_lba;
+ /*
+ * XXX KDM padding space may not be a good idea inside the bio.
+ */
+ uint8_t reserved[64];
+};
+
+/*
+ * Report Zones entry. Note that the zone types, conditions, and flags
+ * are mapped directly from the SCSI/ATA flag values. Any additional
+ * SCSI/ATA zone types or conditions or flags that are defined in the
+ * future could result in additional values that are not yet defined here.
+ */
+struct disk_zone_rep_entry {
+ uint8_t zone_type;
+#define DISK_ZONE_TYPE_CONVENTIONAL 0x01
+#define DISK_ZONE_TYPE_SEQ_REQUIRED 0x02 /* Host Managed */
+#define DISK_ZONE_TYPE_SEQ_PREFERRED 0x03 /* Host Aware */
+ uint8_t zone_condition;
+#define DISK_ZONE_COND_NOT_WP 0x00
+#define DISK_ZONE_COND_EMPTY 0x01
+#define DISK_ZONE_COND_IMPLICIT_OPEN 0x02
+#define DISK_ZONE_COND_EXPLICIT_OPEN 0x03
+#define DISK_ZONE_COND_CLOSED 0x04
+#define DISK_ZONE_COND_READONLY 0x0D
+#define DISK_ZONE_COND_FULL 0x0E
+#define DISK_ZONE_COND_OFFLINE 0x0F
+ uint8_t zone_flags;
+#define DISK_ZONE_FLAG_RESET 0x01 /* Zone needs RWP */
+#define DISK_ZONE_FLAG_NON_SEQ 0x02 /* Zone accessed nonseq */
+ uint64_t zone_length;
+ uint64_t zone_start_lba;
+ uint64_t write_pointer_lba;
+ /* XXX KDM padding space may not be a good idea inside the bio */
+ uint8_t reserved[32];
+};
+
+struct disk_zone_report {
+ uint64_t starting_id; /* Passed In */
+ uint8_t rep_options; /* Passed In */
+#define DISK_ZONE_REP_ALL 0x00
+#define DISK_ZONE_REP_EMPTY 0x01
+#define DISK_ZONE_REP_IMP_OPEN 0x02
+#define DISK_ZONE_REP_EXP_OPEN 0x03
+#define DISK_ZONE_REP_CLOSED 0x04
+#define DISK_ZONE_REP_FULL 0x05
+#define DISK_ZONE_REP_READONLY 0x06
+#define DISK_ZONE_REP_OFFLINE 0x07
+#define DISK_ZONE_REP_RWP 0x10
+#define DISK_ZONE_REP_NON_SEQ 0x11
+#define DISK_ZONE_REP_NON_WP 0x3F
+ struct disk_zone_rep_header header;
+ uint32_t entries_allocated; /* Passed In */
+ uint32_t entries_filled; /* Passed Out */
+ uint32_t entries_available; /* Passed Out */
+ struct disk_zone_rep_entry *entries;
+};
+
+union disk_zone_params {
+ struct disk_zone_disk_params disk_params;
+ struct disk_zone_rwp rwp;
+ struct disk_zone_report report;
+};
+
+struct disk_zone_args {
+ uint8_t zone_cmd;
+#define DISK_ZONE_OPEN 0x00
+#define DISK_ZONE_CLOSE 0x01
+#define DISK_ZONE_FINISH 0x02
+#define DISK_ZONE_REPORT_ZONES 0x03
+#define DISK_ZONE_RWP 0x04
+#define DISK_ZONE_GET_PARAMS 0x05
+ union disk_zone_params zone_params;
+};
+
+#endif /* _SYS_DISK_ZONE_H_ */
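A hedged sketch of how a consumer of the new header might fill in struct disk_zone_args for a REPORT ZONES request. The structures, commands, and reporting options come from the header added above; the function name and submit_zone_cmd() are invented placeholders, since the actual submission path (a bio or ioctl) is not defined in this header.

static int
report_all_zones_sketch(struct disk_zone_rep_entry *entries, uint32_t nentries)
{
	struct disk_zone_args zone_args = { .zone_cmd = DISK_ZONE_REPORT_ZONES };
	struct disk_zone_report *rep = &zone_args.zone_params.report;

	rep->starting_id = 0;			/* Passed in: first zone to report. */
	rep->rep_options = DISK_ZONE_REP_ALL;	/* Passed in: report every zone. */
	rep->entries_allocated = nentries;	/* Passed in: size of entries[]. */
	rep->entries = entries;

	/* Placeholder: the real submission mechanism lives outside this header. */
	if (submit_zone_cmd(&zone_args) != 0)
		return (-1);

	/* Passed out: entries_filled entries in entries[] are now valid. */
	return ((int)rep->entries_filled);
}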
diff --git a/sys/sys/elf_common.h b/sys/sys/elf_common.h
index f4579c8..6793d5f 100644
--- a/sys/sys/elf_common.h
+++ b/sys/sys/elf_common.h
@@ -473,6 +473,7 @@ typedef struct {
#define SHF_OS_NONCONFORMING 0x100 /* OS-specific processing required. */
#define SHF_GROUP 0x200 /* Member of section group. */
#define SHF_TLS 0x400 /* Section contains TLS data. */
+#define SHF_COMPRESSED 0x800 /* Section contains compressed data. */
#define SHF_MASKOS 0x0ff00000 /* OS-specific semantics. */
#define SHF_MASKPROC 0xf0000000 /* Processor-specific semantics. */
@@ -608,6 +609,8 @@ typedef struct {
*/
#define DT_ADDRRNGLO 0x6ffffe00
#define DT_GNU_HASH 0x6ffffef5 /* GNU-style hash table */
+#define DT_TLSDESC_PLT 0x6ffffef6 /* loc. of PLT for tlsdesc resolver */
+#define DT_TLSDESC_GOT 0x6ffffef7 /* loc. of GOT for tlsdesc resolver */
#define DT_GNU_CONFLICT 0x6ffffef8 /* address of conflict section */
#define DT_GNU_LIBLIST 0x6ffffef9 /* address of library list */
#define DT_CONFIG 0x6ffffefa /* configuration information */
diff --git a/sys/sys/kobj.h b/sys/sys/kobj.h
index 0cb25fb..36d8d2a 100644
--- a/sys/sys/kobj.h
+++ b/sys/sys/kobj.h
@@ -146,13 +146,13 @@ struct kobj_class classvar = { \
* DEFINE_CLASS_2(foo, foo_class, foo_methods, sizeof(foo_softc),
* bar, baz);
*/
-#define DEFINE_CLASS_2(name, methods, size, \
+#define DEFINE_CLASS_2(name, classvar, methods, size, \
base1, base2) \
\
static kobj_class_t name ## _baseclasses[] = \
{ &base1, \
&base2, NULL }; \
-struct kobj_class name ## _class = { \
+struct kobj_class classvar = { \
#name, methods, size, name ## _baseclasses \
}
@@ -162,14 +162,14 @@ struct kobj_class name ## _class = { \
* DEFINE_CLASS_3(foo, foo_class, foo_methods, sizeof(foo_softc),
* bar, baz, foobar);
*/
-#define DEFINE_CLASS_3(name, methods, size, \
+#define DEFINE_CLASS_3(name, classvar, methods, size, \
base1, base2, base3) \
\
static kobj_class_t name ## _baseclasses[] = \
{ &base1, \
&base2, \
&base3, NULL }; \
-struct kobj_class name ## _class = { \
+struct kobj_class classvar = { \
#name, methods, size, name ## _baseclasses \
}
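With the extra classvar argument, the exported struct kobj_class can be named independently of the class name instead of always being name ## _class. A hypothetical invocation of the updated macro, following the usage comment above (all identifiers invented for illustration):

/* Declares foo_driver_class rather than the formerly implicit foo_class. */
DEFINE_CLASS_2(foo, foo_driver_class, foo_methods, sizeof(foo_softc),
    bar, baz);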
diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h
index c09d749..b530663 100644
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -146,9 +146,6 @@ MALLOC_DECLARE(M_CACHE);
MALLOC_DECLARE(M_DEVBUF);
MALLOC_DECLARE(M_TEMP);
-MALLOC_DECLARE(M_IP6OPT); /* for INET6 */
-MALLOC_DECLARE(M_IP6NDP); /* for INET6 */
-
/*
* Deprecated macro versions of not-quite-malloc() and free().
*/
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 8daf4b7..665d806 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -58,7 +58,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1100109 /* Master, propagated to newvers */
+#define __FreeBSD_version 1100111 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/sys/rman.h b/sys/sys/rman.h
index 2d58f4a..9d1cd58 100644
--- a/sys/sys/rman.h
+++ b/sys/sys/rman.h
@@ -47,6 +47,7 @@
#define RF_FIRSTSHARE 0x0020 /* first in sharing list */
#define RF_PREFETCHABLE 0x0040 /* resource is prefetchable */
#define RF_OPTIONAL 0x0080 /* for bus_alloc_resources() */
+#define RF_UNMAPPED 0x0100 /* don't map resource when activating */
#define RF_ALIGNMENT_SHIFT 10 /* alignment size bit starts bit 10 */
#define RF_ALIGNMENT_MASK (0x003F << RF_ALIGNMENT_SHIFT)
@@ -105,6 +106,7 @@ struct resource {
};
struct resource_i;
+struct resource_map;
TAILQ_HEAD(resource_head, resource_i);
@@ -121,13 +123,13 @@ TAILQ_HEAD(rman_head, rman);
int rman_activate_resource(struct resource *r);
int rman_adjust_resource(struct resource *r, rman_res_t start, rman_res_t end);
-int rman_await_resource(struct resource *r, int pri, int timo);
int rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end);
bus_space_handle_t rman_get_bushandle(struct resource *);
bus_space_tag_t rman_get_bustag(struct resource *);
rman_res_t rman_get_end(struct resource *);
struct device *rman_get_device(struct resource *);
u_int rman_get_flags(struct resource *);
+void rman_get_mapping(struct resource *, struct resource_map *);
int rman_get_rid(struct resource *);
rman_res_t rman_get_size(struct resource *);
rman_res_t rman_get_start(struct resource *);
@@ -151,6 +153,7 @@ void rman_set_bushandle(struct resource *_r, bus_space_handle_t _h);
void rman_set_bustag(struct resource *_r, bus_space_tag_t _t);
void rman_set_device(struct resource *_r, struct device *_dev);
void rman_set_end(struct resource *_r, rman_res_t _end);
+void rman_set_mapping(struct resource *, struct resource_map *);
void rman_set_rid(struct resource *_r, int _rid);
void rman_set_start(struct resource *_r, rman_res_t _start);
void rman_set_virtual(struct resource *_r, void *_v);
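RF_UNMAPPED and the rman_get_mapping()/rman_set_mapping() accessors back the new resource-mapping bus methods implemented for the x86 nexus later in this diff. A hedged driver-side sketch of mapping only part of a memory resource; the rid, size, and helper name are invented, and it assumes the bus_map_resource()/bus_unmap_resource() wrappers for the new bus methods are available to drivers.

static int
example_partial_map_sketch(device_t dev)
{
	struct resource_map map;
	struct resource_map_request req;
	struct resource *res;
	int error, rid;

	/* Allocate and activate the resource without an implicit mapping. */
	rid = 0;
	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | RF_UNMAPPED);
	if (res == NULL)
		return (ENXIO);

	/* Map only the first 4KB of the window, uncacheable. */
	resource_init_map_request(&req);
	req.offset = 0;
	req.length = 4096;
	req.memattr = VM_MEMATTR_UNCACHEABLE;
	error = bus_map_resource(dev, SYS_RES_MEMORY, res, &req, &map);
	if (error != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, res);
		return (error);
	}

	/* Registers are reachable through the map's tag and handle. */
	(void)bus_space_read_4(map.r_bustag, map.r_bushandle, 0);

	bus_unmap_resource(dev, SYS_RES_MEMORY, res, &map);
	bus_release_resource(dev, SYS_RES_MEMORY, rid, res);
	return (0);
}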
diff --git a/sys/sys/sglist.h b/sys/sys/sglist.h
index c712f63..1c09858 100644
--- a/sys/sys/sglist.h
+++ b/sys/sys/sglist.h
@@ -91,10 +91,13 @@ int sglist_append_phys(struct sglist *sg, vm_paddr_t paddr,
int sglist_append_uio(struct sglist *sg, struct uio *uio);
int sglist_append_user(struct sglist *sg, void *buf, size_t len,
struct thread *td);
+int sglist_append_vmpages(struct sglist *sg, vm_page_t *m, size_t pgoff,
+ size_t len);
struct sglist *sglist_build(void *buf, size_t len, int mflags);
struct sglist *sglist_clone(struct sglist *sg, int mflags);
int sglist_consume_uio(struct sglist *sg, struct uio *uio, size_t resid);
int sglist_count(void *buf, size_t len);
+int sglist_count_vmpages(vm_page_t *m, size_t pgoff, size_t len);
void sglist_free(struct sglist *sg);
int sglist_join(struct sglist *first, struct sglist *second);
size_t sglist_length(struct sglist *sg);
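sglist_count_vmpages() and sglist_append_vmpages() mirror the existing buffer-based sglist_count()/sglist_append() pair, but take an array of vm_page_t plus a starting offset into the first page. A hedged sketch of the intended pairing, assuming pages[] is an array of already-wired pages supplied by the caller and that the wrapper name is invented:

static struct sglist *
sglist_from_pages_sketch(vm_page_t *pages, size_t pgoff, size_t len)
{
	struct sglist *sg;
	int nsegs;

	/* Size the list from the page run, then append that same run. */
	nsegs = sglist_count_vmpages(pages, pgoff, len);
	sg = sglist_alloc(nsegs, M_WAITOK);
	if (sglist_append_vmpages(sg, pages, pgoff, len) != 0) {
		sglist_free(sg);
		return (NULL);
	}
	return (sg);
}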
diff --git a/sys/sys/sysent.h b/sys/sys/sysent.h
index a79ff04..25dc940 100644
--- a/sys/sys/sysent.h
+++ b/sys/sys/sysent.h
@@ -141,6 +141,8 @@ struct sysentvec {
#define SV_TIMEKEEP 0x040000
#define SV_ABI_MASK 0xff
+#define SV_ABI_ERRNO(p, e) ((p)->p_sysent->sv_errsize <= 0 ? e : \
+ ((e) >= (p)->p_sysent->sv_errsize ? -1 : (p)->p_sysent->sv_errtbl[e]))
#define SV_PROC_FLAG(p, x) ((p)->p_sysent->sv_flags & (x))
#define SV_PROC_ABI(p) ((p)->p_sysent->sv_flags & SV_ABI_MASK)
#define SV_CURPROC_FLAG(x) SV_PROC_FLAG(curproc, x)
@@ -153,8 +155,6 @@ struct sysentvec {
#ifdef _KERNEL
extern struct sysentvec aout_sysvec;
-extern struct sysentvec elf_freebsd_sysvec;
-extern struct sysentvec null_sysvec;
extern struct sysent sysent[];
extern const char *syscallnames[];
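SV_ABI_ERRNO() translates a native errno into the numbering of the process's ABI via the sv_errtbl/sv_errsize sysentvec fields: ABIs without a table (sv_errsize <= 0) see the value unchanged, and values beyond the table collapse to -1. A purely hypothetical helper showing the call shape:

/* Return a native error in the numbering of p's ABI (illustration only). */
static int
abi_errno_sketch(struct proc *p, int error)
{

	return (SV_ABI_ERRNO(p, error));
}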
diff --git a/sys/sys/taskqueue.h b/sys/sys/taskqueue.h
index 432a75e..4c4044f 100644
--- a/sys/sys/taskqueue.h
+++ b/sys/sys/taskqueue.h
@@ -56,6 +56,7 @@ enum taskqueue_callback_type {
#define TASKQUEUE_CALLBACK_TYPE_MIN TASKQUEUE_CALLBACK_TYPE_INIT
#define TASKQUEUE_CALLBACK_TYPE_MAX TASKQUEUE_CALLBACK_TYPE_SHUTDOWN
#define TASKQUEUE_NUM_CALLBACKS TASKQUEUE_CALLBACK_TYPE_MAX + 1
+#define TASKQUEUE_NAMELEN 32
typedef void (*taskqueue_callback_fn)(void *context);
@@ -97,7 +98,6 @@ void taskqueue_set_callback(struct taskqueue *queue,
#define TASK_INITIALIZER(priority, func, context) \
{ .ta_pending = 0, \
- .ta_flags = 0, \
.ta_priority = (priority), \
.ta_func = (func), \
.ta_context = (context) }
@@ -113,7 +113,6 @@ void taskqueue_thread_enqueue(void *context);
*/
#define TASK_INIT(task, priority, func, context) do { \
(task)->ta_pending = 0; \
- (task)->ta_flags = 0; \
(task)->ta_priority = (priority); \
(task)->ta_func = (func); \
(task)->ta_context = (context); \
@@ -223,7 +222,6 @@ int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride);
#define GTASK_INIT(task, priority, func, context) do { \
(task)->ta_pending = 0; \
- (task)->ta_flags = TASK_SKIP_WAKEUP; \
(task)->ta_priority = (priority); \
(task)->ta_func = (func); \
(task)->ta_context = (context); \
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index 4d3df29..9ff2903 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -125,13 +125,12 @@ extern int vm_pageout_wakeup_thresh;
* This routine is typically used at the user<->system interface to determine
* whether we need to block in order to avoid a low memory deadlock.
*/
-
-static __inline
-int
+static inline int
vm_page_count_severe(void)
{
- return (vm_cnt.v_free_severe > (vm_cnt.v_free_count +
- vm_cnt.v_cache_count));
+
+ return (vm_cnt.v_free_severe > vm_cnt.v_free_count +
+ vm_cnt.v_cache_count);
}
/*
@@ -141,52 +140,48 @@ vm_page_count_severe(void)
* we can execute potentially very expensive code in terms of memory. It
* is also used by the pageout daemon to calculate when to sleep, when
* to wake waiters up, and when (after making a pass) to become more
- * desparate.
+ * desperate.
*/
-
-static __inline
-int
+static inline int
vm_page_count_min(void)
{
- return (vm_cnt.v_free_min > (vm_cnt.v_free_count + vm_cnt.v_cache_count));
+
+ return (vm_cnt.v_free_min > vm_cnt.v_free_count + vm_cnt.v_cache_count);
}
/*
* Return TRUE if we have not reached our free page target during
* free page recovery operations.
*/
-
-static __inline
-int
+static inline int
vm_page_count_target(void)
{
- return (vm_cnt.v_free_target > (vm_cnt.v_free_count +
- vm_cnt.v_cache_count));
+
+ return (vm_cnt.v_free_target > vm_cnt.v_free_count +
+ vm_cnt.v_cache_count);
}
/*
* Return the number of pages we need to free-up or cache
* A positive number indicates that we do not have enough free pages.
*/
-
-static __inline
-int
+static inline int
vm_paging_target(void)
{
- return (vm_cnt.v_free_target - (vm_cnt.v_free_count +
- vm_cnt.v_cache_count));
+
+ return (vm_cnt.v_free_target - (vm_cnt.v_free_count +
+ vm_cnt.v_cache_count));
}
/*
* Returns TRUE if the pagedaemon needs to be woken up.
*/
-
-static __inline
-int
+static inline int
vm_paging_needed(void)
{
- return (vm_cnt.v_free_count + vm_cnt.v_cache_count <
- vm_pageout_wakeup_thresh);
+
+ return (vm_cnt.v_free_count + vm_cnt.v_cache_count <
+ (u_int)vm_pageout_wakeup_thresh);
}
#endif
diff --git a/sys/ufs/ffs/ffs_alloc.c b/sys/ufs/ffs/ffs_alloc.c
index 797a42b..0e4dc03 100644
--- a/sys/ufs/ffs/ffs_alloc.c
+++ b/sys/ufs/ffs/ffs_alloc.c
@@ -1102,8 +1102,8 @@ dup_alloc:
/*
* Set up a new generation number for this inode.
*/
- if (ip->i_gen == 0 || ++ip->i_gen == 0)
- ip->i_gen = arc4random() / 2 + 1;
+ while (ip->i_gen == 0 || ++ip->i_gen == 0)
+ ip->i_gen = arc4random();
DIP_SET(ip, i_gen, ip->i_gen);
if (fs->fs_magic == FS_UFS2_MAGIC) {
vfs_timestamp(&ts);
@@ -2080,7 +2080,8 @@ gotit:
bzero(ibp->b_data, (int)fs->fs_bsize);
dp2 = (struct ufs2_dinode *)(ibp->b_data);
for (i = 0; i < INOPB(fs); i++) {
- dp2->di_gen = arc4random() / 2 + 1;
+ while (dp2->di_gen == 0)
+ dp2->di_gen = arc4random();
dp2++;
}
/*
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index 712fc21..d644b73 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -239,14 +239,12 @@ ffs_mount(struct mount *mp)
if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
(error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
return (error);
- DROP_GIANT();
g_topology_lock();
/*
* Return to normal read-only mode.
*/
error = g_access(ump->um_cp, 0, -1, 0);
g_topology_unlock();
- PICKUP_GIANT();
ump->um_fsckpid = 0;
}
if (fs->fs_ronly == 0 &&
@@ -294,14 +292,12 @@ ffs_mount(struct mount *mp)
}
if (MOUNTEDSOFTDEP(mp))
softdep_unmount(mp);
- DROP_GIANT();
g_topology_lock();
/*
* Drop our write and exclusive access.
*/
g_access(ump->um_cp, 0, -1, -1);
g_topology_unlock();
- PICKUP_GIANT();
fs->fs_ronly = 1;
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_RDONLY;
@@ -359,14 +355,12 @@ ffs_mount(struct mount *mp)
return (EPERM);
}
}
- DROP_GIANT();
g_topology_lock();
/*
* Request exclusive write access.
*/
error = g_access(ump->um_cp, 0, 1, 1);
g_topology_unlock();
- PICKUP_GIANT();
if (error)
return (error);
if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
@@ -433,14 +427,12 @@ ffs_mount(struct mount *mp)
}
KASSERT(MOUNTEDSOFTDEP(mp) == 0,
("soft updates enabled on read-only file system"));
- DROP_GIANT();
g_topology_lock();
/*
* Request write access.
*/
error = g_access(ump->um_cp, 0, 1, 0);
g_topology_unlock();
- PICKUP_GIANT();
if (error) {
vfs_mount_error(mp,
"Checker activation failed on %s",
@@ -523,14 +515,12 @@ ffs_mount(struct mount *mp)
("soft updates enabled on read-only file system"));
ump = VFSTOUFS(mp);
fs = ump->um_fs;
- DROP_GIANT();
g_topology_lock();
/*
* Request write access.
*/
error = g_access(ump->um_cp, 0, 1, 0);
g_topology_unlock();
- PICKUP_GIANT();
if (error) {
printf("WARNING: %s: Checker activation "
"failed\n", fs->fs_fsmnt);
@@ -764,25 +754,29 @@ ffs_mountfs(devvp, mp, td)
cred = td ? td->td_ucred : NOCRED;
ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
+ KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
dev = devvp->v_rdev;
- dev_ref(dev);
- DROP_GIANT();
+ if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
+ (uintptr_t)mp) == 0) {
+ VOP_UNLOCK(devvp, 0);
+ return (EBUSY);
+ }
g_topology_lock();
error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
g_topology_unlock();
- PICKUP_GIANT();
+ if (error != 0) {
+ atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
+ VOP_UNLOCK(devvp, 0);
+ return (error);
+ }
+ dev_ref(dev);
+ devvp->v_bufobj.bo_ops = &ffs_ops;
VOP_UNLOCK(devvp, 0);
- if (error)
- goto out;
- if (devvp->v_rdev->si_iosize_max != 0)
- mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
+ if (dev->si_iosize_max != 0)
+ mp->mnt_iosize_max = dev->si_iosize_max;
if (mp->mnt_iosize_max > MAXPHYS)
mp->mnt_iosize_max = MAXPHYS;
- devvp->v_bufobj.bo_ops = &ffs_ops;
- if (devvp->v_type == VCHR)
- devvp->v_rdev->si_mountpt = mp;
-
fs = NULL;
sblockloc = 0;
/*
@@ -1083,14 +1077,10 @@ ffs_mountfs(devvp, mp, td)
out:
if (bp)
brelse(bp);
- if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
- devvp->v_rdev->si_mountpt = NULL;
if (cp != NULL) {
- DROP_GIANT();
g_topology_lock();
g_vfs_close(cp);
g_topology_unlock();
- PICKUP_GIANT();
}
if (ump) {
mtx_destroy(UFS_MTX(ump));
@@ -1102,6 +1092,7 @@ out:
free(ump, M_UFSMNT);
mp->mnt_data = NULL;
}
+ atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
dev_rel(dev);
return (error);
}
@@ -1275,7 +1266,6 @@ ffs_unmount(mp, mntflags)
taskqueue_drain_all(ump->um_trim_tq);
taskqueue_free(ump->um_trim_tq);
}
- DROP_GIANT();
g_topology_lock();
if (ump->um_fsckpid > 0) {
/*
@@ -1286,9 +1276,7 @@ ffs_unmount(mp, mntflags)
}
g_vfs_close(ump->um_cp);
g_topology_unlock();
- PICKUP_GIANT();
- if (ump->um_devvp->v_type == VCHR && ump->um_devvp->v_rdev != NULL)
- ump->um_devvp->v_rdev->si_mountpt = NULL;
+ atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
vrele(ump->um_devvp);
dev_rel(ump->um_dev);
mtx_destroy(UFS_MTX(ump));
@@ -1780,7 +1768,8 @@ ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
* already have one. This should only happen on old filesystems.
*/
if (ip->i_gen == 0) {
- ip->i_gen = arc4random() / 2 + 1;
+ while (ip->i_gen == 0)
+ ip->i_gen = arc4random();
if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
ip->i_flag |= IN_MODIFIED;
DIP_SET(ip, i_gen, ip->i_gen);
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index ae8adae..db1a2c4 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -152,7 +152,7 @@ static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd; /* Allocate from here next */
static int nswapdev; /* Number of swap devices */
int swap_pager_avail;
-static int swdev_syscall_active = 0; /* serialize swap(on|off) */
+static struct sx swdev_syscall_lock; /* serialize swap(on|off) */
static vm_ooffset_t swap_total;
SYSCTL_QUAD(_vm, OID_AUTO, swap_total, CTLFLAG_RD, &swap_total, 0,
@@ -325,8 +325,9 @@ static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max; /* maximum VOP I/O allowed */
static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
-SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW,
- NULL, 0, sysctl_swap_async_max, "I", "Maximum running async swap ops");
+SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
+ CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
+ "Maximum running async swap ops");
static struct swblock **swhash;
static int swhash_mask;
@@ -487,6 +488,7 @@ swap_pager_init(void)
TAILQ_INIT(&swap_pager_object_list[i]);
mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);
mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
+ sx_init(&swdev_syscall_lock, "swsysc");
/*
* Device Stripe, in PAGE_SIZE'd blocks
@@ -1664,7 +1666,7 @@ swap_pager_swapoff(struct swdevt *sp)
struct swblock *swap;
int i, j, retries;
- GIANT_REQUIRED;
+ sx_assert(&swdev_syscall_lock, SA_XLOCKED);
retries = 0;
full_rescan:
@@ -2005,10 +2007,7 @@ sys_swapon(struct thread *td, struct swapon_args *uap)
if (error)
return (error);
- mtx_lock(&Giant);
- while (swdev_syscall_active)
- tsleep(&swdev_syscall_active, PUSER - 1, "swpon", 0);
- swdev_syscall_active = 1;
+ sx_xlock(&swdev_syscall_lock);
/*
* Swap metadata may not fit in the KVM if we have physical
@@ -2043,9 +2042,7 @@ sys_swapon(struct thread *td, struct swapon_args *uap)
if (error)
vrele(vp);
done:
- swdev_syscall_active = 0;
- wakeup_one(&swdev_syscall_active);
- mtx_unlock(&Giant);
+ sx_xunlock(&swdev_syscall_lock);
return (error);
}
@@ -2175,10 +2172,7 @@ sys_swapoff(struct thread *td, struct swapoff_args *uap)
if (error)
return (error);
- mtx_lock(&Giant);
- while (swdev_syscall_active)
- tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
- swdev_syscall_active = 1;
+ sx_xlock(&swdev_syscall_lock);
NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
td);
@@ -2200,9 +2194,7 @@ sys_swapoff(struct thread *td, struct swapoff_args *uap)
}
error = swapoff_one(sp, td->td_ucred);
done:
- swdev_syscall_active = 0;
- wakeup_one(&swdev_syscall_active);
- mtx_unlock(&Giant);
+ sx_xunlock(&swdev_syscall_lock);
return (error);
}
@@ -2214,7 +2206,7 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
int error;
#endif
- mtx_assert(&Giant, MA_OWNED);
+ sx_assert(&swdev_syscall_lock, SA_XLOCKED);
#ifdef MAC
(void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
error = mac_system_check_swapoff(cred, sp->sw_vp);
@@ -2276,10 +2268,7 @@ swapoff_all(void)
const char *devname;
int error;
- mtx_lock(&Giant);
- while (swdev_syscall_active)
- tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
- swdev_syscall_active = 1;
+ sx_xlock(&swdev_syscall_lock);
mtx_lock(&sw_dev_mtx);
TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
@@ -2299,9 +2288,7 @@ swapoff_all(void)
}
mtx_unlock(&sw_dev_mtx);
- swdev_syscall_active = 0;
- wakeup_one(&swdev_syscall_active);
- mtx_unlock(&Giant);
+ sx_xunlock(&swdev_syscall_lock);
}
void
@@ -2370,7 +2357,8 @@ sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
"Number of swap devices");
-SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD, sysctl_vm_swap_info,
+SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
+ sysctl_vm_swap_info,
"Swap statistics by device");
/*
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index d87495d..1df51fe 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -109,8 +109,9 @@ typedef struct vm_object *vm_object_t;
typedef int boolean_t;
/*
- * The exact set of memory attributes is machine dependent. However, every
- * machine is required to define VM_MEMATTR_DEFAULT.
+ * The exact set of memory attributes is machine dependent. However,
+ * every machine is required to define VM_MEMATTR_DEFAULT and
+ * VM_MEMATTR_UNCACHEABLE.
*/
typedef char vm_memattr_t; /* memory attribute codes */
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 13a5757..86e7f7b 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -657,48 +657,40 @@ vnode_locked:
hardfault++;
break; /* break to PAGE HAS BEEN FOUND */
}
- /*
- * Remove the bogus page (which does not exist at this
- * object/offset); before doing so, we must get back
- * our object lock to preserve our invariant.
- *
- * Also wake up any other process that may want to bring
- * in this page.
- *
- * If this is the top-level object, we must leave the
- * busy page to prevent another process from rushing
- * past us, and inserting the page in that object at
- * the same time that we are.
- */
if (rv == VM_PAGER_ERROR)
printf("vm_fault: pager read error, pid %d (%s)\n",
curproc->p_pid, curproc->p_comm);
+
/*
- * Data outside the range of the pager or an I/O error
- */
- /*
- * XXX - the check for kernel_map is a kludge to work
- * around having the machine panic on a kernel space
- * fault w/ I/O error.
+ * If an I/O error occurred or the requested page was
+ * outside the range of the pager, clean up and return
+ * an error.
*/
- if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
- (rv == VM_PAGER_BAD)) {
+ if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
vm_page_lock(fs.m);
vm_page_free(fs.m);
vm_page_unlock(fs.m);
fs.m = NULL;
unlock_and_deallocate(&fs);
- return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
+ return (rv == VM_PAGER_ERROR ? KERN_FAILURE :
+ KERN_PROTECTION_FAILURE);
}
+
+ /*
+ * The requested page does not exist at this object/
+ * offset. Remove the invalid page from the object,
+ * waking up anyone waiting for it, and continue on to
+ * the next object. However, if this is the top-level
+ * object, we must leave the busy page in place to
+ * prevent another process from rushing past us, and
+ * inserting the page in that object at the same time
+ * that we are.
+ */
if (fs.object != fs.first_object) {
vm_page_lock(fs.m);
vm_page_free(fs.m);
vm_page_unlock(fs.m);
fs.m = NULL;
- /*
- * XXX - we cannot just fall out at this
- * point, m has been freed and is invalid!
- */
}
}
@@ -713,7 +705,6 @@ vnode_locked:
* Move on to the next object. Lock the next object before
* unlocking the current one.
*/
- fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
next_object = fs.object->backing_object;
if (next_object == NULL) {
/*
@@ -751,6 +742,8 @@ vnode_locked:
vm_object_pip_add(next_object, 1);
if (fs.object != fs.first_object)
vm_object_pip_wakeup(fs.object);
+ fs.pindex +=
+ OFF_TO_IDX(fs.object->backing_object_offset);
VM_OBJECT_WUNLOCK(fs.object);
fs.object = next_object;
}
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 118348d..83dc01a 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -731,8 +731,6 @@ faultin(p)
* This swapin algorithm attempts to swap-in processes only if there
* is enough space for them. Of course, if a process waits for a long
* time, it will be swapped in anyway.
- *
- * Giant is held on entry.
*/
void
swapper(void)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 2f3b17f..0ea8d86 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -384,11 +384,11 @@ vm_page_domain_init(struct vm_domain *vmd)
*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
"vm inactive pagequeue";
- *__DECONST(int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
+ *__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
&vm_cnt.v_inactive_count;
*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
"vm active pagequeue";
- *__DECONST(int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
+ *__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
&vm_cnt.v_active_count;
vmd->vmd_page_count = 0;
vmd->vmd_free_count = 0;
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 4eb0050..0f0b330 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -215,7 +215,7 @@ struct vm_pagequeue {
struct mtx pq_mutex;
struct pglist pq_pl;
int pq_cnt;
- int * const pq_vcnt;
+ u_int * const pq_vcnt;
const char * const pq_name;
} __aligned(CACHE_LINE_SIZE);
diff --git a/sys/x86/x86/nexus.c b/sys/x86/x86/nexus.c
index 345b70f..9d125e7 100644
--- a/sys/x86/x86/nexus.c
+++ b/sys/x86/x86/nexus.c
@@ -114,6 +114,12 @@ static int nexus_activate_resource(device_t, device_t, int, int,
struct resource *);
static int nexus_deactivate_resource(device_t, device_t, int, int,
struct resource *);
+static int nexus_map_resource(device_t bus, device_t child, int type,
+ struct resource *r,
+ struct resource_map_request *argsp,
+ struct resource_map *map);
+static int nexus_unmap_resource(device_t bus, device_t child, int type,
+ struct resource *r, struct resource_map *map);
static int nexus_release_resource(device_t, device_t, int, int,
struct resource *);
static int nexus_setup_intr(device_t, device_t, struct resource *, int flags,
@@ -154,6 +160,8 @@ static device_method_t nexus_methods[] = {
DEVMETHOD(bus_release_resource, nexus_release_resource),
DEVMETHOD(bus_activate_resource, nexus_activate_resource),
DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource),
+ DEVMETHOD(bus_map_resource, nexus_map_resource),
+ DEVMETHOD(bus_unmap_resource, nexus_unmap_resource),
DEVMETHOD(bus_setup_intr, nexus_setup_intr),
DEVMETHOD(bus_teardown_intr, nexus_teardown_intr),
#ifdef SMP
@@ -432,11 +440,81 @@ static int
nexus_activate_resource(device_t bus, device_t child, int type, int rid,
struct resource *r)
{
+ struct resource_map map;
+ int error;
+
+ error = rman_activate_resource(r);
+ if (error != 0)
+ return (error);
+
+ if (!(rman_get_flags(r) & RF_UNMAPPED) &&
+ (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) {
+ error = nexus_map_resource(bus, child, type, r, NULL, &map);
+ if (error) {
+ rman_deactivate_resource(r);
+ return (error);
+ }
+
+ rman_set_mapping(r, &map);
+ }
+ return (0);
+}
+
+static int
+nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ struct resource_map map;
+ int error;
+
+ error = rman_deactivate_resource(r);
+ if (error)
+ return (error);
+
+ if (!(rman_get_flags(r) & RF_UNMAPPED) &&
+ (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) {
+ rman_get_mapping(r, &map);
+ nexus_unmap_resource(bus, child, type, r, &map);
+ }
+ return (0);
+}
+
+static int
+nexus_map_resource(device_t bus, device_t child, int type, struct resource *r,
+ struct resource_map_request *argsp, struct resource_map *map)
+{
+ struct resource_map_request args;
+ rman_res_t end, length, start;
#ifdef PC98
- bus_space_handle_t bh;
int error;
#endif
- void *vaddr;
+
+ /* Resources must be active to be mapped. */
+ if (!(rman_get_flags(r) & RF_ACTIVE))
+ return (ENXIO);
+
+ /* Mappings are only supported on I/O and memory resources. */
+ switch (type) {
+ case SYS_RES_IOPORT:
+ case SYS_RES_MEMORY:
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ resource_init_map_request(&args);
+ if (argsp != NULL)
+ bcopy(argsp, &args, imin(argsp->size, args.size));
+ start = rman_get_start(r) + args.offset;
+ if (args.length == 0)
+ length = rman_get_size(r);
+ else
+ length = args.length;
+ end = start + length - 1;
+ if (start > rman_get_end(r) || start < rman_get_start(r))
+ return (EINVAL);
+ if (end > rman_get_end(r) || end < start)
+ return (EINVAL);
/*
* If this is a memory resource, map it into the kernel.
@@ -445,58 +523,64 @@ nexus_activate_resource(device_t bus, device_t child, int type, int rid,
case SYS_RES_IOPORT:
#ifdef PC98
error = i386_bus_space_handle_alloc(X86_BUS_SPACE_IO,
- rman_get_start(r), rman_get_size(r), &bh);
+ start, length, &map->r_bushandle);
if (error)
return (error);
- rman_set_bushandle(r, bh);
#else
- rman_set_bushandle(r, rman_get_start(r));
+ map->r_bushandle = start;
#endif
- rman_set_bustag(r, X86_BUS_SPACE_IO);
+ map->r_bustag = X86_BUS_SPACE_IO;
+ map->r_size = length;
+ map->r_vaddr = NULL;
break;
case SYS_RES_MEMORY:
#ifdef PC98
error = i386_bus_space_handle_alloc(X86_BUS_SPACE_MEM,
- rman_get_start(r), rman_get_size(r), &bh);
+ start, length, &map->r_bushandle);
if (error)
return (error);
#endif
- vaddr = pmap_mapdev(rman_get_start(r), rman_get_size(r));
- rman_set_virtual(r, vaddr);
- rman_set_bustag(r, X86_BUS_SPACE_MEM);
+ map->r_vaddr = pmap_mapdev_attr(start, length, args.memattr);
+ map->r_bustag = X86_BUS_SPACE_MEM;
+ map->r_size = length;
+
+ /*
+ * PC-98 stores the virtual address as a member of the
+ * structure in the handle. On plain x86, the handle is
+ * the virtual address.
+ */
#ifdef PC98
- /* PC-98: the type of bus_space_handle_t is the structure. */
- bh->bsh_base = (bus_addr_t) vaddr;
- rman_set_bushandle(r, bh);
+ map->r_bushandle->bsh_base = (bus_addr_t)map->r_vaddr;
#else
- /* IBM-PC: the type of bus_space_handle_t is u_int */
- rman_set_bushandle(r, (bus_space_handle_t) vaddr);
+ map->r_bushandle = (bus_space_handle_t)map->r_vaddr;
#endif
+ break;
}
- return (rman_activate_resource(r));
+ return (0);
}
static int
-nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
- struct resource *r)
+nexus_unmap_resource(device_t bus, device_t child, int type, struct resource *r,
+ struct resource_map *map)
{
-
+
/*
* If this is a memory resource, unmap it.
*/
- if (type == SYS_RES_MEMORY) {
- pmap_unmapdev((vm_offset_t)rman_get_virtual(r),
- rman_get_size(r));
- }
+ switch (type) {
+ case SYS_RES_MEMORY:
+ pmap_unmapdev((vm_offset_t)map->r_vaddr, map->r_size);
+ /* FALLTHROUGH */
+ case SYS_RES_IOPORT:
#ifdef PC98
- if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
- bus_space_handle_t bh;
-
- bh = rman_get_bushandle(r);
- i386_bus_space_handle_free(rman_get_bustag(r), bh, bh->bsh_sz);
- }
+ i386_bus_space_handle_free(map->r_bustag, map->r_bushandle,
+ map->r_bushandle->bsh_sz);
#endif
- return (rman_deactivate_resource(r));
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
}
static int