summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorRenato Botelho <renato@netgate.com>2016-06-06 08:05:51 -0300
committerRenato Botelho <renato@netgate.com>2016-06-06 08:05:51 -0300
commit47dfb8d658406ebf07225c0104ebe4be06ae405f (patch)
tree83174cbd9592560c48ad2cd11afe42c5c12b6c1b /sys
parent131cd15b13bbd3e141e911a65cf7a1895ec6ab05 (diff)
parent13d657a35d96e65f1be391830f36e1adff33534f (diff)
downloadFreeBSD-src-47dfb8d658406ebf07225c0104ebe4be06ae405f.zip
FreeBSD-src-47dfb8d658406ebf07225c0104ebe4be06ae405f.tar.gz
Merge remote-tracking branch 'origin/stable/10' into devel
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/amd64/vm_machdep.c8
-rw-r--r--sys/amd64/linux/linux_proto.h6
-rw-r--r--sys/amd64/linux/linux_systrace_args.c12
-rw-r--r--sys/amd64/linux/syscalls.master6
-rw-r--r--sys/amd64/linux32/linux32_proto.h6
-rw-r--r--sys/amd64/linux32/linux32_systrace_args.c12
-rw-r--r--sys/amd64/linux32/syscalls.master6
-rw-r--r--sys/boot/common/dev_net.c6
-rw-r--r--sys/boot/i386/libi386/pxe.c5
-rw-r--r--sys/boot/libstand32/Makefile2
-rw-r--r--sys/boot/userboot/libstand/Makefile2
-rw-r--r--sys/cam/ctl/ctl_tpc.c120
-rw-r--r--sys/cam/scsi/scsi_enc_safte.c7
-rw-r--r--sys/cam/scsi/scsi_enc_ses.c41
-rw-r--r--sys/compat/linux/linux_file.c226
-rw-r--r--sys/compat/linux/linux_futex.c235
-rw-r--r--sys/compat/linux/linux_ioctl.c4
-rw-r--r--sys/compat/linux/linux_misc.c1
-rw-r--r--sys/compat/linux/linux_socket.c29
-rw-r--r--sys/compat/ndis/subr_ntoskrnl.c9
-rw-r--r--sys/conf/files.amd6424
-rw-r--r--sys/conf/kmod.mk4
-rw-r--r--sys/dev/fb/vesa.c3
-rw-r--r--sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c124
-rw-r--r--sys/dev/hyperv/storvsc/hv_vstorage.h5
-rw-r--r--sys/dev/iicbus/iic.c9
-rw-r--r--sys/dev/iicbus/iic.h2
-rw-r--r--sys/dev/ioat/ioat.c2091
-rw-r--r--sys/dev/ioat/ioat.h218
-rw-r--r--sys/dev/ioat/ioat_hw.h167
-rw-r--r--sys/dev/ioat/ioat_internal.h600
-rw-r--r--sys/dev/ioat/ioat_test.c602
-rw-r--r--sys/dev/ioat/ioat_test.h90
-rw-r--r--sys/dev/isp/isp.c36
-rw-r--r--sys/dev/isp/isp_freebsd.c4
-rw-r--r--sys/dev/isp/isp_library.c4
-rw-r--r--sys/dev/isp/isp_target.c41
-rw-r--r--sys/dev/isp/ispvar.h9
-rw-r--r--sys/dev/mlx5/mlx5_en/en.h23
-rw-r--r--sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c381
-rw-r--r--sys/dev/mlx5/mlx5_en/mlx5_en_main.c111
-rw-r--r--sys/dev/mlx5/mlx5_en/mlx5_en_tx.c91
-rw-r--r--sys/dev/mpr/mpr_sas.c17
-rw-r--r--sys/dev/mps/mps_sas.c17
-rw-r--r--sys/dev/mrsas/mrsas.c700
-rw-r--r--sys/dev/mrsas/mrsas.h65
-rw-r--r--sys/dev/mrsas/mrsas_cam.c294
-rw-r--r--sys/dev/mrsas/mrsas_fp.c27
-rw-r--r--sys/dev/mrsas/mrsas_ioctl.c25
-rw-r--r--sys/dev/sfxge/common/ef10_ev.c (renamed from sys/dev/sfxge/common/hunt_ev.c)32
-rw-r--r--sys/dev/sfxge/common/ef10_filter.c (renamed from sys/dev/sfxge/common/hunt_filter.c)298
-rw-r--r--sys/dev/sfxge/common/ef10_impl.h1001
-rw-r--r--sys/dev/sfxge/common/ef10_intr.c (renamed from sys/dev/sfxge/common/hunt_intr.c)6
-rw-r--r--sys/dev/sfxge/common/ef10_mac.c (renamed from sys/dev/sfxge/common/hunt_mac.c)75
-rw-r--r--sys/dev/sfxge/common/ef10_mcdi.c (renamed from sys/dev/sfxge/common/hunt_mcdi.c)4
-rw-r--r--sys/dev/sfxge/common/ef10_nic.c1697
-rw-r--r--sys/dev/sfxge/common/ef10_nvram.c (renamed from sys/dev/sfxge/common/hunt_nvram.c)633
-rw-r--r--sys/dev/sfxge/common/ef10_phy.c477
-rw-r--r--sys/dev/sfxge/common/ef10_rx.c (renamed from sys/dev/sfxge/common/hunt_rx.c)21
-rw-r--r--sys/dev/sfxge/common/ef10_tlv_layout.h141
-rwxr-xr-xsys/dev/sfxge/common/ef10_tx.c (renamed from sys/dev/sfxge/common/hunt_tx.c)12
-rw-r--r--sys/dev/sfxge/common/ef10_vpd.c (renamed from sys/dev/sfxge/common/hunt_vpd.c)13
-rw-r--r--sys/dev/sfxge/common/efsys.h41
-rw-r--r--sys/dev/sfxge/common/efx.h222
-rw-r--r--sys/dev/sfxge/common/efx_bootcfg.c2
-rw-r--r--sys/dev/sfxge/common/efx_check.h284
-rw-r--r--sys/dev/sfxge/common/efx_crc32.c2
-rw-r--r--sys/dev/sfxge/common/efx_ev.c219
-rw-r--r--sys/dev/sfxge/common/efx_filter.c677
-rw-r--r--sys/dev/sfxge/common/efx_hash.c2
-rw-r--r--sys/dev/sfxge/common/efx_impl.h236
-rw-r--r--sys/dev/sfxge/common/efx_intr.c141
-rw-r--r--sys/dev/sfxge/common/efx_lic.c1036
-rw-r--r--sys/dev/sfxge/common/efx_mac.c258
-rw-r--r--sys/dev/sfxge/common/efx_mcdi.c57
-rw-r--r--sys/dev/sfxge/common/efx_mcdi.h2
-rw-r--r--sys/dev/sfxge/common/efx_mon.c104
-rw-r--r--sys/dev/sfxge/common/efx_nic.c270
-rw-r--r--sys/dev/sfxge/common/efx_nvram.c97
-rw-r--r--sys/dev/sfxge/common/efx_phy.c343
-rw-r--r--sys/dev/sfxge/common/efx_phy_ids.h2
-rw-r--r--sys/dev/sfxge/common/efx_port.c12
-rw-r--r--sys/dev/sfxge/common/efx_regs.h2
-rw-r--r--sys/dev/sfxge/common/efx_regs_ef10.h2
-rw-r--r--sys/dev/sfxge/common/efx_regs_mcdi.h790
-rw-r--r--sys/dev/sfxge/common/efx_regs_pci.h2
-rw-r--r--sys/dev/sfxge/common/efx_rx.c174
-rw-r--r--sys/dev/sfxge/common/efx_sram.c9
-rw-r--r--sys/dev/sfxge/common/efx_tx.c172
-rw-r--r--sys/dev/sfxge/common/efx_types.h2
-rw-r--r--sys/dev/sfxge/common/efx_vpd.c61
-rw-r--r--sys/dev/sfxge/common/efx_wol.c2
-rw-r--r--sys/dev/sfxge/common/hunt_impl.h976
-rw-r--r--sys/dev/sfxge/common/hunt_nic.c1602
-rw-r--r--sys/dev/sfxge/common/hunt_phy.c483
-rw-r--r--sys/dev/sfxge/common/hunt_sram.c68
-rw-r--r--sys/dev/sfxge/common/mcdi_mon.c6
-rw-r--r--sys/dev/sfxge/common/mcdi_mon.h2
-rw-r--r--sys/dev/sfxge/common/medford_impl.h2
-rw-r--r--sys/dev/sfxge/common/medford_nic.c67
-rw-r--r--sys/dev/sfxge/common/siena_flash.h2
-rw-r--r--sys/dev/sfxge/common/siena_impl.h52
-rw-r--r--sys/dev/sfxge/common/siena_mac.c23
-rw-r--r--sys/dev/sfxge/common/siena_mcdi.c6
-rw-r--r--sys/dev/sfxge/common/siena_nic.c28
-rw-r--r--sys/dev/sfxge/common/siena_nvram.c2
-rw-r--r--sys/dev/sfxge/common/siena_phy.c43
-rw-r--r--sys/dev/sfxge/common/siena_sram.c2
-rw-r--r--sys/dev/sfxge/common/siena_vpd.c8
-rw-r--r--sys/dev/sfxge/sfxge.c7
-rw-r--r--sys/dev/sfxge/sfxge.h39
-rw-r--r--sys/dev/sfxge/sfxge_dma.c2
-rw-r--r--sys/dev/sfxge/sfxge_ev.c31
-rw-r--r--sys/dev/sfxge/sfxge_intr.c2
-rw-r--r--sys/dev/sfxge/sfxge_ioc.h2
-rw-r--r--sys/dev/sfxge/sfxge_mcdi.c2
-rw-r--r--sys/dev/sfxge/sfxge_nvram.c2
-rw-r--r--sys/dev/sfxge/sfxge_port.c2
-rw-r--r--sys/dev/sfxge/sfxge_rx.c9
-rw-r--r--sys/dev/sfxge/sfxge_rx.h20
-rw-r--r--sys/dev/sfxge/sfxge_tx.c2
-rw-r--r--sys/dev/sfxge/sfxge_tx.h2
-rw-r--r--sys/dev/sfxge/sfxge_version.h4
-rw-r--r--sys/dev/usb/net/uhso.c4
-rw-r--r--sys/dev/usb/usb_dev.c8
-rw-r--r--sys/dev/usb/usb_device.c27
-rw-r--r--sys/dev/usb/usb_device.h3
-rw-r--r--sys/fs/devfs/devfs_devs.c5
-rw-r--r--sys/fs/fuse/fuse_file.c34
-rw-r--r--sys/fs/fuse/fuse_file.h1
-rw-r--r--sys/fs/fuse/fuse_node.c2
-rw-r--r--sys/fs/fuse/fuse_vnops.c26
-rw-r--r--sys/fs/nfsserver/nfs_nfsdserv.c4
-rw-r--r--sys/i386/i386/vm_machdep.c8
-rw-r--r--sys/i386/linux/linux_proto.h6
-rw-r--r--sys/i386/linux/linux_systrace_args.c12
-rw-r--r--sys/i386/linux/syscalls.master6
-rw-r--r--sys/kern/kern_synch.c2
-rw-r--r--sys/kern/vfs_syscalls.c1
-rw-r--r--sys/kern/vfs_vnops.c2
-rw-r--r--sys/modules/Makefile2
-rw-r--r--sys/modules/ioat/Makefile15
-rw-r--r--sys/modules/sfxge/Makefile6
-rw-r--r--sys/net/if.c3
-rw-r--r--sys/netinet/ip_dummynet.h1
-rw-r--r--sys/netpfil/ipfw/ip_dn_io.c78
-rw-r--r--sys/netpfil/ipfw/ip_dummynet.c5
-rw-r--r--sys/netpfil/pf/pf_norm.c2
-rw-r--r--sys/nfs/bootp_subr.c94
-rw-r--r--sys/ofed/include/linux/etherdevice.h30
-rw-r--r--sys/powerpc/powerpc/exec_machdep.c6
-rw-r--r--sys/sparc64/sparc64/vm_machdep.c8
-rw-r--r--sys/sys/sysent.h4
-rw-r--r--sys/sys/vnode.h1
-rw-r--r--sys/ufs/ffs/ffs_inode.c2
-rw-r--r--sys/ufs/ffs/ffs_vfsops.c32
-rw-r--r--sys/ufs/ufs/ufs_lookup.c4
-rw-r--r--sys/ufs/ufs/ufs_vnops.c3
-rw-r--r--sys/vm/vm_glue.c2
-rw-r--r--sys/vm/vm_object.c21
-rw-r--r--sys/vm/vm_page.c7
161 files changed, 13632 insertions, 6503 deletions
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index aaa3741..751966f 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -414,13 +414,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
break;
default:
- if (td->td_proc->p_sysent->sv_errsize) {
- if (error >= td->td_proc->p_sysent->sv_errsize)
- error = -1; /* XXX */
- else
- error = td->td_proc->p_sysent->sv_errtbl[error];
- }
- td->td_frame->tf_rax = error;
+ td->td_frame->tf_rax = SV_ABI_ERRNO(td->td_proc, error);
td->td_frame->tf_rflags |= PSL_C;
break;
}
diff --git a/sys/amd64/linux/linux_proto.h b/sys/amd64/linux/linux_proto.h
index 0a0cbe6..4946805 100644
--- a/sys/amd64/linux/linux_proto.h
+++ b/sys/amd64/linux/linux_proto.h
@@ -524,16 +524,16 @@ struct linux_getpriority_args {
};
struct linux_sched_setparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_setscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
char policy_l_[PADL_(l_int)]; l_int policy; char policy_r_[PADR_(l_int)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
diff --git a/sys/amd64/linux/linux_systrace_args.c b/sys/amd64/linux/linux_systrace_args.c
index d649eed..5dcdd55 100644
--- a/sys/amd64/linux/linux_systrace_args.c
+++ b/sys/amd64/linux/linux_systrace_args.c
@@ -1178,7 +1178,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 142: {
struct linux_sched_setparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1186,7 +1186,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 143: {
struct linux_sched_getparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1195,7 +1195,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
struct linux_sched_setscheduler_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
iarg[1] = p->policy; /* l_int */
- uarg[2] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[2] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 3;
break;
}
@@ -4209,7 +4209,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -4222,7 +4222,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -4238,7 +4238,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_int";
break;
case 2:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
diff --git a/sys/amd64/linux/syscalls.master b/sys/amd64/linux/syscalls.master
index 87c8000..50ddd92 100644
--- a/sys/amd64/linux/syscalls.master
+++ b/sys/amd64/linux/syscalls.master
@@ -283,12 +283,12 @@
141 AUE_SETPRIORITY NOPROTO { int setpriority(int which, int who, \
int prio); }
142 AUE_SCHED_SETPARAM STD { int linux_sched_setparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
143 AUE_SCHED_GETPARAM STD { int linux_sched_getparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
144 AUE_SCHED_SETSCHEDULER STD { int linux_sched_setscheduler( \
l_pid_t pid, l_int policy, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
145 AUE_SCHED_GETSCHEDULER STD { int linux_sched_getscheduler( \
l_pid_t pid); }
146 AUE_SCHED_GET_PRIORITY_MAX STD { int linux_sched_get_priority_max( \
diff --git a/sys/amd64/linux32/linux32_proto.h b/sys/amd64/linux32/linux32_proto.h
index 0f98b94..a960c09 100644
--- a/sys/amd64/linux32/linux32_proto.h
+++ b/sys/amd64/linux32/linux32_proto.h
@@ -480,16 +480,16 @@ struct linux_sysctl_args {
};
struct linux_sched_setparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_setscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
char policy_l_[PADL_(l_int)]; l_int policy; char policy_r_[PADR_(l_int)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
diff --git a/sys/amd64/linux32/linux32_systrace_args.c b/sys/amd64/linux32/linux32_systrace_args.c
index fc4c89a..cabfab7 100644
--- a/sys/amd64/linux32/linux32_systrace_args.c
+++ b/sys/amd64/linux32/linux32_systrace_args.c
@@ -1047,7 +1047,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 154: {
struct linux_sched_setparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1055,7 +1055,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 155: {
struct linux_sched_getparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1064,7 +1064,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
struct linux_sched_setscheduler_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
iarg[1] = p->policy; /* l_int */
- uarg[2] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[2] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 3;
break;
}
@@ -3938,7 +3938,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -3951,7 +3951,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -3967,7 +3967,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_int";
break;
case 2:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
diff --git a/sys/amd64/linux32/syscalls.master b/sys/amd64/linux32/syscalls.master
index e40247e..79cd2c8 100644
--- a/sys/amd64/linux32/syscalls.master
+++ b/sys/amd64/linux32/syscalls.master
@@ -268,12 +268,12 @@
152 AUE_MLOCKALL NOPROTO { int mlockall(int how); }
153 AUE_MUNLOCKALL NOPROTO { int munlockall(void); }
154 AUE_SCHED_SETPARAM STD { int linux_sched_setparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
155 AUE_SCHED_GETPARAM STD { int linux_sched_getparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
156 AUE_SCHED_SETSCHEDULER STD { int linux_sched_setscheduler( \
l_pid_t pid, l_int policy, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
157 AUE_SCHED_GETSCHEDULER STD { int linux_sched_getscheduler( \
l_pid_t pid); }
158 AUE_NULL NOPROTO { int sched_yield(void); }
diff --git a/sys/boot/common/dev_net.c b/sys/boot/common/dev_net.c
index 091ed02..d9d64ca 100644
--- a/sys/boot/common/dev_net.c
+++ b/sys/boot/common/dev_net.c
@@ -171,6 +171,12 @@ net_open(struct open_file *f, ...)
setenv("boot.netif.gateway", inet_ntoa(gateip), 1);
setenv("boot.nfsroot.server", inet_ntoa(rootip), 1);
setenv("boot.nfsroot.path", rootpath, 1);
+ if (intf_mtu != 0) {
+ char mtu[16];
+ sprintf(mtu, "%u", intf_mtu);
+ setenv("boot.netif.mtu", mtu, 1);
+ }
+
}
netdev_opens++;
f->f_devdata = &netdev_sock;
diff --git a/sys/boot/i386/libi386/pxe.c b/sys/boot/i386/libi386/pxe.c
index 49814dd..e57e0af 100644
--- a/sys/boot/i386/libi386/pxe.c
+++ b/sys/boot/i386/libi386/pxe.c
@@ -312,6 +312,11 @@ pxe_open(struct open_file *f, ...)
sprintf(temp, "%6D", bootplayer.CAddr, ":");
setenv("boot.netif.hwaddr", temp, 1);
}
+ if (intf_mtu != 0) {
+ char mtu[16];
+ sprintf(mtu, "%u", intf_mtu);
+ setenv("boot.netif.mtu", mtu, 1);
+ }
setenv("boot.nfsroot.server", inet_ntoa(rootip), 1);
setenv("boot.nfsroot.path", rootpath, 1);
setenv("dhcp.host-name", hostname, 1);
diff --git a/sys/boot/libstand32/Makefile b/sys/boot/libstand32/Makefile
index 4ee8690..c2bb701 100644
--- a/sys/boot/libstand32/Makefile
+++ b/sys/boot/libstand32/Makefile
@@ -45,7 +45,7 @@ CFLAGS+= -G0 -fno-pic -mno-abicalls
.endif
# standalone components and stuff we have modified locally
-SRCS+= gzguts.h zutil.h __main.c assert.c bcd.c bswap.c environment.c getopt.c gets.c \
+SRCS+= gzguts.h zutil.h __main.c assert.c bcd.c environment.c getopt.c gets.c \
globals.c pager.c printf.c strdup.c strerror.c strtol.c strtoul.c random.c \
sbrk.c twiddle.c zalloc.c zalloc_malloc.c
diff --git a/sys/boot/userboot/libstand/Makefile b/sys/boot/userboot/libstand/Makefile
index 4ef62f3..53d6e85 100644
--- a/sys/boot/userboot/libstand/Makefile
+++ b/sys/boot/userboot/libstand/Makefile
@@ -42,7 +42,7 @@ CFLAGS+= -msoft-float -D_STANDALONE
.endif
# standalone components and stuff we have modified locally
-SRCS+= gzguts.h zutil.h __main.c assert.c bcd.c bswap.c environment.c getopt.c gets.c \
+SRCS+= gzguts.h zutil.h __main.c assert.c bcd.c environment.c getopt.c gets.c \
globals.c pager.c printf.c strdup.c strerror.c strtol.c random.c \
sbrk.c twiddle.c zalloc.c zalloc_malloc.c
diff --git a/sys/cam/ctl/ctl_tpc.c b/sys/cam/ctl/ctl_tpc.c
index d21b5e3..bebaccf 100644
--- a/sys/cam/ctl/ctl_tpc.c
+++ b/sys/cam/ctl/ctl_tpc.c
@@ -1104,6 +1104,42 @@ tpc_ranges_length(struct scsi_range_desc *range, int nrange)
}
static int
+tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba)
+{
+ uint64_t b1;
+ uint32_t l1;
+ int i;
+
+ for (i = 0; i < nrange; i++) {
+ b1 = scsi_8btou64(range[i].lba);
+ l1 = scsi_4btoul(range[i].length);
+ if (b1 + l1 < b1 || b1 + l1 > maxlba + 1)
+ return (-1);
+ }
+ return (0);
+}
+
+static int
+tpc_check_ranges_x(struct scsi_range_desc *range, int nrange)
+{
+ uint64_t b1, b2;
+ uint32_t l1, l2;
+ int i, j;
+
+ for (i = 0; i < nrange - 1; i++) {
+ b1 = scsi_8btou64(range[i].lba);
+ l1 = scsi_4btoul(range[i].length);
+ for (j = i + 1; j < nrange; j++) {
+ b2 = scsi_8btou64(range[j].lba);
+ l2 = scsi_4btoul(range[j].length);
+ if (b1 + l1 > b2 && b2 + l2 > b1)
+ return (-1);
+ }
+ }
+ return (0);
+}
+
+static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
int *srange, off_t *soffset)
{
@@ -1916,7 +1952,7 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
struct ctl_port *port;
struct tpc_list *list, *tlist;
struct tpc_token *token;
- int len, lendesc;
+ int len, lendata, lendesc;
CTL_DEBUG_PRINT(("ctl_populate_token\n"));
@@ -1953,10 +1989,19 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
}
data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
+ lendata = scsi_2btoul(data->length);
+ if (lendata < sizeof(struct scsi_populate_token_data) - 2 +
+ sizeof(struct scsi_range_desc)) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+ /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
lendesc = scsi_2btoul(data->range_descriptor_length);
- if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
+ if (lendesc < sizeof(struct scsi_range_desc) ||
+ len < sizeof(struct scsi_populate_token_data) + lendesc ||
+ lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) {
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
- /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
+ /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0);
goto done;
}
/*
@@ -1966,6 +2011,16 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
scsi_4btoul(data->rod_type),
scsi_2btoul(data->range_descriptor_length));
*/
+
+ /* Validate INACTIVITY TIMEOUT field */
+ if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+ /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ goto done;
+ }
+
+ /* Validate ROD TYPE field */
if ((data->flags & EC_PT_RTV) &&
scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
@@ -1973,6 +2028,23 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
goto done;
}
+ /* Validate list of ranges */
+ if (tpc_check_ranges_l(&data->desc[0],
+ scsi_2btoul(data->range_descriptor_length) /
+ sizeof(struct scsi_range_desc),
+ lun->be_lun->maxlba) != 0) {
+ ctl_set_lba_out_of_range(ctsio);
+ goto done;
+ }
+ if (tpc_check_ranges_x(&data->desc[0],
+ scsi_2btoul(data->range_descriptor_length) /
+ sizeof(struct scsi_range_desc)) != 0) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
+ /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ goto done;
+ }
+
list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
list->service_action = cdb->service_action;
list->init_port = ctsio->io_hdr.nexus.targ_port;
@@ -2016,11 +2088,6 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
token->timeout = TPC_DFL_TOKEN_TIMEOUT;
else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
token->timeout = TPC_MIN_TOKEN_TIMEOUT;
- else if (token->timeout > TPC_MAX_TOKEN_TIMEOUT) {
- ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
- /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
- /*bit*/ 0);
- }
memcpy(list->res_token, token->token, sizeof(list->res_token));
list->res_token_valid = 1;
list->curseg = 0;
@@ -2051,7 +2118,7 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
struct ctl_lun *lun;
struct tpc_list *list, *tlist;
struct tpc_token *token;
- int len, lendesc;
+ int len, lendata, lendesc;
CTL_DEBUG_PRINT(("ctl_write_using_token\n"));
@@ -2060,8 +2127,8 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
cdb = (struct scsi_write_using_token *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
- if (len < sizeof(struct scsi_populate_token_data) ||
- len > sizeof(struct scsi_populate_token_data) +
+ if (len < sizeof(struct scsi_write_using_token_data) ||
+ len > sizeof(struct scsi_write_using_token_data) +
TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
/*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
@@ -2087,10 +2154,19 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
}
data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
+ lendata = scsi_2btoul(data->length);
+ if (lendata < sizeof(struct scsi_write_using_token_data) - 2 +
+ sizeof(struct scsi_range_desc)) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+ /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
lendesc = scsi_2btoul(data->range_descriptor_length);
- if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
+ if (lendesc < sizeof(struct scsi_range_desc) ||
+ len < sizeof(struct scsi_write_using_token_data) + lendesc ||
+ lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) {
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
- /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
+ /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0);
goto done;
}
/*
@@ -2099,6 +2175,24 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
data->flags, scsi_8btou64(data->offset_into_rod),
scsi_2btoul(data->range_descriptor_length));
*/
+
+ /* Validate list of ranges */
+ if (tpc_check_ranges_l(&data->desc[0],
+ scsi_2btoul(data->range_descriptor_length) /
+ sizeof(struct scsi_range_desc),
+ lun->be_lun->maxlba) != 0) {
+ ctl_set_lba_out_of_range(ctsio);
+ goto done;
+ }
+ if (tpc_check_ranges_x(&data->desc[0],
+ scsi_2btoul(data->range_descriptor_length) /
+ sizeof(struct scsi_range_desc)) != 0) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
+ /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ goto done;
+ }
+
list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
list->service_action = cdb->service_action;
list->init_port = ctsio->io_hdr.nexus.targ_port;
diff --git a/sys/cam/scsi/scsi_enc_safte.c b/sys/cam/scsi/scsi_enc_safte.c
index 8282d01..8b2a592 100644
--- a/sys/cam/scsi/scsi_enc_safte.c
+++ b/sys/cam/scsi/scsi_enc_safte.c
@@ -292,11 +292,8 @@ safte_process_config(enc_softc_t *enc, struct enc_fsm_state *state,
cfg->DoorLock + cfg->Ntherm + cfg->Nspkrs + cfg->Ntstats + 1;
ENC_FREE_AND_NULL(enc->enc_cache.elm_map);
enc->enc_cache.elm_map =
- ENC_MALLOCZ(enc->enc_cache.nelms * sizeof(enc_element_t));
- if (enc->enc_cache.elm_map == NULL) {
- enc->enc_cache.nelms = 0;
- return (ENOMEM);
- }
+ malloc(enc->enc_cache.nelms * sizeof(enc_element_t),
+ M_SCSIENC, M_WAITOK|M_ZERO);
r = 0;
/*
diff --git a/sys/cam/scsi/scsi_enc_ses.c b/sys/cam/scsi/scsi_enc_ses.c
index 34b0238..8387ef2 100644
--- a/sys/cam/scsi/scsi_enc_ses.c
+++ b/sys/cam/scsi/scsi_enc_ses.c
@@ -715,13 +715,15 @@ ses_cache_clone(enc_softc_t *enc, enc_cache_t *src, enc_cache_t *dst)
* The element map is independent even though it starts out
* pointing to the same constant page data.
*/
- dst->elm_map = ENC_MALLOCZ(dst->nelms * sizeof(enc_element_t));
+ dst->elm_map = malloc(dst->nelms * sizeof(enc_element_t),
+ M_SCSIENC, M_WAITOK);
memcpy(dst->elm_map, src->elm_map, dst->nelms * sizeof(enc_element_t));
for (dst_elm = dst->elm_map, src_elm = src->elm_map,
last_elm = &src->elm_map[src->nelms];
src_elm != last_elm; src_elm++, dst_elm++) {
- dst_elm->elm_private = ENC_MALLOCZ(sizeof(ses_element_t));
+ dst_elm->elm_private = malloc(sizeof(ses_element_t),
+ M_SCSIENC, M_WAITOK);
memcpy(dst_elm->elm_private, src_elm->elm_private,
sizeof(ses_element_t));
}
@@ -1066,11 +1068,7 @@ ses_set_physpath(enc_softc_t *enc, enc_element_t *elm,
cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
cdai.buftype = CDAI_TYPE_SCSI_DEVID;
cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
- cdai.buf = devid = ENC_MALLOCZ(cdai.bufsiz);
- if (devid == NULL) {
- ret = ENOMEM;
- goto out;
- }
+ cdai.buf = devid = malloc(cdai.bufsiz, M_SCSIENC, M_WAITOK|M_ZERO);
cam_periph_lock(enc->periph);
xpt_action((union ccb *)&cdai);
if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
@@ -1371,12 +1369,8 @@ ses_process_config(enc_softc_t *enc, struct enc_fsm_state *state,
* Now waltz through all the subenclosures summing the number of
* types available in each.
*/
- subencs = ENC_MALLOCZ(ses_cfg_page_get_num_subenc(cfg_page)
- * sizeof(*subencs));
- if (subencs == NULL) {
- err = ENOMEM;
- goto out;
- }
+ subencs = malloc(ses_cfg_page_get_num_subenc(cfg_page)
+ * sizeof(*subencs), M_SCSIENC, M_WAITOK|M_ZERO);
/*
* Sub-enclosure data is const after construction (i.e. when
* accessed via our cache object.
@@ -1414,11 +1408,8 @@ ses_process_config(enc_softc_t *enc, struct enc_fsm_state *state,
}
/* Process the type headers. */
- ses_types = ENC_MALLOCZ(ntype * sizeof(*ses_types));
- if (ses_types == NULL) {
- err = ENOMEM;
- goto out;
- }
+ ses_types = malloc(ntype * sizeof(*ses_types),
+ M_SCSIENC, M_WAITOK|M_ZERO);
/*
* Type data is const after construction (i.e. when accessed via
* our cache object.
@@ -1455,11 +1446,8 @@ ses_process_config(enc_softc_t *enc, struct enc_fsm_state *state,
}
/* Create the object map. */
- enc_cache->elm_map = ENC_MALLOCZ(nelm * sizeof(enc_element_t));
- if (enc_cache->elm_map == NULL) {
- err = ENOMEM;
- goto out;
- }
+ enc_cache->elm_map = malloc(nelm * sizeof(enc_element_t),
+ M_SCSIENC, M_WAITOK|M_ZERO);
enc_cache->nelms = nelm;
ses_iter_init(enc, enc_cache, &iter);
@@ -1473,11 +1461,8 @@ ses_process_config(enc_softc_t *enc, struct enc_fsm_state *state,
element->subenclosure = thdr->etype_subenc;
element->enctype = thdr->etype_elm_type;
element->overall_status_elem = iter.type_element_index == 0;
- element->elm_private = ENC_MALLOCZ(sizeof(ses_element_t));
- if (element->elm_private == NULL) {
- err = ENOMEM;
- goto out;
- }
+ element->elm_private = malloc(sizeof(ses_element_t),
+ M_SCSIENC, M_WAITOK|M_ZERO);
ENC_DLOG(enc, "%s: creating elmpriv %d(%d,%d) subenc %d "
"type 0x%x\n", __func__, iter.global_element_index,
iter.type_index, iter.type_element_index,
diff --git a/sys/compat/linux/linux_file.c b/sys/compat/linux/linux_file.c
index 1a47196..e371985 100644
--- a/sys/compat/linux/linux_file.c
+++ b/sys/compat/linux/linux_file.c
@@ -69,108 +69,106 @@ __FBSDID("$FreeBSD$");
int
linux_creat(struct thread *td, struct linux_creat_args *args)
{
- char *path;
- int error;
-
- LCONVPATHEXIST(td, args->path, &path);
+ char *path;
+ int error;
+ LCONVPATHEXIST(td, args->path, &path);
#ifdef DEBUG
if (ldebug(creat))
printf(ARGS(creat, "%s, %d"), path, args->mode);
#endif
- error = kern_open(td, path, UIO_SYSSPACE, O_WRONLY | O_CREAT | O_TRUNC,
- args->mode);
- LFREEPATH(path);
- return (error);
+ error = kern_openat(td, AT_FDCWD, path, UIO_SYSSPACE,
+ O_WRONLY | O_CREAT | O_TRUNC, args->mode);
+ LFREEPATH(path);
+ return (error);
}
static int
linux_common_open(struct thread *td, int dirfd, char *path, int l_flags, int mode)
{
- cap_rights_t rights;
- struct proc *p = td->td_proc;
- struct file *fp;
- int fd;
- int bsd_flags, error;
-
- bsd_flags = 0;
- switch (l_flags & LINUX_O_ACCMODE) {
- case LINUX_O_WRONLY:
- bsd_flags |= O_WRONLY;
- break;
- case LINUX_O_RDWR:
- bsd_flags |= O_RDWR;
- break;
- default:
- bsd_flags |= O_RDONLY;
- }
- if (l_flags & LINUX_O_NDELAY)
- bsd_flags |= O_NONBLOCK;
- if (l_flags & LINUX_O_APPEND)
- bsd_flags |= O_APPEND;
- if (l_flags & LINUX_O_SYNC)
- bsd_flags |= O_FSYNC;
- if (l_flags & LINUX_O_NONBLOCK)
- bsd_flags |= O_NONBLOCK;
- if (l_flags & LINUX_FASYNC)
- bsd_flags |= O_ASYNC;
- if (l_flags & LINUX_O_CREAT)
- bsd_flags |= O_CREAT;
- if (l_flags & LINUX_O_TRUNC)
- bsd_flags |= O_TRUNC;
- if (l_flags & LINUX_O_EXCL)
- bsd_flags |= O_EXCL;
- if (l_flags & LINUX_O_NOCTTY)
- bsd_flags |= O_NOCTTY;
- if (l_flags & LINUX_O_DIRECT)
- bsd_flags |= O_DIRECT;
- if (l_flags & LINUX_O_NOFOLLOW)
- bsd_flags |= O_NOFOLLOW;
- if (l_flags & LINUX_O_DIRECTORY)
- bsd_flags |= O_DIRECTORY;
- /* XXX LINUX_O_NOATIME: unable to be easily implemented. */
-
- error = kern_openat(td, dirfd, path, UIO_SYSSPACE, bsd_flags, mode);
- if (error != 0)
- goto done;
-
- if (bsd_flags & O_NOCTTY)
- goto done;
-
- /*
- * XXX In between kern_open() and fget(), another process
- * having the same filedesc could use that fd without
- * checking below.
- */
- fd = td->td_retval[0];
- if (fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp) == 0) {
- if (fp->f_type != DTYPE_VNODE) {
- fdrop(fp, td);
- goto done;
- }
- sx_slock(&proctree_lock);
- PROC_LOCK(p);
- if (SESS_LEADER(p) && !(p->p_flag & P_CONTROLT)) {
- PROC_UNLOCK(p);
- sx_sunlock(&proctree_lock);
- /* XXXPJD: Verify if TIOCSCTTY is allowed. */
- (void) fo_ioctl(fp, TIOCSCTTY, (caddr_t) 0,
- td->td_ucred, td);
- } else {
- PROC_UNLOCK(p);
- sx_sunlock(&proctree_lock);
- }
- fdrop(fp, td);
- }
+ cap_rights_t rights;
+ struct proc *p = td->td_proc;
+ struct file *fp;
+ int fd;
+ int bsd_flags, error;
+
+ bsd_flags = 0;
+ switch (l_flags & LINUX_O_ACCMODE) {
+ case LINUX_O_WRONLY:
+ bsd_flags |= O_WRONLY;
+ break;
+ case LINUX_O_RDWR:
+ bsd_flags |= O_RDWR;
+ break;
+ default:
+ bsd_flags |= O_RDONLY;
+ }
+ if (l_flags & LINUX_O_NDELAY)
+ bsd_flags |= O_NONBLOCK;
+ if (l_flags & LINUX_O_APPEND)
+ bsd_flags |= O_APPEND;
+ if (l_flags & LINUX_O_SYNC)
+ bsd_flags |= O_FSYNC;
+ if (l_flags & LINUX_O_NONBLOCK)
+ bsd_flags |= O_NONBLOCK;
+ if (l_flags & LINUX_FASYNC)
+ bsd_flags |= O_ASYNC;
+ if (l_flags & LINUX_O_CREAT)
+ bsd_flags |= O_CREAT;
+ if (l_flags & LINUX_O_TRUNC)
+ bsd_flags |= O_TRUNC;
+ if (l_flags & LINUX_O_EXCL)
+ bsd_flags |= O_EXCL;
+ if (l_flags & LINUX_O_NOCTTY)
+ bsd_flags |= O_NOCTTY;
+ if (l_flags & LINUX_O_DIRECT)
+ bsd_flags |= O_DIRECT;
+ if (l_flags & LINUX_O_NOFOLLOW)
+ bsd_flags |= O_NOFOLLOW;
+ if (l_flags & LINUX_O_DIRECTORY)
+ bsd_flags |= O_DIRECTORY;
+ /* XXX LINUX_O_NOATIME: unable to be easily implemented. */
+
+ error = kern_openat(td, dirfd, path, UIO_SYSSPACE, bsd_flags, mode);
+ if (error != 0)
+ goto done;
+ if (bsd_flags & O_NOCTTY)
+ goto done;
+
+ /*
+ * XXX In between kern_open() and fget(), another process
+ * having the same filedesc could use that fd without
+ * checking below.
+ */
+ fd = td->td_retval[0];
+ if (fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp) == 0) {
+ if (fp->f_type != DTYPE_VNODE) {
+ fdrop(fp, td);
+ goto done;
+ }
+ sx_slock(&proctree_lock);
+ PROC_LOCK(p);
+ if (SESS_LEADER(p) && !(p->p_flag & P_CONTROLT)) {
+ PROC_UNLOCK(p);
+ sx_sunlock(&proctree_lock);
+ /* XXXPJD: Verify if TIOCSCTTY is allowed. */
+ (void) fo_ioctl(fp, TIOCSCTTY, (caddr_t) 0,
+ td->td_ucred, td);
+ } else {
+ PROC_UNLOCK(p);
+ sx_sunlock(&proctree_lock);
+ }
+ fdrop(fp, td);
+ }
done:
#ifdef DEBUG
- if (ldebug(open))
- printf(LMSG("open returns error %d"), error);
+ if (ldebug(open))
+ printf(LMSG("open returns error %d"), error);
#endif
- LFREEPATH(path);
- return (error);
+ LFREEPATH(path);
+ return (error);
}
int
@@ -195,44 +193,41 @@ linux_openat(struct thread *td, struct linux_openat_args *args)
int
linux_open(struct thread *td, struct linux_open_args *args)
{
- char *path;
-
- if (args->flags & LINUX_O_CREAT)
- LCONVPATHCREAT(td, args->path, &path);
- else
- LCONVPATHEXIST(td, args->path, &path);
+ char *path;
+ if (args->flags & LINUX_O_CREAT)
+ LCONVPATHCREAT(td, args->path, &path);
+ else
+ LCONVPATHEXIST(td, args->path, &path);
#ifdef DEBUG
if (ldebug(open))
printf(ARGS(open, "%s, 0x%x, 0x%x"),
path, args->flags, args->mode);
#endif
-
return (linux_common_open(td, AT_FDCWD, path, args->flags, args->mode));
}
int
linux_lseek(struct thread *td, struct linux_lseek_args *args)
{
-
- struct lseek_args /* {
- int fd;
- int pad;
- off_t offset;
- int whence;
- } */ tmp_args;
- int error;
+ struct lseek_args /* {
+ int fd;
+ int pad;
+ off_t offset;
+ int whence;
+ } */ tmp_args;
+ int error;
#ifdef DEBUG
if (ldebug(lseek))
printf(ARGS(lseek, "%d, %ld, %d"),
args->fdes, (long)args->off, args->whence);
#endif
- tmp_args.fd = args->fdes;
- tmp_args.offset = (off_t)args->off;
- tmp_args.whence = args->whence;
- error = sys_lseek(td, &tmp_args);
- return error;
+ tmp_args.fd = args->fdes;
+ tmp_args.offset = (off_t)args->off;
+ tmp_args.whence = args->whence;
+ error = sys_lseek(td, &tmp_args);
+ return (error);
}
#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
@@ -255,13 +250,13 @@ linux_llseek(struct thread *td, struct linux_llseek_args *args)
bsd_args.whence = args->whence;
if ((error = sys_lseek(td, &bsd_args)))
- return error;
+ return (error);
if ((error = copyout(td->td_retval, args->res, sizeof (off_t))))
- return error;
+ return (error);
td->td_retval[0] = 0;
- return 0;
+ return (0);
}
int
@@ -272,7 +267,7 @@ linux_readdir(struct thread *td, struct linux_readdir_args *args)
lda.fd = args->fd;
lda.dent = args->dent;
lda.count = 1;
- return linux_getdents(td, &lda);
+ return (linux_getdents(td, &lda));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
@@ -945,7 +940,7 @@ linux_ftruncate(struct thread *td, struct linux_ftruncate_args *args)
int pad;
off_t length;
} */ nuap;
-
+
nuap.fd = args->fd;
nuap.length = args->length;
return (sys_ftruncate(td, &nuap));
@@ -1016,7 +1011,7 @@ linux_fdatasync(td, uap)
struct fsync_args bsd;
bsd.fd = uap->fd;
- return sys_fsync(td, &bsd);
+ return (sys_fsync(td, &bsd));
}
int
@@ -1033,9 +1028,7 @@ linux_pread(td, uap)
bsd.buf = uap->buf;
bsd.nbyte = uap->nbyte;
bsd.offset = uap->offset;
-
error = sys_pread(td, &bsd);
-
if (error == 0) {
/* This seems to violate POSIX but linux does it */
error = fgetvp(td, uap->fd,
@@ -1048,7 +1041,6 @@ linux_pread(td, uap)
}
vrele(vp);
}
-
return (error);
}
@@ -1063,7 +1055,7 @@ linux_pwrite(td, uap)
bsd.buf = uap->buf;
bsd.nbyte = uap->nbyte;
bsd.offset = uap->offset;
- return sys_pwrite(td, &bsd);
+ return (sys_pwrite(td, &bsd));
}
int
diff --git a/sys/compat/linux/linux_futex.c b/sys/compat/linux/linux_futex.c
index c7688ae..14d2d528 100644
--- a/sys/compat/linux/linux_futex.c
+++ b/sys/compat/linux/linux_futex.c
@@ -1,7 +1,9 @@
/* $NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */
/*-
- * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
+ * Copyright (c) 2009-2016 Dmitry Chagin
+ * Copyright (c) 2005 Emmanuel Dreyfus
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -53,9 +55,10 @@ __KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $")
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sdt.h>
-#include <sys/sx.h>
#include <sys/umtx.h>
+#include <vm/vm_extern.h>
+
#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
@@ -107,7 +110,7 @@ LIN_SDT_PROBE_DEFINE3(futex, futex_get, entry, "uint32_t *",
LIN_SDT_PROBE_DEFINE0(futex, futex_get, error);
LIN_SDT_PROBE_DEFINE1(futex, futex_get, return, "int");
LIN_SDT_PROBE_DEFINE3(futex, futex_sleep, entry, "struct futex *",
- "struct waiting_proc **", "int");
+ "struct waiting_proc **", "struct timespec *");
LIN_SDT_PROBE_DEFINE5(futex, futex_sleep, requeue_error, "int", "uint32_t *",
"struct waiting_proc *", "uint32_t *", "uint32_t");
LIN_SDT_PROBE_DEFINE3(futex, futex_sleep, sleep_error, "int", "uint32_t *",
@@ -126,7 +129,7 @@ LIN_SDT_PROBE_DEFINE3(futex, futex_requeue, requeue, "uint32_t *",
"struct waiting_proc *", "uint32_t");
LIN_SDT_PROBE_DEFINE1(futex, futex_requeue, return, "int");
LIN_SDT_PROBE_DEFINE4(futex, futex_wait, entry, "struct futex *",
- "struct waiting_proc **", "int", "uint32_t");
+ "struct waiting_proc **", "struct timespec *", "uint32_t");
LIN_SDT_PROBE_DEFINE1(futex, futex_wait, sleep_error, "int");
LIN_SDT_PROBE_DEFINE1(futex, futex_wait, return, "int");
LIN_SDT_PROBE_DEFINE3(futex, futex_atomic_op, entry, "struct thread *",
@@ -140,7 +143,6 @@ LIN_SDT_PROBE_DEFINE1(futex, futex_atomic_op, return, "int");
LIN_SDT_PROBE_DEFINE2(futex, linux_sys_futex, entry, "struct thread *",
"struct linux_sys_futex_args *");
LIN_SDT_PROBE_DEFINE0(futex, linux_sys_futex, unimplemented_clockswitch);
-LIN_SDT_PROBE_DEFINE1(futex, linux_sys_futex, itimerfix_error, "int");
LIN_SDT_PROBE_DEFINE1(futex, linux_sys_futex, copyin_error, "int");
LIN_SDT_PROBE_DEFINE0(futex, linux_sys_futex, invalid_cmp_requeue_use);
LIN_SDT_PROBE_DEFINE3(futex, linux_sys_futex, debug_wait, "uint32_t *",
@@ -195,7 +197,7 @@ struct waiting_proc {
};
struct futex {
- struct sx f_lck;
+ struct mtx f_lck;
uint32_t *f_uaddr; /* user-supplied value, for debug */
struct umtx_key f_key;
uint32_t f_refcount;
@@ -206,20 +208,22 @@ struct futex {
struct futex_list futex_list;
-#define FUTEX_LOCK(f) sx_xlock(&(f)->f_lck)
-#define FUTEX_UNLOCK(f) sx_xunlock(&(f)->f_lck)
+#define FUTEX_LOCK(f) mtx_lock(&(f)->f_lck)
+#define FUTEX_LOCKED(f) mtx_owned(&(f)->f_lck)
+#define FUTEX_UNLOCK(f) mtx_unlock(&(f)->f_lck)
#define FUTEX_INIT(f) do { \
- sx_init_flags(&(f)->f_lck, "ftlk", \
- SX_DUPOK); \
+ mtx_init(&(f)->f_lck, "ftlk", NULL, \
+ MTX_DUPOK); \
LIN_SDT_PROBE1(futex, futex, create, \
&(f)->f_lck); \
} while (0)
#define FUTEX_DESTROY(f) do { \
LIN_SDT_PROBE1(futex, futex, destroy, \
&(f)->f_lck); \
- sx_destroy(&(f)->f_lck); \
+ mtx_destroy(&(f)->f_lck); \
} while (0)
-#define FUTEX_ASSERT_LOCKED(f) sx_assert(&(f)->f_lck, SA_XLOCKED)
+#define FUTEX_ASSERT_LOCKED(f) mtx_assert(&(f)->f_lck, MA_OWNED)
+#define FUTEX_ASSERT_UNLOCKED(f) mtx_assert(&(f)->f_lck, MA_NOTOWNED)
struct mtx futex_mtx; /* protects the futex list */
#define FUTEXES_LOCK do { \
@@ -238,6 +242,7 @@ struct mtx futex_mtx; /* protects the futex list */
#define FUTEX_DONTCREATE 0x2 /* don't create futex if not exists */
#define FUTEX_DONTEXISTS 0x4 /* return EINVAL if futex exists */
#define FUTEX_SHARED 0x8 /* shared futex */
+#define FUTEX_DONTLOCK 0x10 /* don't lock futex */
/* wp_flags */
#define FUTEX_WP_REQUEUED 0x1 /* wp requeued - wp moved from wp_list
@@ -252,11 +257,15 @@ static void futex_put(struct futex *, struct waiting_proc *);
static int futex_get0(uint32_t *, struct futex **f, uint32_t);
static int futex_get(uint32_t *, struct waiting_proc **, struct futex **,
uint32_t);
-static int futex_sleep(struct futex *, struct waiting_proc *, int);
+static int futex_sleep(struct futex *, struct waiting_proc *, struct timespec *);
static int futex_wake(struct futex *, int, uint32_t);
static int futex_requeue(struct futex *, int, struct futex *, int);
-static int futex_wait(struct futex *, struct waiting_proc *, int,
+static int futex_copyin_timeout(int, struct l_timespec *, int,
+ struct timespec *);
+static int futex_wait(struct futex *, struct waiting_proc *, struct timespec *,
uint32_t);
+static void futex_lock(struct futex *);
+static void futex_unlock(struct futex *);
static int futex_atomic_op(struct thread *, int, uint32_t *);
static int handle_futex_death(struct linux_emuldata *, uint32_t *,
unsigned int);
@@ -271,12 +280,39 @@ int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);
+static int
+futex_copyin_timeout(int op, struct l_timespec *luts, int clockrt,
+ struct timespec *ts)
+{
+ struct l_timespec lts;
+ struct timespec kts;
+ int error;
+
+ error = copyin(luts, &lts, sizeof(lts));
+ if (error)
+ return (error);
+
+ error = linux_to_native_timespec(ts, &lts);
+ if (error)
+ return (error);
+ if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
+ return (EINVAL);
+
+ if (clockrt) {
+ nanotime(&kts);
+ timespecsub(ts, &kts);
+ } else if (op == LINUX_FUTEX_WAIT_BITSET) {
+ nanouptime(&kts);
+ timespecsub(ts, &kts);
+ }
+ return (error);
+}
+
static void
futex_put(struct futex *f, struct waiting_proc *wp)
{
LIN_SDT_PROBE2(futex, futex_put, entry, f, wp);
- FUTEX_ASSERT_LOCKED(f);
if (wp != NULL) {
if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
@@ -287,7 +323,8 @@ futex_put(struct futex *f, struct waiting_proc *wp)
if (--f->f_refcount == 0) {
LIST_REMOVE(f, f_list);
FUTEXES_UNLOCK;
- FUTEX_UNLOCK(f);
+ if (FUTEX_LOCKED(f))
+ futex_unlock(f);
LIN_SDT_PROBE3(futex, futex_put, destroy, f->f_uaddr,
f->f_refcount, f->f_key.shared);
@@ -306,7 +343,8 @@ futex_put(struct futex *f, struct waiting_proc *wp)
LINUX_CTR3(sys_futex, "futex_put uaddr %p ref %d shared %d",
f->f_uaddr, f->f_refcount, f->f_key.shared);
FUTEXES_UNLOCK;
- FUTEX_UNLOCK(f);
+ if (FUTEX_LOCKED(f))
+ futex_unlock(f);
LIN_SDT_PROBE0(futex, futex_put, return);
}
@@ -334,7 +372,8 @@ retry:
LIST_FOREACH(f, &futex_list, f_list) {
if (umtx_key_match(&f->f_key, &key)) {
if (tmpf != NULL) {
- FUTEX_UNLOCK(tmpf);
+ if (FUTEX_LOCKED(tmpf))
+ futex_unlock(tmpf);
FUTEX_DESTROY(tmpf);
free(tmpf, M_FUTEX);
}
@@ -355,7 +394,8 @@ retry:
FUTEXES_UNLOCK;
umtx_key_release(&key);
- FUTEX_LOCK(f);
+ if ((flags & FUTEX_DONTLOCK) == 0)
+ futex_lock(f);
*newf = f;
LIN_SDT_PROBE3(futex, futex_get0, shared, uaddr,
f->f_refcount, f->f_key.shared);
@@ -391,7 +431,8 @@ retry:
* Lock the new futex before an insert into the futex_list
* to prevent futex usage by other.
*/
- FUTEX_LOCK(tmpf);
+ if ((flags & FUTEX_DONTLOCK) == 0)
+ futex_lock(tmpf);
goto retry;
}
@@ -439,16 +480,56 @@ futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
return (error);
}
+static inline void
+futex_lock(struct futex *f)
+{
+
+ LINUX_CTR3(sys_futex, "futex_lock uaddr %p ref %d shared %d",
+ f->f_uaddr, f->f_refcount, f->f_key.shared);
+ FUTEX_ASSERT_UNLOCKED(f);
+ FUTEX_LOCK(f);
+}
+
+static inline void
+futex_unlock(struct futex *f)
+{
+
+ LINUX_CTR3(sys_futex, "futex_unlock uaddr %p ref %d shared %d",
+ f->f_uaddr, f->f_refcount, f->f_key.shared);
+ FUTEX_ASSERT_LOCKED(f);
+ FUTEX_UNLOCK(f);
+}
+
static int
-futex_sleep(struct futex *f, struct waiting_proc *wp, int timeout)
+futex_sleep(struct futex *f, struct waiting_proc *wp, struct timespec *ts)
{
+ struct timespec uts;
+ sbintime_t sbt, prec, tmp;
+ time_t over;
int error;
FUTEX_ASSERT_LOCKED(f);
- LIN_SDT_PROBE3(futex, futex_sleep, entry, f, wp, timeout);
- LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %d ref %d",
- f->f_uaddr, wp, timeout, f->f_refcount);
- error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
+ if (ts != NULL) {
+ uts = *ts;
+ if (uts.tv_sec > INT32_MAX / 2) {
+ over = uts.tv_sec - INT32_MAX / 2;
+ uts.tv_sec -= over;
+ }
+ tmp = tstosbt(uts);
+ if (TIMESEL(&sbt, tmp))
+ sbt += tc_tick_sbt;
+ sbt += tmp;
+ prec = tmp;
+ prec >>= tc_precexp;
+ } else {
+ sbt = 0;
+ prec = 0;
+ }
+ LIN_SDT_PROBE3(futex, futex_sleep, entry, f, wp, sbt);
+ LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %ld ref %d",
+ f->f_uaddr, wp, sbt, f->f_refcount);
+
+ error = msleep_sbt(wp, &f->f_lck, PCATCH, "futex", sbt, prec, C_ABSOLUTE);
if (wp->wp_flags & FUTEX_WP_REQUEUED) {
KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
@@ -464,7 +545,7 @@ futex_sleep(struct futex *f, struct waiting_proc *wp, int timeout)
wp->wp_futex->f_refcount);
futex_put(f, NULL);
f = wp->wp_futex;
- FUTEX_LOCK(f);
+ futex_lock(f);
} else {
if (error) {
LIN_SDT_PROBE3(futex, futex_sleep, sleep_error, error,
@@ -566,12 +647,12 @@ futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
}
static int
-futex_wait(struct futex *f, struct waiting_proc *wp, int timeout_hz,
+futex_wait(struct futex *f, struct waiting_proc *wp, struct timespec *ts,
uint32_t bitset)
{
int error;
- LIN_SDT_PROBE4(futex, futex_wait, entry, f, wp, timeout_hz, bitset);
+ LIN_SDT_PROBE4(futex, futex_wait, entry, f, wp, ts, bitset);
if (bitset == 0) {
LIN_SDT_PROBE1(futex, futex_wait, return, EINVAL);
@@ -579,7 +660,7 @@ futex_wait(struct futex *f, struct waiting_proc *wp, int timeout_hz,
}
f->f_bitset = bitset;
- error = futex_sleep(f, wp, timeout_hz);
+ error = futex_sleep(f, wp, ts);
if (error)
LIN_SDT_PROBE1(futex, futex_wait, sleep_error, error);
if (error == EWOULDBLOCK)
@@ -605,7 +686,7 @@ futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
LIN_SDT_PROBE4(futex, futex_atomic_op, decoded_op, op, cmp, oparg,
cmparg);
-
+
/* XXX: Linux verifies access here and returns EFAULT */
LIN_SDT_PROBE0(futex, futex_atomic_op, missing_access_check);
@@ -671,11 +752,8 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
struct linux_pemuldata *pem;
struct waiting_proc *wp;
struct futex *f, *f2;
- struct l_timespec ltimeout;
- struct timespec timeout;
- struct timeval utv, ctv;
- int timeout_hz;
- int error;
+ struct timespec uts, *ts;
+ int error, save;
uint32_t flags, val;
LIN_SDT_PROBE2(futex, linux_sys_futex, entry, td, args);
@@ -717,37 +795,19 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
args->uaddr, args->val, args->val3);
if (args->timeout != NULL) {
- error = copyin(args->timeout, &ltimeout, sizeof(ltimeout));
+ error = futex_copyin_timeout(args->op, args->timeout,
+ clockrt, &uts);
if (error) {
LIN_SDT_PROBE1(futex, linux_sys_futex, copyin_error,
error);
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
- error = linux_to_native_timespec(&timeout, &ltimeout);
- if (error)
- return (error);
- TIMESPEC_TO_TIMEVAL(&utv, &timeout);
- error = itimerfix(&utv);
- if (error) {
- LIN_SDT_PROBE1(futex, linux_sys_futex, itimerfix_error,
- error);
- LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
- return (error);
- }
- if (clockrt) {
- microtime(&ctv);
- timevalsub(&utv, &ctv);
- } else if (args->op == LINUX_FUTEX_WAIT_BITSET) {
- microuptime(&ctv);
- timevalsub(&utv, &ctv);
- }
- if (utv.tv_sec < 0)
- timevalclear(&utv);
- timeout_hz = tvtohz(&utv);
+ ts = &uts;
} else
- timeout_hz = 0;
+ ts = NULL;
+retry0:
error = futex_get(args->uaddr, &wp, &f,
flags | FUTEX_CREATE_WP);
if (error) {
@@ -755,14 +815,16 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
return (error);
}
- error = copyin(args->uaddr, &val, sizeof(val));
+ error = copyin_nofault(args->uaddr, &val, sizeof(val));
if (error) {
+ futex_put(f, wp);
+ error = copyin(args->uaddr, &val, sizeof(val));
+ if (error == 0)
+ goto retry0;
LIN_SDT_PROBE1(futex, linux_sys_futex, copyin_error,
error);
LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
error);
- futex_put(f, wp);
-
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
@@ -780,7 +842,7 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
return (EWOULDBLOCK);
}
- error = futex_wait(f, wp, timeout_hz, args->val3);
+ error = futex_wait(f, wp, ts, args->val3);
break;
case LINUX_FUTEX_WAKE:
@@ -830,7 +892,8 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
return (EINVAL);
}
- error = futex_get(args->uaddr, NULL, &f, flags);
+retry1:
+ error = futex_get(args->uaddr, NULL, &f, flags | FUTEX_DONTLOCK);
if (error) {
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
@@ -844,22 +907,26 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
* returned by FUTEX_CMP_REQUEUE.
*/
error = futex_get(args->uaddr2, NULL, &f2,
- flags | FUTEX_DONTEXISTS);
+ flags | FUTEX_DONTEXISTS | FUTEX_DONTLOCK);
if (error) {
futex_put(f, NULL);
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
- error = copyin(args->uaddr, &val, sizeof(val));
+ futex_lock(f);
+ futex_lock(f2);
+ error = copyin_nofault(args->uaddr, &val, sizeof(val));
if (error) {
+ futex_put(f2, NULL);
+ futex_put(f, NULL);
+ error = copyin(args->uaddr, &val, sizeof(val));
+ if (error == 0)
+ goto retry1;
LIN_SDT_PROBE1(futex, linux_sys_futex, copyin_error,
error);
LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
error);
- futex_put(f2, NULL);
- futex_put(f, NULL);
-
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
@@ -889,50 +956,45 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
args->uaddr, args->val, args->uaddr2, args->val3,
args->timeout);
- error = futex_get(args->uaddr, NULL, &f, flags);
+retry2:
+ error = futex_get(args->uaddr, NULL, &f, flags | FUTEX_DONTLOCK);
if (error) {
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
if (args->uaddr != args->uaddr2)
- error = futex_get(args->uaddr2, NULL, &f2, flags);
+ error = futex_get(args->uaddr2, NULL, &f2,
+ flags | FUTEX_DONTLOCK);
if (error) {
futex_put(f, NULL);
LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
return (error);
}
+ futex_lock(f);
+ futex_lock(f2);
/*
* This function returns positive number as results and
* negative as errors
*/
+ save = vm_fault_disable_pagefaults();
op_ret = futex_atomic_op(td, args->val3, args->uaddr2);
+ vm_fault_enable_pagefaults(save);
LINUX_CTR2(sys_futex, "WAKE_OP atomic_op uaddr %p ret 0x%x",
args->uaddr, op_ret);
if (op_ret < 0) {
- /* XXX: We don't handle the EFAULT yet. */
- if (op_ret != -EFAULT) {
- if (f2 != NULL)
- futex_put(f2, NULL);
- futex_put(f, NULL);
-
- LIN_SDT_PROBE1(futex, linux_sys_futex, return,
- -op_ret);
- return (-op_ret);
- } else {
- LIN_SDT_PROBE0(futex, linux_sys_futex,
- unhandled_efault);
- }
if (f2 != NULL)
futex_put(f2, NULL);
futex_put(f, NULL);
-
- LIN_SDT_PROBE1(futex, linux_sys_futex, return, EFAULT);
- return (EFAULT);
+ error = copyin(args->uaddr2, &val, sizeof(val));
+ if (error == 0)
+ goto retry2;
+ LIN_SDT_PROBE1(futex, linux_sys_futex, return, error);
+ return (error);
}
ret = futex_wake(f, args->val, args->val3);
@@ -997,7 +1059,6 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
return (ENOSYS);
case LINUX_FUTEX_REQUEUE:
-
/*
* Glibc does not use this operation since version 2.3.3,
* as it is racy and replaced by FUTEX_CMP_REQUEUE operation.
diff --git a/sys/compat/linux/linux_ioctl.c b/sys/compat/linux/linux_ioctl.c
index 8858e2f..7185a12 100644
--- a/sys/compat/linux/linux_ioctl.c
+++ b/sys/compat/linux/linux_ioctl.c
@@ -915,6 +915,8 @@ linux_ioctl_termio(struct thread *td, struct linux_ioctl_args *args)
case LINUX_TIOCGSERIAL: {
struct linux_serial_struct lss;
+
+ bzero(&lss, sizeof(lss));
lss.type = LINUX_PORT_16550A;
lss.flags = 0;
lss.close_delay = 0;
@@ -976,7 +978,7 @@ linux_ioctl_termio(struct thread *td, struct linux_ioctl_args *args)
error = fo_ioctl(fp, TIOCGETD, (caddr_t)&bsd_line,
td->td_ucred, td);
if (error)
- return (error);
+ break;
switch (bsd_line) {
case TTYDISC:
linux_line = LINUX_N_TTY;
diff --git a/sys/compat/linux/linux_misc.c b/sys/compat/linux/linux_misc.c
index 86f9ac3..db7488b 100644
--- a/sys/compat/linux/linux_misc.c
+++ b/sys/compat/linux/linux_misc.c
@@ -150,6 +150,7 @@ linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
int i, j;
struct timespec ts;
+ bzero(&sysinfo, sizeof(sysinfo));
getnanouptime(&ts);
if (ts.tv_nsec != 0)
ts.tv_sec++;
diff --git a/sys/compat/linux/linux_socket.c b/sys/compat/linux/linux_socket.c
index b33b260..bc543c7 100644
--- a/sys/compat/linux/linux_socket.c
+++ b/sys/compat/linux/linux_socket.c
@@ -462,12 +462,16 @@ bsd_to_linux_sockaddr(struct sockaddr *arg)
{
struct sockaddr sa;
size_t sa_len = sizeof(struct sockaddr);
- int error;
+ int error, bdom;
if ((error = copyin(arg, &sa, sa_len)))
return (error);
- *(u_short *)&sa = sa.sa_family;
+ bdom = bsd_to_linux_domain(sa.sa_family);
+ if (bdom == -1)
+ return (EAFNOSUPPORT);
+
+ *(u_short *)&sa = bdom;
return (copyout(&sa, arg, sa_len));
}
@@ -476,12 +480,16 @@ linux_to_bsd_sockaddr(struct sockaddr *arg, int len)
{
struct sockaddr sa;
size_t sa_len = sizeof(struct sockaddr);
- int error;
+ int error, bdom;
if ((error = copyin(arg, &sa, sa_len)))
return (error);
- sa.sa_family = *(sa_family_t *)&sa;
+ bdom = linux_to_bsd_domain(*(sa_family_t *)&sa);
+ if (bdom == -1)
+ return (EAFNOSUPPORT);
+
+ sa.sa_family = bdom;
sa.sa_len = len;
return (copyout(&sa, arg, sa_len));
}
@@ -1594,10 +1602,10 @@ linux_getsockopt(struct thread *td, struct linux_getsockopt_args *args)
} */ bsd_args;
l_timeval linux_tv;
struct timeval tv;
- socklen_t tv_len, xulen;
+ socklen_t tv_len, xulen, len;
struct xucred xu;
struct l_ucred lxu;
- int error, name;
+ int error, name, newval;
bsd_args.s = args->s;
bsd_args.level = linux_to_bsd_sockopt_level(args->level);
@@ -1636,6 +1644,15 @@ linux_getsockopt(struct thread *td, struct linux_getsockopt_args *args)
return (copyout(&lxu, PTRIN(args->optval), sizeof(lxu)));
/* NOTREACHED */
break;
+ case SO_ERROR:
+ len = sizeof(newval);
+ error = kern_getsockopt(td, args->s, bsd_args.level,
+ name, &newval, UIO_SYSSPACE, &len);
+ if (error)
+ return (error);
+ newval = -SV_ABI_ERRNO(td->td_proc, newval);
+ return (copyout(&newval, PTRIN(args->optval), len));
+ /* NOTREACHED */
default:
break;
}
diff --git a/sys/compat/ndis/subr_ntoskrnl.c b/sys/compat/ndis/subr_ntoskrnl.c
index 482392d..1d198c0 100644
--- a/sys/compat/ndis/subr_ntoskrnl.c
+++ b/sys/compat/ndis/subr_ntoskrnl.c
@@ -3188,17 +3188,14 @@ atol(str)
static int
rand(void)
{
- struct timeval tv;
- microtime(&tv);
- srandom(tv.tv_usec);
- return ((int)random());
+ return (random());
}
static void
-srand(seed)
- unsigned int seed;
+srand(unsigned int seed)
{
+
srandom(seed);
}
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 2c22b3f..f96b4f3 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -208,6 +208,8 @@ dev/if_ndis/if_ndis_pccard.c optional ndis pccard
dev/if_ndis/if_ndis_pci.c optional ndis cardbus | ndis pci
dev/if_ndis/if_ndis_usb.c optional ndis usb
dev/io/iodev.c optional io
+dev/ioat/ioat.c optional ioat pci
+dev/ioat/ioat_test.c optional ioat pci
dev/ipmi/ipmi.c optional ipmi
dev/ipmi/ipmi_acpi.c optional ipmi acpi
dev/ipmi/ipmi_isa.c optional ipmi isa
@@ -317,6 +319,17 @@ dev/qlxgbe/ql_isr.c optional qlxgbe pci
dev/qlxgbe/ql_misc.c optional qlxgbe pci
dev/qlxgbe/ql_os.c optional qlxgbe pci
dev/qlxgbe/ql_reset.c optional qlxgbe pci
+dev/sfxge/common/ef10_ev.c optional sfxge pci
+dev/sfxge/common/ef10_filter.c optional sfxge pci
+dev/sfxge/common/ef10_intr.c optional sfxge pci
+dev/sfxge/common/ef10_mac.c optional sfxge pci
+dev/sfxge/common/ef10_mcdi.c optional sfxge pci
+dev/sfxge/common/ef10_nic.c optional sfxge pci
+dev/sfxge/common/ef10_nvram.c optional sfxge pci
+dev/sfxge/common/ef10_phy.c optional sfxge pci
+dev/sfxge/common/ef10_rx.c optional sfxge pci
+dev/sfxge/common/ef10_tx.c optional sfxge pci
+dev/sfxge/common/ef10_vpd.c optional sfxge pci
dev/sfxge/common/efx_bootcfg.c optional sfxge pci
dev/sfxge/common/efx_crc32.c optional sfxge pci
dev/sfxge/common/efx_ev.c optional sfxge pci
@@ -336,18 +349,9 @@ dev/sfxge/common/efx_sram.c optional sfxge pci
dev/sfxge/common/efx_tx.c optional sfxge pci
dev/sfxge/common/efx_vpd.c optional sfxge pci
dev/sfxge/common/efx_wol.c optional sfxge pci
-dev/sfxge/common/hunt_ev.c optional sfxge pci
-dev/sfxge/common/hunt_filter.c optional sfxge pci
-dev/sfxge/common/hunt_intr.c optional sfxge pci
-dev/sfxge/common/hunt_mac.c optional sfxge pci
-dev/sfxge/common/hunt_mcdi.c optional sfxge pci
dev/sfxge/common/hunt_nic.c optional sfxge pci
-dev/sfxge/common/hunt_nvram.c optional sfxge pci
dev/sfxge/common/hunt_phy.c optional sfxge pci
-dev/sfxge/common/hunt_rx.c optional sfxge pci
-dev/sfxge/common/hunt_sram.c optional sfxge pci
-dev/sfxge/common/hunt_tx.c optional sfxge pci
-dev/sfxge/common/hunt_vpd.c optional sfxge pci
+dev/sfxge/common/mcdi_mon.c optional sfxge pci
dev/sfxge/common/medford_nic.c optional sfxge pci
dev/sfxge/common/siena_mac.c optional sfxge pci
dev/sfxge/common/siena_mcdi.c optional sfxge pci
diff --git a/sys/conf/kmod.mk b/sys/conf/kmod.mk
index 6b81ec0..8b363d4 100644
--- a/sys/conf/kmod.mk
+++ b/sys/conf/kmod.mk
@@ -164,7 +164,7 @@ SRCS+= ${KMOD:S/$/.c/}
CLEANFILES+= ${KMOD:S/$/.c/}
.for _firmw in ${FIRMWS}
-${_firmw:C/\:.*$/.fwo/}: ${_firmw:C/\:.*$//}
+${_firmw:C/\:.*$/.fwo/:T}: ${_firmw:C/\:.*$//}
@${ECHO} ${_firmw:C/\:.*$//} ${.ALLSRC:M*${_firmw:C/\:.*$//}}
@if [ -e ${_firmw:C/\:.*$//} ]; then \
${LD} -b binary --no-warn-mismatch ${LDFLAGS} \
@@ -176,7 +176,7 @@ ${_firmw:C/\:.*$/.fwo/}: ${_firmw:C/\:.*$//}
rm ${_firmw:C/\:.*$//}; \
fi
-OBJS+= ${_firmw:C/\:.*$/.fwo/}
+OBJS+= ${_firmw:C/\:.*$/.fwo/:T}
.endfor
.endif
diff --git a/sys/dev/fb/vesa.c b/sys/dev/fb/vesa.c
index 48067b6..1db24bd 100644
--- a/sys/dev/fb/vesa.c
+++ b/sys/dev/fb/vesa.c
@@ -1026,7 +1026,8 @@ vesa_bios_init(void)
++modes;
}
- vesa_vmode[modes].vi_mode = EOT;
+ if (vesa_vmode != NULL)
+ vesa_vmode[modes].vi_mode = EOT;
if (bootverbose)
printf("VESA: %d mode(s) found\n", modes);
diff --git a/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
index 0159a9d..a780f9e 100644
--- a/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
+++ b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
@@ -81,12 +81,6 @@ __FBSDID("$FreeBSD$");
#define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS
#define STORVSC_MAX_TARGETS (2)
-#define STORVSC_WIN7_MAJOR 4
-#define STORVSC_WIN7_MINOR 2
-
-#define STORVSC_WIN8_MAJOR 5
-#define STORVSC_WIN8_MINOR 1
-
#define VSTOR_PKT_SIZE (sizeof(struct vstor_packet) - vmscsi_size_delta)
#define HV_ALIGN(x, a) roundup2(x, a)
@@ -208,7 +202,7 @@ static struct storvsc_driver_props g_drv_props_table[] = {
* Sense buffer size changed in win8; have a run-time
* variable to track the size we should use.
*/
-static int sense_buffer_size;
+static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
/*
* The size of the vmscsi_request has changed in win8. The
@@ -218,9 +212,46 @@ static int sense_buffer_size;
* Track the correct size we need to apply.
*/
static int vmscsi_size_delta;
+/*
+ * The storage protocol version is determined during the
+ * initial exchange with the host. It will indicate which
+ * storage functionality is available in the host.
+*/
+static int vmstor_proto_version;
+
+struct vmstor_proto {
+ int proto_version;
+ int sense_buffer_size;
+ int vmscsi_size_delta;
+};
-static int storvsc_current_major;
-static int storvsc_current_minor;
+static const struct vmstor_proto vmstor_proto_list[] = {
+ {
+ VMSTOR_PROTOCOL_VERSION_WIN10,
+ POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+ 0
+ },
+ {
+ VMSTOR_PROTOCOL_VERSION_WIN8_1,
+ POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+ 0
+ },
+ {
+ VMSTOR_PROTOCOL_VERSION_WIN8,
+ POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+ 0
+ },
+ {
+ VMSTOR_PROTOCOL_VERSION_WIN7,
+ PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+ sizeof(struct vmscsi_win8_extension),
+ },
+ {
+ VMSTOR_PROTOCOL_VERSION_WIN6,
+ PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+ sizeof(struct vmscsi_win8_extension),
+ }
+};
/* static functions */
static int storvsc_probe(device_t dev);
@@ -435,7 +466,7 @@ storvsc_send_multichannel_request(struct hv_device *dev, int max_chans)
static int
hv_storvsc_channel_init(struct hv_device *dev)
{
- int ret = 0;
+ int ret = 0, i;
struct hv_storvsc_request *request;
struct vstor_packet *vstor_packet;
struct storvsc_softc *sc;
@@ -484,19 +515,20 @@ hv_storvsc_channel_init(struct hv_device *dev)
goto cleanup;
}
- /* reuse the packet for version range supported */
+ for (i = 0; i < nitems(vmstor_proto_list); i++) {
+ /* reuse the packet for version range supported */
- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- vstor_packet->u.version.major_minor =
- VMSTOR_PROTOCOL_VERSION(storvsc_current_major, storvsc_current_minor);
+ vstor_packet->u.version.major_minor =
+ vmstor_proto_list[i].proto_version;
- /* revision is only significant for Windows guests */
- vstor_packet->u.version.revision = 0;
+ /* revision is only significant for Windows guests */
+ vstor_packet->u.version.revision = 0;
- ret = hv_vmbus_channel_send_packet(
+ ret = hv_vmbus_channel_send_packet(
dev->channel,
vstor_packet,
VSTOR_PKT_SIZE,
@@ -504,20 +536,34 @@ hv_storvsc_channel_init(struct hv_device *dev)
HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
+ if (ret != 0)
+ goto cleanup;
- /* wait 5 seconds */
- ret = sema_timedwait(&request->synch_sema, 5 * hz);
+ /* wait 5 seconds */
+ ret = sema_timedwait(&request->synch_sema, 5 * hz);
- if (ret)
- goto cleanup;
+ if (ret)
+ goto cleanup;
- /* TODO: Check returned version */
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
- vstor_packet->status != 0)
- goto cleanup;
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO) {
+ ret = EINVAL;
+ goto cleanup;
+ }
+ if (vstor_packet->status == 0) {
+ vmstor_proto_version =
+ vmstor_proto_list[i].proto_version;
+ sense_buffer_size =
+ vmstor_proto_list[i].sense_buffer_size;
+ vmscsi_size_delta =
+ vmstor_proto_list[i].vmscsi_size_delta;
+ break;
+ }
+ }
+ if (vstor_packet->status != 0) {
+ ret = EINVAL;
+ goto cleanup;
+ }
/**
* Query channel properties
*/
@@ -916,19 +962,6 @@ storvsc_probe(device_t dev)
int ata_disk_enable = 0;
int ret = ENXIO;
- if (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008 ||
- hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7) {
- sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
- vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
- storvsc_current_major = STORVSC_WIN7_MAJOR;
- storvsc_current_minor = STORVSC_WIN7_MINOR;
- } else {
- sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
- vmscsi_size_delta = 0;
- storvsc_current_major = STORVSC_WIN8_MAJOR;
- storvsc_current_minor = STORVSC_WIN8_MINOR;
- }
-
switch (storvsc_get_storage_type(dev)) {
case DRIVER_BLKVSC:
if(bootverbose)
@@ -2070,6 +2103,13 @@ storvsc_io_done(struct hv_storvsc_request *reqp)
((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
if (cmd->opcode == INQUIRY &&
+ /*
+ * XXX: Temporary work around disk hot plugin on win2k12r2,
+ * only filtering the invalid disk on win10 or 2016 server.
+ * So, the hot plugin on win10 and 2016 server needs
+ * to be fixed.
+ */
+ vmstor_proto_version == VMSTOR_PROTOCOL_VERSION_WIN10 &&
is_inquiry_valid(
(const struct scsi_inquiry_data *)csio->data_ptr) == 0) {
ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
diff --git a/sys/dev/hyperv/storvsc/hv_vstorage.h b/sys/dev/hyperv/storvsc/hv_vstorage.h
index 026189c..f2b9480 100644
--- a/sys/dev/hyperv/storvsc/hv_vstorage.h
+++ b/sys/dev/hyperv/storvsc/hv_vstorage.h
@@ -41,6 +41,11 @@
#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
(((MINOR_) & 0xff) ))
+#define VMSTOR_PROTOCOL_VERSION_WIN6 VMSTOR_PROTOCOL_VERSION(2, 0)
+#define VMSTOR_PROTOCOL_VERSION_WIN7 VMSTOR_PROTOCOL_VERSION(4, 2)
+#define VMSTOR_PROTOCOL_VERSION_WIN8 VMSTOR_PROTOCOL_VERSION(5, 1)
+#define VMSTOR_PROTOCOL_VERSION_WIN8_1 VMSTOR_PROTOCOL_VERSION(6, 0)
+#define VMSTOR_PROTOCOL_VERSION_WIN10 VMSTOR_PROTOCOL_VERSION(6, 2)
/*
* Invalid version.
*/
diff --git a/sys/dev/iicbus/iic.c b/sys/dev/iicbus/iic.c
index 84e1314..9ac7f74 100644
--- a/sys/dev/iicbus/iic.c
+++ b/sys/dev/iicbus/iic.c
@@ -299,9 +299,16 @@ iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags)
parent = device_get_parent(iicdev);
error = 0;
+ if (d->nmsgs > IIC_RDRW_MAX_MSGS)
+ return (EINVAL);
+
buf = malloc(sizeof(*d->msgs) * d->nmsgs, M_IIC, M_WAITOK);
error = copyin(d->msgs, buf, sizeof(*d->msgs) * d->nmsgs);
+ if (error != 0) {
+ free(buf, M_IIC);
+ return (error);
+ }
/* Alloc kernel buffers for userland data, copyin write data */
usrbufs = malloc(sizeof(void *) * d->nmsgs, M_IIC, M_WAITOK | M_ZERO);
@@ -317,6 +324,8 @@ iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags)
m->buf = NULL;
if (error != 0)
continue;
+
+ /* m->len is uint16_t, so allocation size is capped at 64K. */
m->buf = malloc(m->len, M_IIC, M_WAITOK);
if (!(m->flags & IIC_M_RD))
error = copyin(usrbufs[i], m->buf, m->len);
diff --git a/sys/dev/iicbus/iic.h b/sys/dev/iicbus/iic.h
index ba98d28..8ae1912 100644
--- a/sys/dev/iicbus/iic.h
+++ b/sys/dev/iicbus/iic.h
@@ -56,6 +56,8 @@ struct iic_rdwr_data {
uint32_t nmsgs;
};
+#define IIC_RDRW_MAX_MSGS 42
+
#define I2CSTART _IOW('i', 1, struct iiccmd) /* start condition */
#define I2CSTOP _IO('i', 2) /* stop condition */
#define I2CRSTCARD _IOW('i', 3, struct iiccmd) /* reset the card */
diff --git a/sys/dev/ioat/ioat.c b/sys/dev/ioat/ioat.c
new file mode 100644
index 0000000..aff048a
--- /dev/null
+++ b/sys/dev/ioat/ioat.c
@@ -0,0 +1,2091 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/time.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+
+#include "ioat.h"
+#include "ioat_hw.h"
+#include "ioat_internal.h"
+
+#ifndef BUS_SPACE_MAXADDR_40BIT
+#define BUS_SPACE_MAXADDR_40BIT 0xFFFFFFFFFFULL
+#endif
+#define IOAT_INTR_TIMO (hz / 10)
+#define IOAT_REFLK (&ioat->submit_lock)
+
+static int ioat_probe(device_t device);
+static int ioat_attach(device_t device);
+static int ioat_detach(device_t device);
+static int ioat_setup_intr(struct ioat_softc *ioat);
+static int ioat_teardown_intr(struct ioat_softc *ioat);
+static int ioat3_attach(device_t device);
+static int ioat_start_channel(struct ioat_softc *ioat);
+static int ioat_map_pci_bar(struct ioat_softc *ioat);
+static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
+ int error);
+static void ioat_interrupt_handler(void *arg);
+static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
+static int chanerr_to_errno(uint32_t);
+static void ioat_process_events(struct ioat_softc *ioat);
+static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
+static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
+static void ioat_free_ring(struct ioat_softc *, uint32_t size,
+ struct ioat_descriptor **);
+static void ioat_free_ring_entry(struct ioat_softc *ioat,
+ struct ioat_descriptor *desc);
+static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *,
+ int mflags);
+static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
+static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
+ uint32_t index);
+static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *,
+ uint32_t size, boolean_t need_dscr, int mflags);
+static int ring_grow(struct ioat_softc *, uint32_t oldorder,
+ struct ioat_descriptor **);
+static int ring_shrink(struct ioat_softc *, uint32_t oldorder,
+ struct ioat_descriptor **);
+static void ioat_halted_debug(struct ioat_softc *, uint32_t);
+static void ioat_timer_callback(void *arg);
+static void dump_descriptor(void *hw_desc);
+static void ioat_submit_single(struct ioat_softc *ioat);
+static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
+ int error);
+static int ioat_reset_hw(struct ioat_softc *ioat);
+static void ioat_reset_hw_task(void *, int);
+static void ioat_setup_sysctl(device_t device);
+static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
+static inline struct ioat_softc *ioat_get(struct ioat_softc *,
+ enum ioat_ref_kind);
+static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
+static inline void _ioat_putn(struct ioat_softc *, uint32_t,
+ enum ioat_ref_kind, boolean_t);
+static inline void ioat_putn(struct ioat_softc *, uint32_t,
+ enum ioat_ref_kind);
+static inline void ioat_putn_locked(struct ioat_softc *, uint32_t,
+ enum ioat_ref_kind);
+static void ioat_drain_locked(struct ioat_softc *);
+
+/*
+ * Logging helper: prints via device_printf() when the message verbosity
+ * (v) is at or below the hw.ioat.debug_level sysctl.  NOTE: the macro
+ * body references a variable named `ioat' — callers must have a local
+ * or parameter `ioat' (struct ioat_softc *) in scope.
+ */
+#define	ioat_log_message(v, ...) do {					\
+	if ((v) <= g_ioat_debug_level) {				\
+		device_printf(ioat->device, __VA_ARGS__);		\
+	}								\
+} while (0)
+
+MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
+SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");
+
+/* Tunable: force legacy INTx even when MSI-X is available (RDTUN). */
+static int g_force_legacy_interrupts;
+SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
+    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");
+
+int g_ioat_debug_level = 0;
+SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
+    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");
+
+/*
+ * OS <-> Driver interface structures
+ */
+static device_method_t ioat_pci_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe, ioat_probe),
+	DEVMETHOD(device_attach, ioat_attach),
+	DEVMETHOD(device_detach, ioat_detach),
+	DEVMETHOD_END
+};
+
+static driver_t ioat_pci_driver = {
+	"ioat",
+	ioat_pci_methods,
+	sizeof(struct ioat_softc),
+};
+
+static devclass_t ioat_devclass;
+DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
+MODULE_VERSION(ioat, 1);
+
+/*
+ * Private data structures
+ */
+/* Registry of attached channels, indexed by softc chan_idx; consumers
+ * look channels up through ioat_get_dmaengine(). */
+static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
+static int ioat_channel_index = 0;
+SYSCTL_INT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
+    "Number of IOAT channels attached");
+
+/*
+ * PCI device-ID match table for supported Intel I/OAT DMA channels,
+ * terminated by a zero entry.  Consulted by ioat_probe().
+ */
+static struct _pcsid
+{
+	u_int32_t type;
+	const char *desc;
+} pci_ids[] = {
+	{ 0x34308086, "TBG IOAT Ch0" },
+	{ 0x34318086, "TBG IOAT Ch1" },
+	{ 0x34328086, "TBG IOAT Ch2" },
+	{ 0x34338086, "TBG IOAT Ch3" },
+	{ 0x34298086, "TBG IOAT Ch4" },
+	{ 0x342a8086, "TBG IOAT Ch5" },
+	{ 0x342b8086, "TBG IOAT Ch6" },
+	{ 0x342c8086, "TBG IOAT Ch7" },
+
+	{ 0x37108086, "JSF IOAT Ch0" },
+	{ 0x37118086, "JSF IOAT Ch1" },
+	{ 0x37128086, "JSF IOAT Ch2" },
+	{ 0x37138086, "JSF IOAT Ch3" },
+	{ 0x37148086, "JSF IOAT Ch4" },
+	{ 0x37158086, "JSF IOAT Ch5" },
+	{ 0x37168086, "JSF IOAT Ch6" },
+	{ 0x37178086, "JSF IOAT Ch7" },
+	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
+	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },
+
+	{ 0x3c208086, "SNB IOAT Ch0" },
+	{ 0x3c218086, "SNB IOAT Ch1" },
+	{ 0x3c228086, "SNB IOAT Ch2" },
+	{ 0x3c238086, "SNB IOAT Ch3" },
+	{ 0x3c248086, "SNB IOAT Ch4" },
+	{ 0x3c258086, "SNB IOAT Ch5" },
+	{ 0x3c268086, "SNB IOAT Ch6" },
+	{ 0x3c278086, "SNB IOAT Ch7" },
+	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
+	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },
+
+	{ 0x0e208086, "IVB IOAT Ch0" },
+	{ 0x0e218086, "IVB IOAT Ch1" },
+	{ 0x0e228086, "IVB IOAT Ch2" },
+	{ 0x0e238086, "IVB IOAT Ch3" },
+	{ 0x0e248086, "IVB IOAT Ch4" },
+	{ 0x0e258086, "IVB IOAT Ch5" },
+	{ 0x0e268086, "IVB IOAT Ch6" },
+	{ 0x0e278086, "IVB IOAT Ch7" },
+	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
+	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },
+
+	{ 0x2f208086, "HSW IOAT Ch0" },
+	{ 0x2f218086, "HSW IOAT Ch1" },
+	{ 0x2f228086, "HSW IOAT Ch2" },
+	{ 0x2f238086, "HSW IOAT Ch3" },
+	{ 0x2f248086, "HSW IOAT Ch4" },
+	{ 0x2f258086, "HSW IOAT Ch5" },
+	{ 0x2f268086, "HSW IOAT Ch6" },
+	{ 0x2f278086, "HSW IOAT Ch7" },
+	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
+	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },
+
+	{ 0x0c508086, "BWD IOAT Ch0" },
+	{ 0x0c518086, "BWD IOAT Ch1" },
+	{ 0x0c528086, "BWD IOAT Ch2" },
+	{ 0x0c538086, "BWD IOAT Ch3" },
+
+	{ 0x6f508086, "BDXDE IOAT Ch0" },
+	{ 0x6f518086, "BDXDE IOAT Ch1" },
+	{ 0x6f528086, "BDXDE IOAT Ch2" },
+	{ 0x6f538086, "BDXDE IOAT Ch3" },
+
+	{ 0x6f208086, "BDX IOAT Ch0" },
+	{ 0x6f218086, "BDX IOAT Ch1" },
+	{ 0x6f228086, "BDX IOAT Ch2" },
+	{ 0x6f238086, "BDX IOAT Ch3" },
+	{ 0x6f248086, "BDX IOAT Ch4" },
+	{ 0x6f258086, "BDX IOAT Ch5" },
+	{ 0x6f268086, "BDX IOAT Ch6" },
+	{ 0x6f278086, "BDX IOAT Ch7" },
+	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
+	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },
+
+	{ 0x00000000, NULL }
+};
+
+/*
+ * OS <-> Driver linkage functions
+ */
+/*
+ * Device probe: look the PCI device ID up in pci_ids and, on a match,
+ * install the human-readable description.  Returns 0 on a match,
+ * ENXIO otherwise.
+ */
+static int
+ioat_probe(device_t device)
+{
+	const struct _pcsid *entry;
+	u_int32_t devid;
+
+	devid = pci_get_devid(device);
+	for (entry = pci_ids; entry->type != 0; entry++) {
+		if (entry->type != devid)
+			continue;
+		device_set_desc(device, entry->desc);
+		return (0);
+	}
+	return (ENXIO);
+}
+
+/*
+ * Device attach: map BAR0, verify the engine is at least I/OAT v3
+ * (older hardware is rejected with ENODEV), set up the descriptor
+ * ring and interrupts, reset the channel, and publish it in the
+ * ioat_channel[] registry.  On any failure the partially-initialized
+ * softc is torn down by calling ioat_detach(), so each init step must
+ * leave state ioat_detach() can safely unwind.
+ */
+static int
+ioat_attach(device_t device)
+{
+	struct ioat_softc *ioat;
+	int error;
+
+	ioat = DEVICE2SOFTC(device);
+	ioat->device = device;
+
+	error = ioat_map_pci_bar(ioat);
+	if (error != 0)
+		goto err;
+
+	/* Hardware older than I/OAT 3.0 is not supported by this driver. */
+	ioat->version = ioat_read_cbver(ioat);
+	if (ioat->version < IOAT_VER_3_0) {
+		error = ENODEV;
+		goto err;
+	}
+
+	error = ioat3_attach(device);
+	if (error != 0)
+		goto err;
+
+	error = pci_enable_busmaster(device);
+	if (error != 0)
+		goto err;
+
+	error = ioat_setup_intr(ioat);
+	if (error != 0)
+		goto err;
+
+	error = ioat_reset_hw(ioat);
+	if (error != 0)
+		goto err;
+
+	ioat_process_events(ioat);
+	ioat_setup_sysctl(device);
+
+	/* Make the channel visible to ioat_get_dmaengine() consumers. */
+	ioat->chan_idx = ioat_channel_index;
+	ioat_channel[ioat_channel_index++] = ioat;
+	ioat_test_attach();
+
+err:
+	if (error != 0)
+		ioat_detach(device);
+	return (error);
+}
+
+/*
+ * Device detach: quiesce the channel, wait for all outstanding
+ * references to drain, then release interrupts, DMA memory, and PCI
+ * resources.  Also called by ioat_attach() on a partial failure, so
+ * every teardown step guards against resources that were never
+ * allocated.
+ */
+static int
+ioat_detach(device_t device)
+{
+	struct ioat_softc *ioat;
+
+	ioat = DEVICE2SOFTC(device);
+
+	ioat_test_detach();
+	taskqueue_drain(taskqueue_thread, &ioat->reset_task);
+
+	/*
+	 * Mark the channel dying under IOAT_REFLK and wake any thread
+	 * sleeping in ioat_get_dmaengine() on &ioat->quiescing.
+	 */
+	mtx_lock(IOAT_REFLK);
+	ioat->quiescing = TRUE;
+	ioat->destroying = TRUE;
+	wakeup(&ioat->quiescing);
+
+	/* Unpublish before draining so no new references can be taken. */
+	ioat_channel[ioat->chan_idx] = NULL;
+
+	ioat_drain_locked(ioat);
+	mtx_unlock(IOAT_REFLK);
+
+	ioat_teardown_intr(ioat);
+	callout_drain(&ioat->timer);
+
+	pci_disable_busmaster(device);
+
+	if (ioat->pci_resource != NULL)
+		bus_release_resource(device, SYS_RES_MEMORY,
+		    ioat->pci_resource_id, ioat->pci_resource);
+
+	if (ioat->ring != NULL)
+		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);
+
+	if (ioat->comp_update != NULL) {
+		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
+		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
+		    ioat->comp_update_map);
+		bus_dma_tag_destroy(ioat->comp_update_tag);
+	}
+
+	bus_dma_tag_destroy(ioat->hw_desc_tag);
+
+	return (0);
+}
+
+/*
+ * Release the interrupt handler, its IRQ resource, and any MSI
+ * allocation.  Safe to call when setup never completed: each step is
+ * guarded by a NULL check.  Always returns 0.
+ */
+static int
+ioat_teardown_intr(struct ioat_softc *ioat)
+{
+	device_t dev;
+
+	dev = ioat->device;
+	if (ioat->tag != NULL)
+		bus_teardown_intr(dev, ioat->res, ioat->tag);
+	if (ioat->res != NULL)
+		bus_release_resource(dev, SYS_RES_IRQ,
+		    rman_get_rid(ioat->res), ioat->res);
+	pci_release_msi(dev);
+	return (0);
+}
+
+/*
+ * Kick the channel by submitting a NULL descriptor, then poll (up to
+ * ~100us in 1us steps) for the hardware to report idle.  Returns 0
+ * once idle, or ENXIO with a logged channel-error status if the
+ * engine never starts.
+ */
+static int
+ioat_start_channel(struct ioat_softc *ioat)
+{
+	uint64_t status;
+	uint32_t chanerr;
+	int i;
+
+	ioat_acquire(&ioat->dmaengine);
+	ioat_null(&ioat->dmaengine, NULL, NULL, 0);
+	ioat_release(&ioat->dmaengine);
+
+	for (i = 0; i < 100; i++) {
+		DELAY(1);
+		status = ioat_get_chansts(ioat);
+		if (is_ioat_idle(status))
+			return (0);
+	}
+
+	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
+	ioat_log_message(0, "could not start channel: "
+	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
+	    IOAT_CHANERR_STR);
+	return (ENXIO);
+}
+
+/*
+ * Initialize Hardware
+ */
+/*
+ * Hardware/ring initialization for I/OAT v3+ engines: read
+ * capabilities and transfer limits, initialize locks, allocate the
+ * 8-byte DMA-able completion-update area, and build the initial
+ * circular descriptor ring (2^IOAT_MIN_ORDER entries) with each
+ * descriptor's hardware `next' pointer chained to its successor.
+ *
+ * Fix: the return values of both bus_dma_tag_create() calls were
+ * previously discarded; a failed tag creation would have let the
+ * subsequent bus_dmamem_alloc()/bus_dmamem load run with an invalid
+ * tag.  Both are now checked and propagated.
+ *
+ * Resources acquired here are released by ioat_detach(), which
+ * ioat_attach() invokes on any error return.
+ */
+static int
+ioat3_attach(device_t device)
+{
+	struct ioat_softc *ioat;
+	struct ioat_descriptor **ring;
+	struct ioat_descriptor *next;
+	struct ioat_dma_hw_descriptor *dma_hw_desc;
+	int i, num_descriptors;
+	int error;
+	uint8_t xfercap;
+
+	error = 0;
+	ioat = DEVICE2SOFTC(device);
+	ioat->capabilities = ioat_read_dmacapability(ioat);
+
+	ioat_log_message(1, "Capabilities: %b\n", (int)ioat->capabilities,
+	    IOAT_DMACAP_STR);
+
+	/* Max transfer size is advertised as a power of two. */
+	xfercap = ioat_read_xfercap(ioat);
+	ioat->max_xfer_size = 1 << xfercap;
+
+	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
+	    IOAT_INTRDELAY_SUPPORTED) != 0;
+	if (ioat->intrdelay_supported)
+		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;
+
+	/* TODO: need to check DCA here if we ever do XOR/PQ */
+
+	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
+	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
+	callout_init(&ioat->timer, 1);
+	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);
+
+	/* Establish lock order for Witness */
+	mtx_lock(&ioat->submit_lock);
+	mtx_lock(&ioat->cleanup_lock);
+	mtx_unlock(&ioat->cleanup_lock);
+	mtx_unlock(&ioat->submit_lock);
+
+	ioat->is_resize_pending = FALSE;
+	ioat->is_completion_pending = FALSE;
+	ioat->is_reset_pending = FALSE;
+	ioat->is_channel_running = FALSE;
+
+	/* Tag for the 8-byte completion-status writeback area. */
+	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
+	    sizeof(uint64_t), 0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
+	    NULL, NULL, sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
+	    &ioat->comp_update_tag);
+	if (error != 0)
+		return (error);
+
+	error = bus_dmamem_alloc(ioat->comp_update_tag,
+	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
+	if (ioat->comp_update == NULL)
+		return (ENOMEM);
+
+	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
+	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
+	    0);
+	if (error != 0)
+		return (error);
+
+	ioat->ring_size_order = IOAT_MIN_ORDER;
+
+	num_descriptors = 1 << ioat->ring_size_order;
+
+	/*
+	 * Hardware descriptors must be 64-byte aligned and within the
+	 * low 40 bits of address space.
+	 */
+	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
+	    BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+	    sizeof(struct ioat_dma_hw_descriptor), 1,
+	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
+	    &ioat->hw_desc_tag);
+	if (error != 0)
+		return (error);
+
+	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
+	    M_ZERO | M_WAITOK);
+
+	ring = ioat->ring;
+	for (i = 0; i < num_descriptors; i++) {
+		ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK);
+		if (ring[i] == NULL)
+			return (ENOMEM);
+
+		ring[i]->id = i;
+	}
+
+	/* Chain each hardware descriptor to its successor... */
+	for (i = 0; i < num_descriptors - 1; i++) {
+		next = ring[i + 1];
+		dma_hw_desc = ring[i]->u.dma;
+
+		dma_hw_desc->next = next->hw_desc_bus_addr;
+	}
+
+	/* ...and close the circle from the last entry back to the first. */
+	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;
+
+	ioat->head = ioat->hw_head = 0;
+	ioat->tail = 0;
+	ioat->last_seen = 0;
+	return (0);
+}
+
+/*
+ * Map BAR 0 and cache its bus tag/handle for register access.
+ * Returns 0 on success or ENODEV if the resource cannot be allocated.
+ */
+static int
+ioat_map_pci_bar(struct ioat_softc *ioat)
+{
+
+	ioat->pci_resource_id = PCIR_BAR(0);
+	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
+	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);
+	if (ioat->pci_resource == NULL) {
+		ioat_log_message(0, "unable to allocate pci resource\n");
+		return (ENODEV);
+	}
+
+	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
+	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
+
+	return (0);
+}
+
+/*
+ * busdma load callback for the completion-update area: record the
+ * single segment's bus address in the softc.
+ */
+static void
+ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+	struct ioat_softc *sc;
+
+	KASSERT(error == 0, ("%s: error:%d", __func__, error));
+	sc = arg;
+	sc->comp_update_bus_addr = seg[0].ds_addr;
+}
+
+/*
+ * Generic busdma load callback: store the first segment's bus
+ * address through the bus_addr_t pointer passed as the argument.
+ */
+static void
+ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	bus_addr_t *dest;
+
+	KASSERT(error == 0, ("%s: error:%d", __func__, error));
+	dest = arg;
+	*dest = segs[0].ds_addr;
+}
+
+/*
+ * Interrupt setup and handlers
+ */
+/*
+ * Allocate and wire up the channel interrupt.  A single MSI-X vector
+ * is preferred when the device offers one and the
+ * hw.ioat.force_legacy_interrupts tunable is unset; otherwise a
+ * shared legacy INTx line is used.  Finally the master interrupt
+ * enable bit is set.  Returns 0 or an errno.
+ *
+ * Fix: dropped the local `force_legacy_interrupts', which was
+ * declared and initialized but never read (dead variable).
+ */
+static int
+ioat_setup_intr(struct ioat_softc *ioat)
+{
+	uint32_t num_vectors;
+	int error;
+	boolean_t use_msix;
+
+	use_msix = FALSE;
+
+	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
+		num_vectors = 1;
+		pci_alloc_msix(ioat->device, &num_vectors);
+		if (num_vectors == 1)
+			use_msix = TRUE;
+	}
+
+	if (use_msix) {
+		ioat->rid = 1;
+		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
+		    &ioat->rid, RF_ACTIVE);
+	} else {
+		ioat->rid = 0;
+		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
+		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
+	}
+	if (ioat->res == NULL) {
+		ioat_log_message(0, "bus_alloc_resource failed\n");
+		return (ENOMEM);
+	}
+
+	ioat->tag = NULL;
+	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
+	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
+	if (error != 0) {
+		ioat_log_message(0, "bus_setup_intr failed\n");
+		return (error);
+	}
+
+	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
+	return (0);
+}
+
+/*
+ * Identify hardware models (BWD and BDXDE steppings) whose MSI-X
+ * configuration does not survive a channel reset, by PCI device ID.
+ */
+static boolean_t
+ioat_model_resets_msix(struct ioat_softc *ioat)
+{
+	static const u_int32_t quirk_ids[] = {
+		/* BWD: */
+		0x0c508086, 0x0c518086, 0x0c528086, 0x0c538086,
+		/* BDXDE: */
+		0x6f508086, 0x6f518086, 0x6f528086, 0x6f538086,
+	};
+	u_int32_t pciid;
+	u_int i;
+
+	pciid = pci_get_devid(ioat->device);
+	for (i = 0; i < nitems(quirk_ids); i++) {
+		if (quirk_ids[i] == pciid)
+			return (TRUE);
+	}
+
+	return (FALSE);
+}
+
+/*
+ * Interrupt handler: count the interrupt and process any completed
+ * descriptors.
+ */
+static void
+ioat_interrupt_handler(void *arg)
+{
+	struct ioat_softc *ioat;
+
+	ioat = arg;
+	ioat->stats.interrupts++;
+	ioat_process_events(ioat);
+}
+
+/*
+ * Translate a hardware CHANERR status into an errno: 0 when no error
+ * bits are set, EFAULT for source/destination address errors, and
+ * EIO for everything else (including read/write data errors and
+ * next-descriptor address errors, which are likely driver bugs).
+ */
+static int
+chanerr_to_errno(uint32_t chanerr)
+{
+	int error;
+
+	if (chanerr == 0)
+		error = 0;
+	else if ((chanerr &
+	    (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
+		error = EFAULT;
+	else
+		error = EIO;
+	return (error);
+}
+
+/*
+ * Completion processing, called from the interrupt handler and the
+ * timer.  Walks the ring from tail up to the hardware's last
+ * completed descriptor (read from the DMA-able comp_update area),
+ * invoking each descriptor's callback with status 0.  If the channel
+ * status indicates a halt or suspend, flushes all remaining active
+ * descriptors with an errno derived from CHANERR and schedules a
+ * hardware reset on the system taskqueue.
+ *
+ * Locking: normal completion runs under cleanup_lock only; halt
+ * recovery takes submit_lock then cleanup_lock (the Witness order
+ * established in ioat3_attach()).
+ */
+static void
+ioat_process_events(struct ioat_softc *ioat)
+{
+	struct ioat_descriptor *desc;
+	struct bus_dmadesc *dmadesc;
+	uint64_t comp_update, status;
+	uint32_t completed, chanerr;
+	int error;
+
+	mtx_lock(&ioat->cleanup_lock);
+
+	completed = 0;
+	comp_update = *ioat->comp_update;
+	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;
+
+	CTR0(KTR_IOAT, __func__);
+
+	if (status == ioat->last_seen) {
+		/*
+		 * If we landed in process_events and nothing has been
+		 * completed, check for a timeout due to channel halt.
+		 */
+		comp_update = ioat_get_chansts(ioat);
+		goto out;
+	}
+
+	/* Complete every descriptor up to and including `status'. */
+	while (1) {
+		desc = ioat_get_ring_entry(ioat, ioat->tail);
+		dmadesc = &desc->bus_dmadesc;
+		CTR1(KTR_IOAT, "completing desc %d", ioat->tail);
+
+		if (dmadesc->callback_fn != NULL)
+			dmadesc->callback_fn(dmadesc->callback_arg, 0);
+
+		completed++;
+		ioat->tail++;
+		if (desc->hw_desc_bus_addr == status)
+			break;
+	}
+
+	ioat->last_seen = desc->hw_desc_bus_addr;
+
+	/* Ring is empty; re-arm the watchdog timer. */
+	if (ioat->head == ioat->tail) {
+		ioat->is_completion_pending = FALSE;
+		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
+		    ioat_timer_callback, ioat);
+	}
+
+	ioat->stats.descriptors_processed += completed;
+
+out:
+	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
+	mtx_unlock(&ioat->cleanup_lock);
+
+	/* Drop refs and wake ioat_reserve_space() sleepers, unlocked. */
+	if (completed != 0) {
+		ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
+		wakeup(&ioat->tail);
+	}
+
+	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
+		return;
+
+	ioat->stats.channel_halts++;
+
+	/*
+	 * Fatal programming error on this DMA channel.  Flush any outstanding
+	 * work with error status and restart the engine.
+	 */
+	ioat_log_message(0, "Channel halted due to fatal programming error\n");
+	mtx_lock(&ioat->submit_lock);
+	mtx_lock(&ioat->cleanup_lock);
+	ioat->quiescing = TRUE;
+
+	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
+	ioat_halted_debug(ioat, chanerr);
+	ioat->stats.last_halt_chanerr = chanerr;
+
+	while (ioat_get_active(ioat) > 0) {
+		desc = ioat_get_ring_entry(ioat, ioat->tail);
+		dmadesc = &desc->bus_dmadesc;
+		CTR1(KTR_IOAT, "completing err desc %d", ioat->tail);
+
+		if (dmadesc->callback_fn != NULL)
+			dmadesc->callback_fn(dmadesc->callback_arg,
+			    chanerr_to_errno(chanerr));
+
+		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
+		ioat->tail++;
+		ioat->stats.descriptors_processed++;
+		ioat->stats.descriptors_error++;
+	}
+
+	/* Clear error status */
+	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);
+
+	mtx_unlock(&ioat->cleanup_lock);
+	mtx_unlock(&ioat->submit_lock);
+
+	ioat_log_message(0, "Resetting channel to recover from error\n");
+	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
+	KASSERT(error == 0,
+	    ("%s: taskqueue_enqueue failed: %d", __func__, error));
+}
+
+/*
+ * Taskqueue handler that performs the deferred hardware reset
+ * scheduled by ioat_process_events() after a channel halt.
+ */
+static void
+ioat_reset_hw_task(void *ctx, int pending __unused)
+{
+	struct ioat_softc *ioat = ctx;
+	int error;
+
+	ioat_log_message(1, "%s: Resetting channel\n", __func__);
+	error = ioat_reset_hw(ioat);
+	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
+	(void)error;
+}
+
+/*
+ * User API functions
+ */
+/*
+ * Public API: look up channel `index' and return a referenced
+ * dmaengine handle, or NULL if the index is out of range or the
+ * channel is being destroyed.  If the channel is quiescing, M_NOWAIT
+ * callers get NULL immediately; M_WAITOK callers sleep on
+ * &ioat->quiescing until the channel recovers or is destroyed.
+ * The reference must be released with ioat_put_dmaengine().
+ */
+bus_dmaengine_t
+ioat_get_dmaengine(uint32_t index, int flags)
+{
+	struct ioat_softc *ioat;
+
+	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
+	    ("invalid flags: 0x%08x", flags));
+	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
+	    ("invalid wait | nowait"));
+
+	if (index >= ioat_channel_index)
+		return (NULL);
+
+	ioat = ioat_channel[index];
+	if (ioat == NULL || ioat->destroying)
+		return (NULL);
+
+	if (ioat->quiescing) {
+		if ((flags & M_NOWAIT) != 0)
+			return (NULL);
+
+		mtx_lock(IOAT_REFLK);
+		while (ioat->quiescing && !ioat->destroying)
+			msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0);
+		mtx_unlock(IOAT_REFLK);
+
+		if (ioat->destroying)
+			return (NULL);
+	}
+
+	/*
+	 * There's a race here between the quiescing check and HW reset or
+	 * module destroy.
+	 */
+	return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine);
+}
+
+/*
+ * Release a dmaengine reference taken by ioat_get_dmaengine().
+ */
+void
+ioat_put_dmaengine(bus_dmaengine_t dmaengine)
+{
+
+	ioat_put(to_ioat_softc(dmaengine), IOAT_DMAENGINE_REF);
+}
+
+/*
+ * Return the hardware version read from the CBVER register at attach.
+ */
+int
+ioat_get_hwversion(bus_dmaengine_t dmaengine)
+{
+
+	return (to_ioat_softc(dmaengine)->version);
+}
+
+/*
+ * Return the maximum single-transfer size supported by the engine
+ * (derived from XFERCAP at attach).
+ */
+size_t
+ioat_get_max_io_size(bus_dmaengine_t dmaengine)
+{
+
+	return (to_ioat_softc(dmaengine)->max_xfer_size);
+}
+
+/*
+ * Program the interrupt-coalescing delay (microseconds).  Returns
+ * ENODEV when the hardware lacks INTRDELAY support, ERANGE when the
+ * requested delay exceeds the hardware maximum, otherwise 0.  The
+ * value actually accepted by hardware is read back and cached.
+ */
+int
+ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
+{
+	struct ioat_softc *sc;
+
+	sc = to_ioat_softc(dmaengine);
+	if (!sc->intrdelay_supported)
+		return (ENODEV);
+	if (delay > sc->intrdelay_max)
+		return (ERANGE);
+
+	ioat_write_2(sc, IOAT_INTRDELAY_OFFSET, delay);
+	sc->cached_intrdelay = ioat_read_2(sc, IOAT_INTRDELAY_OFFSET) &
+	    IOAT_INTRDELAY_US_MASK;
+	return (0);
+}
+
+/*
+ * Return the maximum programmable interrupt-coalescing delay
+ * (0 when INTRDELAY is unsupported).
+ */
+uint16_t
+ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
+{
+
+	return (to_ioat_softc(dmaengine)->intrdelay_max);
+}
+
+/*
+ * Begin a submission batch: take the submit lock.  Must be paired
+ * with ioat_release(), which publishes the new head to hardware.
+ */
+void
+ioat_acquire(bus_dmaengine_t dmaengine)
+{
+
+	mtx_lock(&to_ioat_softc(dmaengine)->submit_lock);
+	CTR0(KTR_IOAT, __func__);
+}
+
+/*
+ * Acquire the engine and pre-reserve `n' ring slots in one call.
+ * On failure the engine is released again and the error is returned;
+ * on success the caller holds the submit lock.
+ */
+int
+ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
+{
+	struct ioat_softc *sc;
+	int error;
+
+	sc = to_ioat_softc(dmaengine);
+	ioat_acquire(dmaengine);
+	error = ioat_reserve_space(sc, n, mflags);
+	if (error != 0)
+		ioat_release(dmaengine);
+	return (error);
+}
+
+/*
+ * End a submission batch: write the accumulated hardware head to the
+ * DMACOUNT register so the engine starts on the new work, then drop
+ * the submit lock.
+ */
+void
+ioat_release(bus_dmaengine_t dmaengine)
+{
+	struct ioat_softc *sc = to_ioat_softc(dmaengine);
+
+	CTR0(KTR_IOAT, __func__);
+	ioat_write_2(sc, IOAT_DMACOUNT_OFFSET, (uint16_t)sc->hw_head);
+	mtx_unlock(&sc->submit_lock);
+}
+
+/*
+ * Common setup for all operation types: validate the transfer size,
+ * reserve one ring slot (sleeping iff DMA_NO_WAIT is clear), and fill
+ * in the generic fields of the next hardware descriptor.  Returns the
+ * descriptor at the current head, or NULL on failure.  Caller must
+ * hold the submit lock and finish with ioat_submit_single().
+ */
+static struct ioat_descriptor *
+ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
+    uint32_t size, uint64_t src, uint64_t dst,
+    bus_dmaengine_callback_t callback_fn, void *callback_arg,
+    uint32_t flags)
+{
+	struct ioat_generic_hw_descriptor *hw_desc;
+	struct ioat_descriptor *desc;
+	int mflags;
+
+	mtx_assert(&ioat->submit_lock, MA_OWNED);
+
+	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
+	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
+	if ((flags & DMA_NO_WAIT) != 0)
+		mflags = M_NOWAIT;
+	else
+		mflags = M_WAITOK;
+
+	if (size > ioat->max_xfer_size) {
+		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n",
+		    __func__, ioat->max_xfer_size, (unsigned)size);
+		return (NULL);
+	}
+
+	if (ioat_reserve_space(ioat, 1, mflags) != 0)
+		return (NULL);
+
+	desc = ioat_get_ring_entry(ioat, ioat->head);
+	hw_desc = desc->u.generic;
+
+	/* Every descriptor updates the completion-status writeback area. */
+	hw_desc->u.control_raw = 0;
+	hw_desc->u.control_generic.op = op;
+	hw_desc->u.control_generic.completion_update = 1;
+
+	if ((flags & DMA_INT_EN) != 0)
+		hw_desc->u.control_generic.int_enable = 1;
+	if ((flags & DMA_FENCE) != 0)
+		hw_desc->u.control_generic.fence = 1;
+
+	hw_desc->size = size;
+	hw_desc->src_addr = src;
+	hw_desc->dest_addr = dst;
+
+	desc->bus_dmadesc.callback_fn = callback_fn;
+	desc->bus_dmadesc.callback_arg = callback_arg;
+	return (desc);
+}
+
+/*
+ * Queue a NULL (no-op) descriptor; used to kick-start the channel and
+ * to generate a completion without moving data.  Caller must hold the
+ * engine via ioat_acquire().
+ */
+struct bus_dmadesc *
+ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
+    void *callback_arg, uint32_t flags)
+{
+	struct ioat_softc *ioat;
+	struct ioat_descriptor *desc;
+
+	CTR0(KTR_IOAT, __func__);
+	ioat = to_ioat_softc(dmaengine);
+
+	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
+	    callback_arg, flags);
+	if (desc == NULL)
+		return (NULL);
+
+	/* Flag the copy descriptor as a no-op before submitting. */
+	desc->u.dma->u.control.null = 1;
+	ioat_submit_single(ioat);
+	return (&desc->bus_dmadesc);
+}
+
+/*
+ * Queue a memory-to-memory copy of `len' bytes.  Source and
+ * destination must be bus addresses within the low 48 bits.  Caller
+ * must hold the engine via ioat_acquire().
+ */
+struct bus_dmadesc *
+ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
+    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
+    void *callback_arg, uint32_t flags)
+{
+	struct ioat_softc *ioat;
+	struct ioat_descriptor *desc;
+
+	CTR0(KTR_IOAT, __func__);
+	ioat = to_ioat_softc(dmaengine);
+
+	if (((src | dst) & (0xffffull << 48)) != 0) {
+		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
+		    __func__);
+		return (NULL);
+	}
+
+	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
+	    callback_arg, flags);
+	if (desc == NULL)
+		return (NULL);
+
+	if (g_ioat_debug_level >= 3)
+		dump_descriptor(desc->u.dma);
+
+	ioat_submit_single(ioat);
+	return (&desc->bus_dmadesc);
+}
+
+/*
+ * Queue a copy of two page-aligned 4K blocks (8K total).  When the
+ * second source/destination page is not contiguous with the first,
+ * the descriptor's page-break fields carry the second address.
+ * Caller must hold the engine via ioat_acquire().
+ */
+struct bus_dmadesc *
+ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
+    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
+    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
+{
+	struct ioat_dma_hw_descriptor *hw;
+	struct ioat_descriptor *desc;
+	struct ioat_softc *ioat;
+	bus_addr_t all;
+
+	CTR0(KTR_IOAT, __func__);
+	ioat = to_ioat_softc(dmaengine);
+
+	all = src1 | src2 | dst1 | dst2;
+	if ((all & (0xffffull << 48)) != 0) {
+		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
+		    __func__);
+		return (NULL);
+	}
+	if ((all & PAGE_MASK) != 0) {
+		ioat_log_message(0, "%s: Addresses must be page-aligned\n",
+		    __func__);
+		return (NULL);
+	}
+
+	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
+	    callback_fn, callback_arg, flags);
+	if (desc == NULL)
+		return (NULL);
+
+	hw = desc->u.dma;
+	if (src2 != src1 + PAGE_SIZE) {
+		hw->u.control.src_page_break = 1;
+		hw->next_src_addr = src2;
+	}
+	if (dst2 != dst1 + PAGE_SIZE) {
+		hw->u.control.dest_page_break = 1;
+		hw->next_dest_addr = dst2;
+	}
+
+	if (g_ioat_debug_level >= 3)
+		dump_descriptor(hw);
+
+	ioat_submit_single(ioat);
+	return (&desc->bus_dmadesc);
+}
+
+/*
+ * Copy 'len' bytes from src to dst while accumulating a CRC32C over the
+ * data (see ioat.h for the DMA_CRC_* flag semantics).  Requires the
+ * MOVECRC capability.  Returns NULL on invalid flags or addresses, or when
+ * no ring slot is available.
+ */
+struct bus_dmadesc *
+ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
+    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
+    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
+{
+	struct ioat_crc32_hw_descriptor *hw_desc;
+	struct ioat_descriptor *desc;
+	struct ioat_softc *ioat;
+	uint32_t teststore;
+	uint8_t op;
+
+	CTR0(KTR_IOAT, __func__);
+	ioat = to_ioat_softc(dmaengine);
+
+	if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
+		ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",
+		    __func__);
+		return (NULL);
+	}
+	/* CRC descriptors only carry 40-bit addresses. */
+	if (((src | dst) & (0xffffffull << 40)) != 0) {
+		ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n",
+		    __func__);
+		return (NULL);
+	}
+	/* TEST and STORE are mutually exclusive; INLINE requires one of them. */
+	teststore = (flags & _DMA_CRC_TESTSTORE);
+	if (teststore == _DMA_CRC_TESTSTORE) {
+		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
+		return (NULL);
+	}
+	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
+		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
+		    __func__);
+		return (NULL);
+	}
+
+	/* Map the validated flag combination to the hardware opcode. */
+	switch (teststore) {
+	case DMA_CRC_STORE:
+		op = IOAT_OP_MOVECRC_STORE;
+		break;
+	case DMA_CRC_TEST:
+		op = IOAT_OP_MOVECRC_TEST;
+		break;
+	default:
+		KASSERT(teststore == 0, ("bogus"));
+		op = IOAT_OP_MOVECRC;
+		break;
+	}
+
+	/* An out-of-line CRC address must also fit in 40 bits. */
+	if ((flags & DMA_CRC_INLINE) == 0 &&
+	    (crcptr & (0xffffffull << 40)) != 0) {
+		ioat_log_message(0,
+		    "%s: High 24 bits of crcptr invalid\n", __func__);
+		return (NULL);
+	}
+
+	/* The CRC-specific flags are consumed here, not by the generic setup. */
+	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
+	    callback_arg, flags & ~_DMA_CRC_FLAGS);
+	if (desc == NULL)
+		return (NULL);
+
+	hw_desc = desc->u.crc32;
+
+	/* INLINE: CRC lives adjacent to the data; otherwise at crcptr. */
+	if ((flags & DMA_CRC_INLINE) == 0)
+		hw_desc->crc_address = crcptr;
+	else
+		hw_desc->u.control.crc_location = 1;
+
+	if (initialseed != NULL) {
+		hw_desc->u.control.use_seed = 1;
+		hw_desc->seed = *initialseed;
+	}
+
+	if (g_ioat_debug_level >= 3)
+		dump_descriptor(hw_desc);
+
+	ioat_submit_single(ioat);
+	return (&desc->bus_dmadesc);
+}
+
+/*
+ * Compute a CRC32C over 'len' bytes at 'src' without moving any data.
+ * Mirrors ioat_copy_crc() (same flag validation and opcode selection) but
+ * uses the CRC capability/opcodes and passes 0 for the unused destination.
+ */
+struct bus_dmadesc *
+ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
+    uint32_t *initialseed, bus_addr_t crcptr,
+    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
+{
+	struct ioat_crc32_hw_descriptor *hw_desc;
+	struct ioat_descriptor *desc;
+	struct ioat_softc *ioat;
+	uint32_t teststore;
+	uint8_t op;
+
+	CTR0(KTR_IOAT, __func__);
+	ioat = to_ioat_softc(dmaengine);
+
+	if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
+		ioat_log_message(0, "%s: Device lacks CRC capability\n",
+		    __func__);
+		return (NULL);
+	}
+	/* CRC descriptors only carry 40-bit addresses. */
+	if ((src & (0xffffffull << 40)) != 0) {
+		ioat_log_message(0, "%s: High 24 bits of src invalid\n",
+		    __func__);
+		return (NULL);
+	}
+	/* TEST and STORE are mutually exclusive; INLINE requires one of them. */
+	teststore = (flags & _DMA_CRC_TESTSTORE);
+	if (teststore == _DMA_CRC_TESTSTORE) {
+		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
+		return (NULL);
+	}
+	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
+		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
+		    __func__);
+		return (NULL);
+	}
+
+	/* Map the validated flag combination to the hardware opcode. */
+	switch (teststore) {
+	case DMA_CRC_STORE:
+		op = IOAT_OP_CRC_STORE;
+		break;
+	case DMA_CRC_TEST:
+		op = IOAT_OP_CRC_TEST;
+		break;
+	default:
+		KASSERT(teststore == 0, ("bogus"));
+		op = IOAT_OP_CRC;
+		break;
+	}
+
+	/* An out-of-line CRC address must also fit in 40 bits. */
+	if ((flags & DMA_CRC_INLINE) == 0 &&
+	    (crcptr & (0xffffffull << 40)) != 0) {
+		ioat_log_message(0,
+		    "%s: High 24 bits of crcptr invalid\n", __func__);
+		return (NULL);
+	}
+
+	/* No data movement: destination slot is unused (0). */
+	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
+	    callback_arg, flags & ~_DMA_CRC_FLAGS);
+	if (desc == NULL)
+		return (NULL);
+
+	hw_desc = desc->u.crc32;
+
+	/* INLINE: CRC lives adjacent to the data; otherwise at crcptr. */
+	if ((flags & DMA_CRC_INLINE) == 0)
+		hw_desc->crc_address = crcptr;
+	else
+		hw_desc->u.control.crc_location = 1;
+
+	if (initialseed != NULL) {
+		hw_desc->u.control.use_seed = 1;
+		hw_desc->seed = *initialseed;
+	}
+
+	if (g_ioat_debug_level >= 3)
+		dump_descriptor(hw_desc);
+
+	ioat_submit_single(ioat);
+	return (&desc->bus_dmadesc);
+}
+
+/*
+ * Queue a block-fill: write the 64-bit 'fillpattern' across 'len' bytes at
+ * physical address 'dst'.  Requires the BFILL capability; dst must fit in
+ * the low 48 bits.
+ */
+struct bus_dmadesc *
+ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
+    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
+    uint32_t flags)
+{
+	struct ioat_fill_hw_descriptor *hw_desc;
+	struct ioat_descriptor *desc;
+	struct ioat_softc *ioat;
+
+	CTR0(KTR_IOAT, __func__);
+	ioat = to_ioat_softc(dmaengine);
+
+	if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
+		ioat_log_message(0, "%s: Device lacks BFILL capability\n",
+		    __func__);
+		return (NULL);
+	}
+
+	if ((dst & (0xffffull << 48)) != 0) {
+		ioat_log_message(0, "%s: High 16 bits of dst invalid\n",
+		    __func__);
+		return (NULL);
+	}
+
+	/* The generic setup's 'src' slot carries the fill pattern here. */
+	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
+	    callback_fn, callback_arg, flags);
+	if (desc == NULL)
+		return (NULL);
+
+	hw_desc = desc->u.fill;
+	if (g_ioat_debug_level >= 3)
+		dump_descriptor(hw_desc);
+
+	ioat_submit_single(ioat);
+	return (&desc->bus_dmadesc);
+}
+
+/*
+ * Ring Management
+ */
+/* Number of descriptors currently outstanding: (head - tail) mod ring size. */
+static inline uint32_t
+ioat_get_active(struct ioat_softc *ioat)
+{
+	uint32_t ring_mask;
+
+	ring_mask = (1 << ioat->ring_size_order) - 1;
+	return ((ioat->head - ioat->tail) & ring_mask);
+}
+
+/* Free slots in the ring.  One slot is always kept unused (note the -1). */
+static inline uint32_t
+ioat_get_ring_space(struct ioat_softc *ioat)
+{
+	uint32_t ring_size;
+
+	ring_size = 1 << ioat->ring_size_order;
+	return (ring_size - ioat_get_active(ioat) - 1);
+}
+
+/*
+ * Allocate one SW ring descriptor plus its DMA-able HW descriptor, and load
+ * the HW descriptor's bus address into desc->hw_desc_bus_addr.  'mflags' is
+ * M_WAITOK or M_NOWAIT.  Returns NULL on any allocation/mapping failure;
+ * partially-constructed state is released via ioat_free_ring_entry().
+ */
+static struct ioat_descriptor *
+ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags)
+{
+	struct ioat_generic_hw_descriptor *hw_desc;
+	struct ioat_descriptor *desc;
+	int error, busdmaflag;
+
+	/* Assume failure until every step below has succeeded. */
+	error = ENOMEM;
+	hw_desc = NULL;
+
+	/* Translate malloc(9) sleep flags to their bus_dma(9) equivalents. */
+	if ((mflags & M_WAITOK) != 0)
+		busdmaflag = BUS_DMA_WAITOK;
+	else
+		busdmaflag = BUS_DMA_NOWAIT;
+
+	/*
+	 * M_ZERO so that desc->u.generic is NULL if we bail out before it is
+	 * assigned below; otherwise the error path's ioat_free_ring_entry()
+	 * would read uninitialized memory and could pass a garbage pointer
+	 * to bus_dmamem_free().
+	 */
+	desc = malloc(sizeof(*desc), M_IOAT, M_ZERO | mflags);
+	if (desc == NULL)
+		goto out;
+
+	/*
+	 * NOTE(review): every call overwrites the single, shared
+	 * ioat->hw_desc_map; confirm that freeing earlier entries against
+	 * this shared map is valid for this DMA tag.
+	 */
+	bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
+	    BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map);
+	if (hw_desc == NULL)
+		goto out;
+
+	memset(&desc->bus_dmadesc, 0, sizeof(desc->bus_dmadesc));
+	desc->u.generic = hw_desc;
+
+	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
+	    sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,
+	    busdmaflag);
+	if (error)
+		goto out;
+
+out:
+	if (error) {
+		/* Handles a NULL or partially-constructed desc. */
+		ioat_free_ring_entry(ioat, desc);
+		return (NULL);
+	}
+	return (desc);
+}
+
+/*
+ * Release one ring descriptor and its HW descriptor memory.  Tolerates a
+ * NULL desc and a desc whose HW descriptor was never allocated.
+ */
+static void
+ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
+{
+
+	if (desc == NULL)
+		return;
+
+	if (desc->u.generic)
+		bus_dmamem_free(ioat->hw_desc_tag, desc->u.generic,
+		    ioat->hw_desc_map);
+	free(desc, M_IOAT);
+}
+
+/*
+ * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
+ * for 'num_descs'.
+ *
+ * If mflags contains M_WAITOK, blocks until enough space is available.
+ *
+ * Returns zero on success, or an errno on error.  If num_descs is beyond the
+ * maximum ring size, returns EINVAL; if allocation would block and mflags
+ * contains M_NOWAIT, returns EAGAIN.
+ *
+ * Must be called with the submit_lock held; returns with the lock held. The
+ * lock may be dropped to allocate the ring.
+ *
+ * (The submit_lock is needed to add any entries to the ring, so callers are
+ * assured enough room is available.)
+ */
+static int
+ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
+{
+	struct ioat_descriptor **new_ring;
+	uint32_t order;
+	int error;
+
+	mtx_assert(&ioat->submit_lock, MA_OWNED);
+	error = 0;
+
+	/* The request can never exceed the largest ring we can grow to. */
+	if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) {
+		error = EINVAL;
+		goto out;
+	}
+	if (ioat->quiescing) {
+		error = ENXIO;
+		goto out;
+	}
+
+	for (;;) {
+		if (ioat_get_ring_space(ioat) >= num_descs)
+			goto out;
+
+		order = ioat->ring_size_order;
+		if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) {
+			/* Cannot grow: wait for the resizer or for space. */
+			if ((mflags & M_WAITOK) != 0) {
+				msleep(&ioat->tail, &ioat->submit_lock, 0,
+				    "ioat_rsz", 0);
+				continue;
+			}
+
+			error = EAGAIN;
+			break;
+		}
+
+		/* Become the resizer; is_resize_pending locks out others. */
+		ioat->is_resize_pending = TRUE;
+		for (;;) {
+			/* Drop the lock to allocate; retake it to grow. */
+			mtx_unlock(&ioat->submit_lock);
+
+			new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1),
+			    TRUE, mflags);
+
+			mtx_lock(&ioat->submit_lock);
+			KASSERT(ioat->ring_size_order == order,
+			    ("is_resize_pending should protect order"));
+
+			if (new_ring == NULL) {
+				KASSERT((mflags & M_WAITOK) == 0,
+				    ("allocation failed"));
+				error = EAGAIN;
+				break;
+			}
+
+			error = ring_grow(ioat, order, new_ring);
+			if (error == 0)
+				break;
+		}
+		ioat->is_resize_pending = FALSE;
+		/* Wake any waiters parked in the msleep() above. */
+		wakeup(&ioat->tail);
+		if (error)
+			break;
+	}
+
+out:
+	mtx_assert(&ioat->submit_lock, MA_OWNED);
+	return (error);
+}
+
+/*
+ * Allocate the pointer array for a ring of 'size' (a power of two) entries.
+ * If 'need_dscr', also allocate descriptors for the upper half
+ * (size/2 .. size-1); the lower half is later populated from the existing
+ * ring by ring_grow().  Returns NULL on allocation failure, after freeing
+ * everything allocated so far.  Called without the submit_lock.
+ */
+static struct ioat_descriptor **
+ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr,
+    int mflags)
+{
+	struct ioat_descriptor **ring;
+	uint32_t i;
+	int error;
+
+	KASSERT(size > 0 && powerof2(size), ("bogus size"));
+
+	ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags);
+	if (ring == NULL)
+		return (NULL);
+
+	if (need_dscr) {
+		error = ENOMEM;
+		for (i = size / 2; i < size; i++) {
+			ring[i] = ioat_alloc_ring_entry(ioat, mflags);
+			if (ring[i] == NULL)
+				goto out;
+			ring[i]->id = i;
+		}
+	}
+	error = 0;
+
+out:
+	if (error != 0 && ring != NULL) {
+		/* M_ZERO above guarantees unfilled slots are NULL. */
+		ioat_free_ring(ioat, size, ring);
+		ring = NULL;
+	}
+	return (ring);
+}
+
+/* Free every allocated entry of a ring, then the pointer array itself. */
+static void
+ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
+    struct ioat_descriptor **ring)
+{
+	struct ioat_descriptor **slot;
+
+	for (slot = ring; slot != ring + size; slot++) {
+		if (*slot != NULL)
+			ioat_free_ring_entry(ioat, *slot);
+	}
+	free(ring, M_IOAT);
+}
+
+/* Look up the ring entry for 'index', wrapping modulo the ring size. */
+static struct ioat_descriptor *
+ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
+{
+	uint32_t slot;
+
+	slot = index & ((1 << ioat->ring_size_order) - 1);
+	return (ioat->ring[slot]);
+}
+
+/*
+ * Double the ring from (1 << oldorder) to (1 << (oldorder + 1)) entries,
+ * re-linking SW and HW descriptors so the active span [tail, head) stays
+ * intact.  Called with the submit_lock held; takes the cleanup_lock to
+ * freeze completion processing while indices are rewritten.  Consumes
+ * 'newring' on success; frees it on failure.
+ */
+static int
+ring_grow(struct ioat_softc *ioat, uint32_t oldorder,
+    struct ioat_descriptor **newring)
+{
+	struct ioat_descriptor *tmp, *next;
+	struct ioat_dma_hw_descriptor *hw;
+	uint32_t oldsize, newsize, head, tail, i, end;
+	int error;
+
+	CTR0(KTR_IOAT, __func__);
+
+	mtx_assert(&ioat->submit_lock, MA_OWNED);
+
+	/* The order may have changed while the lock was dropped: bail. */
+	if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) {
+		error = EINVAL;
+		goto out;
+	}
+
+	oldsize = (1 << oldorder);
+	newsize = (1 << (oldorder + 1));
+
+	mtx_lock(&ioat->cleanup_lock);
+
+	head = ioat->head & (oldsize - 1);
+	tail = ioat->tail & (oldsize - 1);
+
+	/* Copy old descriptors to new ring */
+	for (i = 0; i < oldsize; i++)
+		newring[i] = ioat->ring[i];
+
+	/*
+	 * If head has wrapped but tail hasn't, we must swap some descriptors
+	 * around so that tail can increment directly to head.
+	 */
+	if (head < tail) {
+		for (i = 0; i <= head; i++) {
+			tmp = newring[oldsize + i];
+
+			newring[oldsize + i] = newring[i];
+			newring[oldsize + i]->id = oldsize + i;
+
+			newring[i] = tmp;
+			newring[i]->id = i;
+		}
+		head += oldsize;
+	}
+
+	KASSERT(head >= tail, ("invariants"));
+
+	/* Head didn't wrap; we only need to link in oldsize..newsize */
+	if (head < oldsize) {
+		i = oldsize - 1;
+		end = newsize;
+	} else {
+		/* Head did wrap; link newhead..newsize and 0..oldhead */
+		i = head;
+		end = newsize + (head - oldsize) + 1;
+	}
+
+	/*
+	 * Fix up hardware ring, being careful not to trample the active
+	 * section (tail -> head).
+	 */
+	for (; i < end; i++) {
+		KASSERT((i & (newsize - 1)) < tail ||
+		    (i & (newsize - 1)) >= head, ("trampling snake"));
+
+		next = newring[(i + 1) & (newsize - 1)];
+		hw = newring[i & (newsize - 1)]->u.dma;
+		hw->next = next->hw_desc_bus_addr;
+	}
+
+	free(ioat->ring, M_IOAT);
+	ioat->ring = newring;
+	ioat->ring_size_order = oldorder + 1;
+	ioat->tail = tail;
+	ioat->head = head;
+	error = 0;
+
+	mtx_unlock(&ioat->cleanup_lock);
+out:
+	if (error)
+		ioat_free_ring(ioat, (1 << (oldorder + 1)), newring);
+	return (error);
+}
+
+/*
+ * Halve the ring from (1 << oldorder) to (1 << (oldorder - 1)) entries by
+ * copying the active window (starting at tail) into the smaller ring and
+ * freeing the descriptors that no longer fit.  Called with the submit_lock
+ * held; takes the cleanup_lock.  Consumes 'newring' on success; frees it on
+ * failure (stale order, or too many descriptors still active).
+ */
+static int
+ring_shrink(struct ioat_softc *ioat, uint32_t oldorder,
+    struct ioat_descriptor **newring)
+{
+	struct ioat_dma_hw_descriptor *hw;
+	struct ioat_descriptor *ent, *next;
+	uint32_t oldsize, newsize, current_idx, new_idx, i;
+	int error;
+
+	CTR0(KTR_IOAT, __func__);
+
+	mtx_assert(&ioat->submit_lock, MA_OWNED);
+
+	if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) {
+		error = EINVAL;
+		goto out_unlocked;
+	}
+
+	oldsize = (1 << oldorder);
+	newsize = (1 << (oldorder - 1));
+
+	mtx_lock(&ioat->cleanup_lock);
+
+	/* Can't shrink below current active set! */
+	if (ioat_get_active(ioat) >= newsize) {
+		error = ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * Copy current descriptors to the new ring, dropping the removed
+	 * descriptors.
+	 */
+	for (i = 0; i < newsize; i++) {
+		current_idx = (ioat->tail + i) & (oldsize - 1);
+		new_idx = (ioat->tail + i) & (newsize - 1);
+
+		newring[new_idx] = ioat->ring[current_idx];
+		newring[new_idx]->id = new_idx;
+	}
+
+	/* Free deleted descriptors */
+	for (i = newsize; i < oldsize; i++) {
+		ent = ioat_get_ring_entry(ioat, ioat->tail + i);
+		ioat_free_ring_entry(ioat, ent);
+	}
+
+	/* Fix up hardware ring. */
+	hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma;
+	next = newring[(ioat->tail + newsize) & (newsize - 1)];
+	hw->next = next->hw_desc_bus_addr;
+
+	free(ioat->ring, M_IOAT);
+	ioat->ring = newring;
+	ioat->ring_size_order = oldorder - 1;
+	error = 0;
+
+out:
+	mtx_unlock(&ioat->cleanup_lock);
+out_unlocked:
+	if (error)
+		ioat_free_ring(ioat, (1 << (oldorder - 1)), newring);
+	return (error);
+}
+
+/*
+ * Log a decoded CHANERR value and dump the two descriptors at the tail --
+ * presumably those the engine was processing when it halted.  No-op when
+ * chanerr is zero.  Caller must hold the cleanup_lock.
+ */
+static void
+ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
+{
+	struct ioat_descriptor *desc;
+
+	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
+	    IOAT_CHANERR_STR);
+	if (chanerr == 0)
+		return;
+
+	mtx_assert(&ioat->cleanup_lock, MA_OWNED);
+
+	desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
+	dump_descriptor(desc->u.raw);
+
+	desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
+	dump_descriptor(desc->u.raw);
+}
+
+/*
+ * Periodic callout: while completions are pending, process them; when the
+ * channel is idle, opportunistically shrink the ring one order at a time
+ * (down to IOAT_MIN_ORDER), re-arming itself every 10 seconds.
+ */
+static void
+ioat_timer_callback(void *arg)
+{
+	struct ioat_descriptor **newring;
+	struct ioat_softc *ioat;
+	uint32_t order;
+
+	ioat = arg;
+	ioat_log_message(1, "%s\n", __func__);
+
+	if (ioat->is_completion_pending) {
+		ioat_process_events(ioat);
+		return;
+	}
+
+	/* Slowly scale the ring down if idle. */
+	mtx_lock(&ioat->submit_lock);
+	order = ioat->ring_size_order;
+	if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) {
+		mtx_unlock(&ioat->submit_lock);
+		goto out;
+	}
+	ioat->is_resize_pending = TRUE;
+	mtx_unlock(&ioat->submit_lock);
+
+	/* Allocate without the lock; entries are reused, not allocated. */
+	newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE,
+	    M_NOWAIT);
+
+	mtx_lock(&ioat->submit_lock);
+	KASSERT(ioat->ring_size_order == order,
+	    ("resize_pending protects order"));
+
+	if (newring != NULL)
+		ring_shrink(ioat, order, newring);
+
+	ioat->is_resize_pending = FALSE;
+	mtx_unlock(&ioat->submit_lock);
+
+out:
+	/* NOTE(review): unlocked read of ring_size_order -- confirm benign. */
+	if (ioat->ring_size_order > IOAT_MIN_ORDER)
+		callout_reset(&ioat->timer, 10 * hz,
+		    ioat_timer_callback, ioat);
+}
+
+/*
+ * Support Functions
+ */
+/*
+ * Account for one queued descriptor: take an active-descriptor reference,
+ * advance the SW and HW head counters, arm the completion callout if it is
+ * not already armed, and bump the submitted-descriptors statistic.
+ */
+static void
+ioat_submit_single(struct ioat_softc *ioat)
+{
+
+	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
+	atomic_add_rel_int(&ioat->head, 1);
+	atomic_add_rel_int(&ioat->hw_head, 1);
+
+	if (!ioat->is_completion_pending) {
+		ioat->is_completion_pending = TRUE;
+		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
+		    ioat_timer_callback, ioat);
+	}
+
+	ioat->stats.descriptors_submitted++;
+}
+
+/*
+ * Full channel reset: quiesce and drain outstanding references, suspend the
+ * channel, clear error state, perform the hardware reset (with MSI-X config
+ * save/restore on models that clobber it), and re-arm the channel starting
+ * from ring slot 0.  Returns 0 on success, ETIMEDOUT if the channel fails
+ * to suspend or reset within 20 ms, or EIO on a post-reset channel error.
+ */
+static int
+ioat_reset_hw(struct ioat_softc *ioat)
+{
+	uint64_t status;
+	uint32_t chanerr;
+	unsigned timeout;
+	int error;
+
+	/* Block new work and wait for in-flight references to drain. */
+	mtx_lock(IOAT_REFLK);
+	ioat->quiescing = TRUE;
+	ioat_drain_locked(ioat);
+	mtx_unlock(IOAT_REFLK);
+
+	status = ioat_get_chansts(ioat);
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		ioat_suspend(ioat);
+
+	/* Wait at most 20 ms */
+	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
+	    timeout < 20; timeout++) {
+		DELAY(1000);
+		status = ioat_get_chansts(ioat);
+	}
+	if (timeout == 20) {
+		error = ETIMEDOUT;
+		goto out;
+	}
+
+	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));
+
+	/* Clear any latched channel errors (write-1-to-clear). */
+	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
+	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);
+
+	/*
+	 * IOAT v3 workaround - write CHANERRMSK_INT with 3E07h to mask out
+	 * errors that can cause stability issues for IOAT v3.
+	 */
+	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
+	    4);
+	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
+	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);
+
+	/*
+	 * BDXDE and BWD models reset MSI-X registers on device reset.
+	 * Save/restore their contents manually.
+	 */
+	if (ioat_model_resets_msix(ioat)) {
+		ioat_log_message(1, "device resets MSI-X registers; saving\n");
+		pci_save_state(ioat->device);
+	}
+
+	ioat_reset(ioat);
+
+	/* Wait at most 20 ms */
+	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
+		DELAY(1000);
+	if (timeout == 20) {
+		error = ETIMEDOUT;
+		goto out;
+	}
+
+	if (ioat_model_resets_msix(ioat)) {
+		ioat_log_message(1, "device resets registers; restored\n");
+		pci_restore_state(ioat->device);
+	}
+
+	/* Reset attempts to return the hardware to "halted." */
+	status = ioat_get_chansts(ioat);
+	if (is_ioat_active(status) || is_ioat_idle(status)) {
+		/* So this really shouldn't happen... */
+		ioat_log_message(0, "Device is active after a reset?\n");
+		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
+		error = 0;
+		goto out;
+	}
+
+	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
+	if (chanerr != 0) {
+		mtx_lock(&ioat->cleanup_lock);
+		ioat_halted_debug(ioat, chanerr);
+		mtx_unlock(&ioat->cleanup_lock);
+		error = EIO;
+		goto out;
+	}
+
+	/*
+	 * Bring device back online after reset.  Writing CHAINADDR brings the
+	 * device back to active.
+	 *
+	 * The internal ring counter resets to zero, so we have to start over
+	 * at zero as well.
+	 */
+	ioat->tail = ioat->head = ioat->hw_head = 0;
+	ioat->last_seen = 0;
+
+	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
+	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
+	ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr);
+	error = 0;
+
+out:
+	/* Lift quiescing even on failure so waiters are not stuck. */
+	mtx_lock(IOAT_REFLK);
+	ioat->quiescing = FALSE;
+	wakeup(&ioat->quiescing);
+	mtx_unlock(IOAT_REFLK);
+
+	if (error == 0)
+		error = ioat_start_channel(ioat);
+
+	return (error);
+}
+
+/*
+ * Read-only sysctl: render the channel status (CHANSTS) as a string.
+ * Any attempted write returns EINVAL.
+ */
+static int
+sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
+{
+	struct ioat_softc *ioat;
+	struct sbuf sb;
+	uint64_t status;
+	int error;
+
+	ioat = arg1;
+
+	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
+
+	sbuf_new_for_sysctl(&sb, NULL, 256, req);
+	switch (status) {
+	case IOAT_CHANSTS_ACTIVE:
+		sbuf_printf(&sb, "ACTIVE");
+		break;
+	case IOAT_CHANSTS_IDLE:
+		sbuf_printf(&sb, "IDLE");
+		break;
+	case IOAT_CHANSTS_SUSPENDED:
+		sbuf_printf(&sb, "SUSPENDED");
+		break;
+	case IOAT_CHANSTS_HALTED:
+		sbuf_printf(&sb, "HALTED");
+		break;
+	case IOAT_CHANSTS_ARMED:
+		sbuf_printf(&sb, "ARMED");
+		break;
+	default:
+		sbuf_printf(&sb, "UNKNOWN");
+		break;
+	}
+	error = sbuf_finish(&sb);
+	sbuf_delete(&sb);
+
+	/* Reject writes: this node is informational only. */
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+	return (EINVAL);
+}
+
+/*
+ * Read-only sysctl: average descriptors processed per interrupt, rendered
+ * with one decimal place.  Prints "NaN" before the first interrupt (avoids
+ * dividing by zero).  Writes return EINVAL.
+ */
+static int
+sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
+{
+	struct ioat_softc *ioat;
+	struct sbuf sb;
+#define	PRECISION	"1"
+	const uintmax_t factor = 10;
+	uintmax_t rate;
+	int error;
+
+	ioat = arg1;
+	sbuf_new_for_sysctl(&sb, NULL, 16, req);
+
+	if (ioat->stats.interrupts == 0) {
+		sbuf_printf(&sb, "NaN");
+		goto out;
+	}
+	/* Scale by 'factor' to retain one fractional digit. */
+	rate = ioat->stats.descriptors_processed * factor /
+	    ioat->stats.interrupts;
+	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
+	    rate % factor);
+#undef	PRECISION
+out:
+	error = sbuf_finish(&sb);
+	sbuf_delete(&sb);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+	return (EINVAL);
+}
+
+/*
+ * Test sysctl: writing a non-zero value injects a recoverable channel error
+ * by submitting a 1-byte copy whose src/dst have the invalid high 16 bits
+ * set.  Reads return 0.
+ */
+static int
+sysctl_handle_error(SYSCTL_HANDLER_ARGS)
+{
+	struct ioat_descriptor *desc;
+	struct ioat_softc *ioat;
+	int error, arg;
+
+	ioat = arg1;
+
+	arg = 0;
+	error = SYSCTL_OUT(req, &arg, sizeof(arg));
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+
+	error = SYSCTL_IN(req, &arg, sizeof(arg));
+	if (error != 0)
+		return (error);
+
+	if (arg != 0) {
+		ioat_acquire(&ioat->dmaengine);
+		/* Deliberately-bogus addresses to provoke a channel error. */
+		desc = ioat_op_generic(ioat, IOAT_OP_COPY, 1,
+		    0xffff000000000000ull, 0xffff000000000000ull, NULL, NULL,
+		    0);
+		if (desc == NULL)
+			error = ENOMEM;
+		else
+			ioat_submit_single(ioat);
+		ioat_release(&ioat->dmaengine);
+	}
+	return (error);
+}
+
+/*
+ * Test sysctl: writing a non-zero value triggers a full hardware reset via
+ * ioat_reset_hw().  Reads return 0.
+ */
+static int
+sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
+{
+	struct ioat_softc *ioat;
+	int error, arg;
+
+	ioat = arg1;
+
+	arg = 0;
+	error = SYSCTL_OUT(req, &arg, sizeof(arg));
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+
+	error = SYSCTL_IN(req, &arg, sizeof(arg));
+	if (error != 0)
+		return (error);
+
+	if (arg != 0)
+		error = ioat_reset_hw(ioat);
+
+	return (error);
+}
+
+/* Hex-dump a 64-byte HW descriptor as two rows of eight 32-bit words. */
+static void
+dump_descriptor(void *hw_desc)
+{
+	const uint32_t *words;
+	int w;
+
+	words = hw_desc;
+	for (w = 0; w < 16; w++) {
+		printf("%08x ", words[w]);
+		if ((w & 7) == 7)
+			printf("\n");
+	}
+}
+
+/*
+ * Register the per-device sysctl tree: top-level HW attributes, plus the
+ * "state" (ring/channel internals), "hammer" (testing knobs), and "stats"
+ * sub-trees.  #ifdef notyet nodes await SYSCTL_ADD_U16/U32 support.
+ */
+static void
+ioat_setup_sysctl(device_t device)
+{
+	struct sysctl_oid_list *par, *statpar, *state, *hammer;
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid *tree, *tmp;
+	struct ioat_softc *ioat;
+
+	ioat = DEVICE2SOFTC(device);
+	ctx = device_get_sysctl_ctx(device);
+	tree = device_get_sysctl_tree(device);
+	par = SYSCTL_CHILDREN(tree);
+
+	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
+	    &ioat->version, 0, "HW version (0xMM form)");
+	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
+	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
+	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
+	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
+#ifdef notyet
+	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
+	    &ioat->intrdelay_max, 0,
+	    "Maximum configurable INTRDELAY on this channel (microseconds)");
+#endif
+
+	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
+	    "IOAT channel internal state");
+	state = SYSCTL_CHILDREN(tmp);
+
+	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
+	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
+	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
+	    0, "SW descriptor head pointer index");
+	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
+	    0, "SW descriptor tail pointer index");
+	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD,
+	    &ioat->hw_head, 0, "HW DMACOUNT");
+
+	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
+	    ioat->comp_update, "HW addr of last completion");
+
+	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD,
+	    &ioat->is_resize_pending, 0, "resize pending");
+	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending",
+	    CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending");
+	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD,
+	    &ioat->is_reset_pending, 0, "reset pending");
+	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD,
+	    &ioat->is_channel_running, 0, "channel running");
+
+	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
+	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
+	    "String of the channel status");
+
+#ifdef notyet
+	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
+	    &ioat->cached_intrdelay, 0,
+	    "Current INTRDELAY on this channel (cached, microseconds)");
+#endif
+
+	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
+	    "Big hammers (mostly for testing)");
+	hammer = SYSCTL_CHILDREN(tmp);
+
+	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
+	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
+	    "Set to non-zero to reset the hardware");
+	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_error",
+	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_error, "I",
+	    "Set to non-zero to inject a recoverable hardware error");
+
+	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
+	    "IOAT channel statistics");
+	statpar = SYSCTL_CHILDREN(tmp);
+
+	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
+	    &ioat->stats.interrupts,
+	    "Number of interrupts processed on this channel");
+	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
+	    &ioat->stats.descriptors_processed,
+	    "Number of descriptors processed on this channel");
+	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
+	    &ioat->stats.descriptors_submitted,
+	    "Number of descriptors submitted to this channel");
+	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
+	    &ioat->stats.descriptors_error,
+	    "Number of descriptors failed by channel errors");
+#ifdef notyet
+	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
+	    &ioat->stats.channel_halts, 0,
+	    "Number of times the channel has halted");
+	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
+	    &ioat->stats.last_halt_chanerr, 0,
+	    "The raw CHANERR when the channel was last halted");
+#endif
+
+	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
+	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
+	    "Descriptors per interrupt");
+}
+
+/*
+ * Take a reference on the softc.  Under INVARIANTS a per-'kind' counter is
+ * also maintained so imbalances can be attributed to a reference class.
+ * Returns 'ioat' for call-chaining convenience.
+ */
+static inline struct ioat_softc *
+ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
+{
+	uint32_t old;
+
+	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));
+
+	old = atomic_fetchadd_32(&ioat->refcnt, 1);
+	KASSERT(old < UINT32_MAX, ("refcnt overflow"));
+
+#ifdef INVARIANTS
+	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
+	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));
+#endif
+
+	return (ioat);
+}
+
+/* Drop 'n' references; may take IOAT_REFLK (caller must not hold it). */
+static inline void
+ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
+{
+
+	_ioat_putn(ioat, n, kind, FALSE);
+}
+
+/* Drop 'n' references; caller already holds IOAT_REFLK. */
+static inline void
+ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
+{
+
+	_ioat_putn(ioat, n, kind, TRUE);
+}
+
+/*
+ * Drop 'n' references.  The common case (count stays above zero) is handled
+ * lock-free with a CAS loop; IOAT_REFLK is only involved when the count may
+ * reach zero, in which case sleepers in ioat_drain_locked() are woken.
+ * 'locked' indicates whether the caller already holds IOAT_REFLK.
+ */
+static inline void
+_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
+    boolean_t locked)
+{
+	uint32_t old;
+
+	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));
+
+	if (n == 0)
+		return;
+
+#ifdef INVARIANTS
+	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
+	KASSERT(old >= n, ("refcnt kind underflow"));
+#endif
+
+	/* Skip acquiring the lock if resulting refcnt > 0. */
+	for (;;) {
+		old = ioat->refcnt;
+		if (old <= n)
+			break;
+		if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
+			return;
+	}
+
+	if (locked)
+		mtx_assert(IOAT_REFLK, MA_OWNED);
+	else
+		mtx_lock(IOAT_REFLK);
+
+	old = atomic_fetchadd_32(&ioat->refcnt, -n);
+	KASSERT(old >= n, ("refcnt error"));
+
+	/* Last reference gone: wake ioat_drain_locked() sleepers. */
+	if (old == n)
+		wakeup(IOAT_REFLK);
+	if (!locked)
+		mtx_unlock(IOAT_REFLK);
+}
+
+/* Drop a single reference of the given kind. */
+static inline void
+ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
+{
+
+	ioat_putn(ioat, 1, kind);
+}
+
+/*
+ * Sleep until every reference on the softc has been dropped (the final
+ * _ioat_putn() wakes us).  IOAT_REFLK must be held on entry and is held
+ * across the msleep().
+ */
+static void
+ioat_drain_locked(struct ioat_softc *ioat)
+{
+
+	mtx_assert(IOAT_REFLK, MA_OWNED);
+	while (ioat->refcnt > 0)
+		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
+}
diff --git a/sys/dev/ioat/ioat.h b/sys/dev/ioat/ioat.h
new file mode 100644
index 0000000..2e10124
--- /dev/null
+++ b/sys/dev/ioat/ioat.h
@@ -0,0 +1,218 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+__FBSDID("$FreeBSD$");
+
+#ifndef __IOAT_H__
+#define __IOAT_H__
+
+#include <sys/param.h>
+#include <machine/bus.h>
+
+/*
+ * This file defines the public interface to the IOAT driver.
+ */
+
+/*
+ * Enables an interrupt for this operation. Typically, you would only enable
+ * this on the last operation in a group
+ */
+#define DMA_INT_EN 0x1
+/*
+ * Like M_NOWAIT. Operations will return NULL if they cannot allocate a
+ * descriptor without blocking.
+ */
+#define DMA_NO_WAIT 0x2
+/*
+ * Disallow prefetching the source of the following operation. Ordinarily, DMA
+ * operations can be pipelined on some hardware. E.g., operation 2's source
+ * may be prefetched before operation 1 completes.
+ */
+#define DMA_FENCE 0x4
+#define _DMA_GENERIC_FLAGS (DMA_INT_EN | DMA_NO_WAIT | DMA_FENCE)
+
+/*
+ * Emit a CRC32C as the result of a ioat_copy_crc() or ioat_crc().
+ */
+#define DMA_CRC_STORE 0x8
+
+/*
+ * Compare the CRC32C of a ioat_copy_crc() or ioat_crc() against an expected
+ * value.  It is invalid to specify both TEST and STORE.
+ */
+#define DMA_CRC_TEST 0x10
+#define _DMA_CRC_TESTSTORE (DMA_CRC_STORE | DMA_CRC_TEST)
+
+/*
+ * Use an inline comparison CRC32C or emit an inline CRC32C result. Invalid
+ * without one of STORE or TEST.
+ */
+#define DMA_CRC_INLINE 0x20
+#define _DMA_CRC_FLAGS (DMA_CRC_STORE | DMA_CRC_TEST | DMA_CRC_INLINE)
+
+/*
+ * Hardware revision number. Different hardware revisions support different
+ * features. For example, 3.2 cannot read from MMIO space, while 3.3 can.
+ */
+#define IOAT_VER_3_0 0x30
+#define IOAT_VER_3_2 0x32
+#define IOAT_VER_3_3 0x33
+
+typedef void *bus_dmaengine_t;
+struct bus_dmadesc;
+typedef void (*bus_dmaengine_callback_t)(void *arg, int error);
+
+/*
+ * Called first to acquire a reference to the DMA channel
+ *
+ * Flags may be M_WAITOK or M_NOWAIT.
+ */
+bus_dmaengine_t ioat_get_dmaengine(uint32_t channel_index, int flags);
+
+/* Release the DMA channel */
+void ioat_put_dmaengine(bus_dmaengine_t dmaengine);
+
+/* Check the DMA engine's HW version */
+int ioat_get_hwversion(bus_dmaengine_t dmaengine);
+size_t ioat_get_max_io_size(bus_dmaengine_t dmaengine);
+
+/*
+ * Set interrupt coalescing on a DMA channel.
+ *
+ * The argument is in microseconds. A zero value disables coalescing. Any
+ * other value delays interrupt generation for N microseconds to provide
+ * opportunity to coalesce multiple operations into a single interrupt.
+ *
+ * Returns an error status, or zero on success.
+ *
+ * - ERANGE if the given value exceeds the delay supported by the hardware.
+ * (All current hardware supports a maximum of 0x3fff microseconds delay.)
+ * - ENODEV if the hardware does not support interrupt coalescing.
+ */
+int ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay);
+
+/*
+ * Return the maximum supported coalescing period, for use in
+ * ioat_set_interrupt_coalesce(). If the hardware does not support coalescing,
+ * returns zero.
+ */
+uint16_t ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine);
+
+/*
+ * Acquire must be called before issuing an operation to perform. Release is
+ * called after. Multiple operations can be issued within the context of one
+ * acquire and release
+ */
+void ioat_acquire(bus_dmaengine_t dmaengine);
+void ioat_release(bus_dmaengine_t dmaengine);
+
+/*
+ * Acquire_reserve can be called to ensure there is room for N descriptors. If
+ * it succeeds, the next N valid operations will successfully enqueue.
+ *
+ * It may fail with:
+ * - ENXIO if the channel is in an errored state, or the driver is being
+ * unloaded
+ * - EAGAIN if mflags included M_NOWAIT
+ *
+ * On failure, the caller does not hold the dmaengine.
+ */
+int ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags);
+
+/*
+ * Issue a blockfill operation. The 64-bit pattern 'fillpattern' is written to
+ * 'len' physically contiguous bytes at 'dst'.
+ *
+ * Only supported on devices with the BFILL capability.
+ */
+struct bus_dmadesc *ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst,
+ uint64_t fillpattern, bus_size_t len, bus_dmaengine_callback_t callback_fn,
+ void *callback_arg, uint32_t flags);
+
+/* Issues the copy data operation */
+struct bus_dmadesc *ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
+ bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
+ void *callback_arg, uint32_t flags);
+
+/*
+ * Issue a copy data operation, with constraints:
+ * - src1, src2, dst1, dst2 are all page-aligned addresses
+ * - The quantity to copy is exactly 2 pages;
+ * - src1 -> dst1, src2 -> dst2
+ *
+ * Why use this instead of normal _copy()? You can copy two non-contiguous
+ * pages (src, dst, or both) with one descriptor.
+ */
+struct bus_dmadesc *ioat_copy_8k_aligned(bus_dmaengine_t dmaengine,
+ bus_addr_t dst1, bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
+ bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags);
+
+/*
+ * Copy len bytes from src to dst, like ioat_copy().
+ *
+ * Additionally, accumulate a CRC32C of the data.
+ *
+ * If initialseed is not NULL, the value it points to is used to seed the
+ * initial value of the CRC32C.
+ *
+ * If flags include DMA_CRC_STORE and not DMA_CRC_INLINE, crcptr is written
+ * with the 32-bit CRC32C result (in wire format).
+ *
+ * If flags include DMA_CRC_TEST and not DMA_CRC_INLINE, the computed CRC32C is
+ * compared with the 32-bit CRC32C pointed to by crcptr. If they do not match,
+ * a channel error is raised.
+ *
+ * If the DMA_CRC_INLINE flag is set, crcptr is ignored and the DMA engine uses
+ * the 4 bytes trailing the source data (TEST) or the destination data (STORE).
+ */
+struct bus_dmadesc *ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst,
+ bus_addr_t src, bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
+ bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags);
+
+/*
+ * ioat_crc() is nearly identical to ioat_copy_crc(), but does not actually
+ * move data around.
+ *
+ * Like ioat_copy_crc, ioat_crc computes a CRC32C over len bytes pointed to by
+ * src. The flags affect its operation in the same way, with one exception:
+ *
+ * If flags includes both DMA_CRC_STORE and DMA_CRC_INLINE, the computed CRC32C
+ * is written to the 4 bytes trailing the *source* data.
+ */
+struct bus_dmadesc *ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src,
+ bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
+ bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags);
+
+/*
+ * Issues a null operation. This issues the operation to the hardware, but the
+ * hardware doesn't do anything with it.
+ */
+struct bus_dmadesc *ioat_null(bus_dmaengine_t dmaengine,
+ bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags);
+
+
+#endif /* __IOAT_H__ */
+
diff --git a/sys/dev/ioat/ioat_hw.h b/sys/dev/ioat/ioat_hw.h
new file mode 100644
index 0000000..6dfe9a6
--- /dev/null
+++ b/sys/dev/ioat/ioat_hw.h
@@ -0,0 +1,167 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+__FBSDID("$FreeBSD$");
+
+#ifndef __IOAT_HW_H__
+#define __IOAT_HW_H__
+
+#define IOAT_MAX_CHANNELS 32
+
+#define IOAT_CHANCNT_OFFSET 0x00
+
+#define IOAT_XFERCAP_OFFSET 0x01
+/* Only bits [4:0] are valid. */
+#define IOAT_XFERCAP_VALID_MASK 0x1f
+
+#define IOAT_GENCTRL_OFFSET 0x02
+
+#define IOAT_INTRCTRL_OFFSET 0x03
+#define IOAT_INTRCTRL_MASTER_INT_EN 0x01
+
+#define IOAT_ATTNSTATUS_OFFSET 0x04
+
+#define IOAT_CBVER_OFFSET 0x08
+
+#define IOAT_INTRDELAY_OFFSET 0x0C
+#define IOAT_INTRDELAY_SUPPORTED (1 << 15)
+/* Reserved. (1 << 14) */
+/* [13:0] is the coalesce period, in microseconds. */
+#define IOAT_INTRDELAY_US_MASK ((1 << 14) - 1)
+
+#define IOAT_CS_STATUS_OFFSET 0x0E
+
+#define IOAT_DMACAPABILITY_OFFSET 0x10
+#define IOAT_DMACAP_PB (1 << 0)
+#define IOAT_DMACAP_CRC (1 << 1)
+#define IOAT_DMACAP_MARKER_SKIP (1 << 2)
+#define IOAT_DMACAP_OLD_XOR (1 << 3)
+#define IOAT_DMACAP_DCA (1 << 4)
+#define IOAT_DMACAP_MOVECRC (1 << 5)
+#define IOAT_DMACAP_BFILL (1 << 6)
+#define IOAT_DMACAP_EXT_APIC (1 << 7)
+#define IOAT_DMACAP_XOR (1 << 8)
+#define IOAT_DMACAP_PQ (1 << 9)
+#define IOAT_DMACAP_DMA_DIF (1 << 10)
+#define IOAT_DMACAP_DWBES (1 << 13)
+#define IOAT_DMACAP_RAID16SS (1 << 17)
+#define IOAT_DMACAP_DMAMC (1 << 18)
+#define IOAT_DMACAP_CTOS (1 << 19)
+
+#define IOAT_DMACAP_STR \
+ "\20\24Completion_Timeout_Support\23DMA_with_Multicasting_Support" \
+ "\22RAID_Super_descriptors\16Descriptor_Write_Back_Error_Support" \
+ "\13DMA_with_DIF\12PQ\11XOR\10Extended_APIC_ID\07Block_Fill\06Move_CRC" \
+ "\05DCA\04Old_XOR\03Marker_Skipping\02CRC\01Page_Break"
+
+/* DMA Channel Registers */
+#define IOAT_CHANCTRL_OFFSET 0x80
+#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
+#define IOAT_CHANCTRL_COMPL_DCA_EN 0x0200
+#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
+#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
+#define IOAT_CHANCTRL_ERR_INT_EN 0x0010
+#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
+#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
+#define IOAT_CHANCTRL_INT_REARM 0x0001
+#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
+ IOAT_CHANCTRL_ERR_COMPLETION_EN |\
+ IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
+ IOAT_CHANCTRL_ERR_INT_EN)
+
+#define IOAT_CHANCMD_OFFSET 0x84
+#define IOAT_CHANCMD_RESET 0x20
+#define IOAT_CHANCMD_SUSPEND 0x04
+
+#define IOAT_DMACOUNT_OFFSET 0x86
+
+#define IOAT_CHANSTS_OFFSET_LOW 0x88
+#define IOAT_CHANSTS_OFFSET_HIGH 0x8C
+#define IOAT_CHANSTS_OFFSET 0x88
+
+#define IOAT_CHANSTS_STATUS 0x7ULL
+#define IOAT_CHANSTS_ACTIVE 0x0
+#define IOAT_CHANSTS_IDLE 0x1
+#define IOAT_CHANSTS_SUSPENDED 0x2
+#define IOAT_CHANSTS_HALTED 0x3
+#define IOAT_CHANSTS_ARMED 0x4
+
+#define IOAT_CHANSTS_UNAFFILIATED_ERROR 0x8ULL
+#define IOAT_CHANSTS_SOFT_ERROR 0x10ULL
+
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK (~0x3FULL)
+
+#define IOAT_CHAINADDR_OFFSET_LOW 0x90
+#define IOAT_CHAINADDR_OFFSET_HIGH 0x94
+
+#define IOAT_CHANCMP_OFFSET_LOW 0x98
+#define IOAT_CHANCMP_OFFSET_HIGH 0x9C
+
+#define IOAT_CHANERR_OFFSET 0xA8
+
+#define IOAT_CHANERR_XSADDERR (1 << 0)
+#define IOAT_CHANERR_XDADDERR (1 << 1)
+#define IOAT_CHANERR_NDADDERR (1 << 2)
+#define IOAT_CHANERR_DERR (1 << 3)
+#define IOAT_CHANERR_CHADDERR (1 << 4)
+#define IOAT_CHANERR_CCMDERR (1 << 5)
+#define IOAT_CHANERR_CUNCORERR (1 << 6)
+#define IOAT_CHANERR_DUNCORERR (1 << 7)
+#define IOAT_CHANERR_RDERR (1 << 8)
+#define IOAT_CHANERR_WDERR (1 << 9)
+#define IOAT_CHANERR_DCERR (1 << 10)
+#define IOAT_CHANERR_DXSERR (1 << 11)
+#define IOAT_CHANERR_CMPADDERR (1 << 12)
+#define IOAT_CHANERR_INTCFGERR (1 << 13)
+#define IOAT_CHANERR_SEDERR (1 << 14)
+#define IOAT_CHANERR_UNAFFERR (1 << 15)
+#define IOAT_CHANERR_CXPERR (1 << 16)
+/* Reserved. (1 << 17) */
+#define IOAT_CHANERR_DCNTERR (1 << 18)
+#define IOAT_CHANERR_DIFFERR (1 << 19)
+#define IOAT_CHANERR_GTVERR (1 << 20)
+#define IOAT_CHANERR_ATVERR (1 << 21)
+#define IOAT_CHANERR_RTVERR (1 << 22)
+#define IOAT_CHANERR_BBERR (1 << 23)
+#define IOAT_CHANERR_RDIFFERR (1 << 24)
+#define IOAT_CHANERR_RGTVERR (1 << 25)
+#define IOAT_CHANERR_RATVERR (1 << 26)
+#define IOAT_CHANERR_RRTVERR (1 << 27)
+
+#define IOAT_CHANERR_STR \
+ "\20\34RRTVERR\33RATVERR\32RGTVERR\31RDIFFERR\30BBERR\27RTVERR\26ATVERR" \
+ "\25GTVERR\24DIFFERR\23DCNTERR\21CXPERR\20UNAFFERR\17SEDERR\16INTCFGERR" \
+ "\15CMPADDERR\14DXSERR\13DCERR\12WDERR\11RDERR\10DUNCORERR\07CUNCORERR" \
+ "\06CCMDERR\05CHADDERR\04DERR\03NDADDERR\02XDADDERR\01XSADDERR"
+
+
+#define IOAT_CFG_CHANERR_INT_OFFSET 0x180
+#define IOAT_CFG_CHANERRMASK_INT_OFFSET 0x184
+
+#define IOAT_MIN_ORDER 4
+#define IOAT_MAX_ORDER 16
+
+#endif /* __IOAT_HW_H__ */
diff --git a/sys/dev/ioat/ioat_internal.h b/sys/dev/ioat/ioat_internal.h
new file mode 100644
index 0000000..322671c
--- /dev/null
+++ b/sys/dev/ioat/ioat_internal.h
@@ -0,0 +1,600 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+__FBSDID("$FreeBSD$");
+
+#ifndef __IOAT_INTERNAL_H__
+#define __IOAT_INTERNAL_H__
+
+#include <sys/_task.h>
+
+#define DEVICE2SOFTC(dev) ((struct ioat_softc *) device_get_softc(dev))
+#define KTR_IOAT KTR_SPARE3
+
+#define ioat_read_chancnt(ioat) \
+ ioat_read_1((ioat), IOAT_CHANCNT_OFFSET)
+
+#define ioat_read_xfercap(ioat) \
+ (ioat_read_1((ioat), IOAT_XFERCAP_OFFSET) & IOAT_XFERCAP_VALID_MASK)
+
+#define ioat_write_intrctrl(ioat, value) \
+ ioat_write_1((ioat), IOAT_INTRCTRL_OFFSET, (value))
+
+#define ioat_read_cbver(ioat) \
+ (ioat_read_1((ioat), IOAT_CBVER_OFFSET) & 0xFF)
+
+#define ioat_read_dmacapability(ioat) \
+ ioat_read_4((ioat), IOAT_DMACAPABILITY_OFFSET)
+
+#define ioat_write_chanctrl(ioat, value) \
+ ioat_write_2((ioat), IOAT_CHANCTRL_OFFSET, (value))
+
+static __inline uint64_t
+ioat_bus_space_read_8_lower_first(bus_space_tag_t tag,
+ bus_space_handle_t handle, bus_size_t offset)
+{
+ return (bus_space_read_4(tag, handle, offset) |
+ ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
+}
+
+static __inline void
+ioat_bus_space_write_8_lower_first(bus_space_tag_t tag,
+ bus_space_handle_t handle, bus_size_t offset, uint64_t val)
+{
+ bus_space_write_4(tag, handle, offset, val);
+ bus_space_write_4(tag, handle, offset + 4, val >> 32);
+}
+
+#ifdef __i386__
+#define ioat_bus_space_read_8 ioat_bus_space_read_8_lower_first
+#define ioat_bus_space_write_8 ioat_bus_space_write_8_lower_first
+#else
+#define ioat_bus_space_read_8(tag, handle, offset) \
+ bus_space_read_8((tag), (handle), (offset))
+#define ioat_bus_space_write_8(tag, handle, offset, val) \
+ bus_space_write_8((tag), (handle), (offset), (val))
+#endif
+
+#define ioat_read_1(ioat, offset) \
+ bus_space_read_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+ (offset))
+
+#define ioat_read_2(ioat, offset) \
+ bus_space_read_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+ (offset))
+
+#define ioat_read_4(ioat, offset) \
+ bus_space_read_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+ (offset))
+
+#define ioat_read_8(ioat, offset) \
+ ioat_bus_space_read_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+ (offset))
+
+#define ioat_read_double_4(ioat, offset) \
+ ioat_bus_space_read_8_lower_first((ioat)->pci_bus_tag, \
+ (ioat)->pci_bus_handle, (offset))
+
+#define ioat_write_1(ioat, offset, value) \
+ bus_space_write_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+ (offset), (value))
+
+#define ioat_write_2(ioat, offset, value) \
+ bus_space_write_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+ (offset), (value))
+
+#define ioat_write_4(ioat, offset, value) \
+ bus_space_write_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+ (offset), (value))
+
+#define ioat_write_8(ioat, offset, value) \
+ ioat_bus_space_write_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+ (offset), (value))
+
+#define ioat_write_double_4(ioat, offset, value) \
+ ioat_bus_space_write_8_lower_first((ioat)->pci_bus_tag, \
+ (ioat)->pci_bus_handle, (offset), (value))
+
+MALLOC_DECLARE(M_IOAT);
+
+SYSCTL_DECL(_hw_ioat);
+
+extern int g_ioat_debug_level;
+
+struct generic_dma_control {
+ uint32_t int_enable:1;
+ uint32_t src_snoop_disable:1;
+ uint32_t dest_snoop_disable:1;
+ uint32_t completion_update:1;
+ uint32_t fence:1;
+ uint32_t reserved1:1;
+ uint32_t src_page_break:1;
+ uint32_t dest_page_break:1;
+ uint32_t bundle:1;
+ uint32_t dest_dca:1;
+ uint32_t hint:1;
+ uint32_t reserved2:13;
+ uint32_t op:8;
+};
+
+struct ioat_generic_hw_descriptor {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct generic_dma_control control_generic;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t reserved[4];
+};
+
+struct ioat_dma_hw_descriptor {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct generic_dma_control control_generic;
+ struct {
+ uint32_t int_enable:1;
+ uint32_t src_snoop_disable:1;
+ uint32_t dest_snoop_disable:1;
+ uint32_t completion_update:1;
+ uint32_t fence:1;
+ uint32_t null:1;
+ uint32_t src_page_break:1;
+ uint32_t dest_page_break:1;
+ uint32_t bundle:1;
+ uint32_t dest_dca:1;
+ uint32_t hint:1;
+ uint32_t reserved:13;
+ #define IOAT_OP_COPY 0x00
+ uint32_t op:8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t next_src_addr;
+ uint64_t next_dest_addr;
+ uint64_t user1;
+ uint64_t user2;
+};
+
+struct ioat_fill_hw_descriptor {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct generic_dma_control control_generic;
+ struct {
+ uint32_t int_enable:1;
+ uint32_t reserved:1;
+ uint32_t dest_snoop_disable:1;
+ uint32_t completion_update:1;
+ uint32_t fence:1;
+ uint32_t reserved2:2;
+ uint32_t dest_page_break:1;
+ uint32_t bundle:1;
+ uint32_t reserved3:15;
+ #define IOAT_OP_FILL 0x01
+ uint32_t op:8;
+ } control;
+ } u;
+ uint64_t src_data;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t reserved;
+ uint64_t next_dest_addr;
+ uint64_t user1;
+ uint64_t user2;
+};
+
+struct ioat_crc32_hw_descriptor {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct generic_dma_control control_generic;
+ struct {
+ uint32_t int_enable:1;
+ uint32_t src_snoop_disable:1;
+ uint32_t dest_snoop_disable:1;
+ uint32_t completion_update:1;
+ uint32_t fence:1;
+ uint32_t reserved1:3;
+ uint32_t bundle:1;
+ uint32_t dest_dca:1;
+ uint32_t hint:1;
+ uint32_t use_seed:1;
+ /*
+ * crc_location:
+ * For IOAT_OP_MOVECRC_TEST and IOAT_OP_CRC_TEST:
+ * 0: comparison value is pointed to by CRC Address
+ * field.
+ * 1: comparison value follows data in wire format
+ * ("inverted reflected bit order") in the 4 bytes
+ * following the source data.
+ *
+ * For IOAT_OP_CRC_STORE:
+ * 0: Result will be stored at location pointed to by
+ * CRC Address field (in wire format).
+ * 1: Result will be stored directly following the
+ * source data.
+ *
+ * For IOAT_OP_MOVECRC_STORE:
+ * 0: Result will be stored at location pointed to by
+ * CRC Address field (in wire format).
+ * 1: Result will be stored directly following the
+ * *destination* data.
+ */
+ uint32_t crc_location:1;
+ uint32_t reserved2:11;
+ /*
+ * MOVECRC - Move data in the same way as standard copy
+ * operation, but also compute CRC32.
+ *
+ * CRC - Only compute CRC on source data.
+ *
+ * There is a CRC accumulator register in the hardware.
+ * If 'initial' is set, it is initialized to the value
+ * in 'seed.'
+ *
+ * In all modes, these operators accumulate size bytes
+ * at src_addr into the running CRC32C.
+ *
+ * Store mode emits the accumulated CRC, in wire
+ * format, as specified by the crc_location bit above.
+ *
+ * Test mode compares the accumulated CRC against the
+ * reference CRC, as described in crc_location above.
+ * On failure, halts the DMA engine with a CRC error
+ * status.
+ */
+ #define IOAT_OP_MOVECRC 0x41
+ #define IOAT_OP_MOVECRC_TEST 0x42
+ #define IOAT_OP_MOVECRC_STORE 0x43
+ #define IOAT_OP_CRC 0x81
+ #define IOAT_OP_CRC_TEST 0x82
+ #define IOAT_OP_CRC_STORE 0x83
+ uint32_t op:8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t next_src_addr;
+ uint64_t next_dest_addr;
+ uint32_t seed;
+ uint32_t reserved;
+ uint64_t crc_address;
+};
+
+struct ioat_xor_hw_descriptor {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct generic_dma_control control_generic;
+ struct {
+ uint32_t int_enable:1;
+ uint32_t src_snoop_disable:1;
+ uint32_t dest_snoop_disable:1;
+ uint32_t completion_update:1;
+ uint32_t fence:1;
+ uint32_t src_count:3;
+ uint32_t bundle:1;
+ uint32_t dest_dca:1;
+ uint32_t hint:1;
+ uint32_t reserved:13;
+ #define IOAT_OP_XOR 0x87
+ #define IOAT_OP_XOR_VAL 0x88
+ uint32_t op:8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t src_addr3;
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+};
+
+struct ioat_xor_ext_hw_descriptor {
+ uint64_t src_addr6;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t next;
+ uint64_t reserved[4];
+};
+
+struct ioat_pq_hw_descriptor {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct generic_dma_control control_generic;
+ struct {
+ uint32_t int_enable:1;
+ uint32_t src_snoop_disable:1;
+ uint32_t dest_snoop_disable:1;
+ uint32_t completion_update:1;
+ uint32_t fence:1;
+ uint32_t src_count:3;
+ uint32_t bundle:1;
+ uint32_t dest_dca:1;
+ uint32_t hint:1;
+ uint32_t p_disable:1;
+ uint32_t q_disable:1;
+ uint32_t reserved:11;
+ #define IOAT_OP_PQ 0x89
+ #define IOAT_OP_PQ_VAL 0x8a
+ uint32_t op:8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t p_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t src_addr3;
+ uint8_t coef[8];
+ uint64_t q_addr;
+};
+
+struct ioat_pq_ext_hw_descriptor {
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+ uint64_t src_addr6;
+ uint64_t next;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t reserved[2];
+};
+
+struct ioat_pq_update_hw_descriptor {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct generic_dma_control control_generic;
+ struct {
+ uint32_t int_enable:1;
+ uint32_t src_snoop_disable:1;
+ uint32_t dest_snoop_disable:1;
+ uint32_t completion_update:1;
+ uint32_t fence:1;
+ uint32_t src_cnt:3;
+ uint32_t bundle:1;
+ uint32_t dest_dca:1;
+ uint32_t hint:1;
+ uint32_t p_disable:1;
+ uint32_t q_disable:1;
+ uint32_t reserved:3;
+ uint32_t coef:8;
+ #define IOAT_OP_PQ_UP 0x8b
+ uint32_t op:8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t p_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t p_src;
+ uint64_t q_src;
+ uint64_t q_addr;
+};
+
+struct ioat_raw_hw_descriptor {
+ uint64_t field[8];
+};
+
+struct bus_dmadesc {
+ bus_dmaengine_callback_t callback_fn;
+ void *callback_arg;
+};
+
+struct ioat_descriptor {
+ struct bus_dmadesc bus_dmadesc;
+ union {
+ struct ioat_generic_hw_descriptor *generic;
+ struct ioat_dma_hw_descriptor *dma;
+ struct ioat_fill_hw_descriptor *fill;
+ struct ioat_crc32_hw_descriptor *crc32;
+ struct ioat_xor_hw_descriptor *xor;
+ struct ioat_xor_ext_hw_descriptor *xor_ext;
+ struct ioat_pq_hw_descriptor *pq;
+ struct ioat_pq_ext_hw_descriptor *pq_ext;
+ struct ioat_raw_hw_descriptor *raw;
+ } u;
+ uint32_t id;
+ bus_addr_t hw_desc_bus_addr;
+};
+
+/* Unused by this driver at this time. */
+#define IOAT_OP_MARKER 0x84
+
+/*
+ * Deprecated OPs -- v3 DMA generates an abort if given these. And this driver
+ * doesn't support anything older than v3.
+ */
+#define IOAT_OP_OLD_XOR 0x85
+#define IOAT_OP_OLD_XOR_VAL 0x86
+
+enum ioat_ref_kind {
+ IOAT_DMAENGINE_REF = 0,
+ IOAT_ACTIVE_DESCR_REF,
+ IOAT_NUM_REF_KINDS
+};
+
+/* One of these per allocated PCI device. */
+struct ioat_softc {
+ bus_dmaengine_t dmaengine;
+#define to_ioat_softc(_dmaeng) \
+({ \
+ bus_dmaengine_t *_p = (_dmaeng); \
+ (struct ioat_softc *)((char *)_p - \
+ offsetof(struct ioat_softc, dmaengine)); \
+})
+
+ int version;
+ int chan_idx;
+
+ struct mtx submit_lock;
+ device_t device;
+ bus_space_tag_t pci_bus_tag;
+ bus_space_handle_t pci_bus_handle;
+ int pci_resource_id;
+ struct resource *pci_resource;
+ uint32_t max_xfer_size;
+ uint32_t capabilities;
+ uint16_t intrdelay_max;
+ uint16_t cached_intrdelay;
+
+ struct resource *res;
+ int rid;
+ void *tag;
+
+ bus_dma_tag_t hw_desc_tag;
+ bus_dmamap_t hw_desc_map;
+
+ bus_dma_tag_t comp_update_tag;
+ bus_dmamap_t comp_update_map;
+ uint64_t *comp_update;
+ bus_addr_t comp_update_bus_addr;
+
+ struct callout timer;
+ struct task reset_task;
+
+ boolean_t quiescing;
+ boolean_t destroying;
+ boolean_t is_resize_pending;
+ boolean_t is_completion_pending;
+ boolean_t is_reset_pending;
+ boolean_t is_channel_running;
+ boolean_t intrdelay_supported;
+
+ uint32_t head;
+ uint32_t tail;
+ uint32_t hw_head;
+ uint32_t ring_size_order;
+ bus_addr_t last_seen;
+
+ struct ioat_descriptor **ring;
+
+ struct mtx cleanup_lock;
+ volatile uint32_t refcnt;
+#ifdef INVARIANTS
+ volatile uint32_t refkinds[IOAT_NUM_REF_KINDS];
+#endif
+
+ struct {
+ uint64_t interrupts;
+ uint64_t descriptors_processed;
+ uint64_t descriptors_error;
+ uint64_t descriptors_submitted;
+
+ uint32_t channel_halts;
+ uint32_t last_halt_chanerr;
+ } stats;
+};
+
+void ioat_test_attach(void);
+void ioat_test_detach(void);
+
+static inline uint64_t
+ioat_get_chansts(struct ioat_softc *ioat)
+{
+ uint64_t status;
+
+ if (ioat->version >= IOAT_VER_3_3)
+ status = ioat_read_8(ioat, IOAT_CHANSTS_OFFSET);
+ else
+ /* Must read lower 4 bytes before upper 4 bytes. */
+ status = ioat_read_double_4(ioat, IOAT_CHANSTS_OFFSET);
+ return (status);
+}
+
+static inline void
+ioat_write_chancmp(struct ioat_softc *ioat, uint64_t addr)
+{
+
+ if (ioat->version >= IOAT_VER_3_3)
+ ioat_write_8(ioat, IOAT_CHANCMP_OFFSET_LOW, addr);
+ else
+ ioat_write_double_4(ioat, IOAT_CHANCMP_OFFSET_LOW, addr);
+}
+
+static inline void
+ioat_write_chainaddr(struct ioat_softc *ioat, uint64_t addr)
+{
+
+ if (ioat->version >= IOAT_VER_3_3)
+ ioat_write_8(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr);
+ else
+ ioat_write_double_4(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr);
+}
+
+static inline boolean_t
+is_ioat_active(uint64_t status)
+{
+ return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
+}
+
+static inline boolean_t
+is_ioat_idle(uint64_t status)
+{
+ return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_IDLE);
+}
+
+static inline boolean_t
+is_ioat_halted(uint64_t status)
+{
+ return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
+}
+
+static inline boolean_t
+is_ioat_suspended(uint64_t status)
+{
+ return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
+}
+
+static inline void
+ioat_suspend(struct ioat_softc *ioat)
+{
+ ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_SUSPEND);
+}
+
+static inline void
+ioat_reset(struct ioat_softc *ioat)
+{
+ ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET);
+}
+
+static inline boolean_t
+ioat_reset_pending(struct ioat_softc *ioat)
+{
+ uint8_t cmd;
+
+ cmd = ioat_read_1(ioat, IOAT_CHANCMD_OFFSET);
+ return ((cmd & IOAT_CHANCMD_RESET) != 0);
+}
+
+#endif /* __IOAT_INTERNAL_H__ */
diff --git a/sys/dev/ioat/ioat_test.c b/sys/dev/ioat/ioat_test.c
new file mode 100644
index 0000000..5d27b1b
--- /dev/null
+++ b/sys/dev/ioat/ioat_test.c
@@ -0,0 +1,602 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+#include "ioat.h"
+#include "ioat_hw.h"
+#include "ioat_internal.h"
+#include "ioat_test.h"
+
+#ifndef time_after
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#endif
+
+MALLOC_DEFINE(M_IOAT_TEST, "ioat_test", "ioat test allocations");
+
+#define IOAT_MAX_BUFS 256
+
+struct test_transaction {
+ void *buf[IOAT_MAX_BUFS];
+ uint32_t length;
+ uint32_t depth;
+ struct ioat_test *test;
+ TAILQ_ENTRY(test_transaction) entry;
+};
+
+#define IT_LOCK() mtx_lock(&ioat_test_lk)
+#define IT_UNLOCK() mtx_unlock(&ioat_test_lk)
+#define IT_ASSERT() mtx_assert(&ioat_test_lk, MA_OWNED)
+static struct mtx ioat_test_lk;
+MTX_SYSINIT(ioat_test_lk, &ioat_test_lk, "test coordination mtx", MTX_DEF);
+
+static int g_thread_index = 1;
+static struct cdev *g_ioat_cdev = NULL;
+
+#define ioat_test_log(v, ...) _ioat_test_log((v), "ioat_test: " __VA_ARGS__)
+static inline void _ioat_test_log(int verbosity, const char *fmt, ...);
+
+static void
+ioat_test_transaction_destroy(struct test_transaction *tx)
+{
+ struct ioat_test *test;
+ int i;
+
+ test = tx->test;
+
+ for (i = 0; i < IOAT_MAX_BUFS; i++) {
+ if (tx->buf[i] != NULL) {
+ if (test->testkind == IOAT_TEST_DMA_8K)
+ free(tx->buf[i], M_IOAT_TEST);
+ else
+ contigfree(tx->buf[i], tx->length, M_IOAT_TEST);
+ tx->buf[i] = NULL;
+ }
+ }
+
+ free(tx, M_IOAT_TEST);
+}
+
+static struct
+test_transaction *ioat_test_transaction_create(struct ioat_test *test,
+ unsigned num_buffers)
+{
+ struct test_transaction *tx;
+ unsigned i;
+
+ tx = malloc(sizeof(*tx), M_IOAT_TEST, M_NOWAIT | M_ZERO);
+ if (tx == NULL)
+ return (NULL);
+
+ tx->length = test->buffer_size;
+
+ for (i = 0; i < num_buffers; i++) {
+ if (test->testkind == IOAT_TEST_DMA_8K)
+ tx->buf[i] = malloc(test->buffer_size, M_IOAT_TEST,
+ M_NOWAIT);
+ else
+ tx->buf[i] = contigmalloc(test->buffer_size,
+ M_IOAT_TEST, M_NOWAIT, 0, BUS_SPACE_MAXADDR,
+ PAGE_SIZE, 0);
+
+ if (tx->buf[i] == NULL) {
+ ioat_test_transaction_destroy(tx);
+ return (NULL);
+ }
+ }
+ return (tx);
+}
+
+static void
+dump_hex(void *p, size_t chunks)
+{
+ size_t i, j;
+
+ for (i = 0; i < chunks; i++) {
+ for (j = 0; j < 8; j++)
+ printf("%08x ", ((uint32_t *)p)[i * 8 + j]);
+ printf("\n");
+ }
+}
+
+static bool
+ioat_compare_ok(struct test_transaction *tx)
+{
+ struct ioat_test *test;
+ char *dst, *src;
+ uint32_t i, j;
+
+ test = tx->test;
+
+ for (i = 0; i < tx->depth; i++) {
+ dst = tx->buf[2 * i + 1];
+ src = tx->buf[2 * i];
+
+ if (test->testkind == IOAT_TEST_FILL) {
+ for (j = 0; j < tx->length; j += sizeof(uint64_t)) {
+ if (memcmp(src, &dst[j],
+ MIN(sizeof(uint64_t), tx->length - j))
+ != 0)
+ return (false);
+ }
+ } else if (test->testkind == IOAT_TEST_DMA) {
+ if (memcmp(src, dst, tx->length) != 0)
+ return (false);
+ } else if (test->testkind == IOAT_TEST_RAW_DMA) {
+ if (test->raw_write)
+ dst = test->raw_vtarget;
+ dump_hex(dst, tx->length / 32);
+ }
+ }
+ return (true);
+}
+
+static void
+ioat_dma_test_callback(void *arg, int error)
+{
+ struct test_transaction *tx;
+ struct ioat_test *test;
+
+ if (error != 0)
+ ioat_test_log(0, "%s: Got error: %d\n", __func__, error);
+
+ tx = arg;
+ test = tx->test;
+
+ if (test->verify && !ioat_compare_ok(tx)) {
+ ioat_test_log(0, "miscompare found\n");
+ atomic_add_32(&test->status[IOAT_TEST_MISCOMPARE], tx->depth);
+ } else if (!test->too_late)
+ atomic_add_32(&test->status[IOAT_TEST_OK], tx->depth);
+
+ IT_LOCK();
+ TAILQ_REMOVE(&test->pend_q, tx, entry);
+ TAILQ_INSERT_TAIL(&test->free_q, tx, entry);
+ wakeup(&test->free_q);
+ IT_UNLOCK();
+}
+
+static int
+ioat_test_prealloc_memory(struct ioat_test *test, int index)
+{
+ uint32_t i, j, k;
+ struct test_transaction *tx;
+
+ for (i = 0; i < test->transactions; i++) {
+ tx = ioat_test_transaction_create(test, test->chain_depth * 2);
+ if (tx == NULL) {
+ ioat_test_log(0, "tx == NULL - memory exhausted\n");
+ test->status[IOAT_TEST_NO_MEMORY]++;
+ return (ENOMEM);
+ }
+
+ TAILQ_INSERT_HEAD(&test->free_q, tx, entry);
+
+ tx->test = test;
+ tx->depth = test->chain_depth;
+
+ /* fill in source buffers */
+ for (j = 0; j < (tx->length / sizeof(uint32_t)); j++) {
+ uint32_t val = j + (index << 28);
+
+ for (k = 0; k < test->chain_depth; k++) {
+ ((uint32_t *)tx->buf[2*k])[j] = ~val;
+ ((uint32_t *)tx->buf[2*k+1])[j] = val;
+ }
+ }
+ }
+ return (0);
+}
+
+static void
+ioat_test_release_memory(struct ioat_test *test)
+{
+ struct test_transaction *tx, *s;
+
+ TAILQ_FOREACH_SAFE(tx, &test->free_q, entry, s)
+ ioat_test_transaction_destroy(tx);
+ TAILQ_INIT(&test->free_q);
+
+ TAILQ_FOREACH_SAFE(tx, &test->pend_q, entry, s)
+ ioat_test_transaction_destroy(tx);
+ TAILQ_INIT(&test->pend_q);
+}
+
+static void
+ioat_test_submit_1_tx(struct ioat_test *test, bus_dmaengine_t dma)
+{
+ struct test_transaction *tx;
+ struct bus_dmadesc *desc;
+ bus_dmaengine_callback_t cb;
+ bus_addr_t src, dest;
+ uint64_t fillpattern;
+ uint32_t i, flags;
+
+ desc = NULL;
+
+ IT_LOCK();
+ while (TAILQ_EMPTY(&test->free_q))
+ msleep(&test->free_q, &ioat_test_lk, 0, "test_submit", 0);
+
+ tx = TAILQ_FIRST(&test->free_q);
+ TAILQ_REMOVE(&test->free_q, tx, entry);
+ TAILQ_INSERT_HEAD(&test->pend_q, tx, entry);
+ IT_UNLOCK();
+
+ if (test->testkind != IOAT_TEST_MEMCPY)
+ ioat_acquire(dma);
+ for (i = 0; i < tx->depth; i++) {
+ if (test->testkind == IOAT_TEST_MEMCPY) {
+ memcpy(tx->buf[2 * i + 1], tx->buf[2 * i], tx->length);
+ if (i == tx->depth - 1)
+ ioat_dma_test_callback(tx, 0);
+ continue;
+ }
+
+ src = vtophys((vm_offset_t)tx->buf[2*i]);
+ dest = vtophys((vm_offset_t)tx->buf[2*i+1]);
+
+ if (test->testkind == IOAT_TEST_RAW_DMA) {
+ if (test->raw_write)
+ dest = test->raw_target;
+ else
+ src = test->raw_target;
+ }
+
+ if (i == tx->depth - 1) {
+ cb = ioat_dma_test_callback;
+ flags = DMA_INT_EN;
+ } else {
+ cb = NULL;
+ flags = 0;
+ }
+
+ if (test->testkind == IOAT_TEST_DMA ||
+ test->testkind == IOAT_TEST_RAW_DMA)
+ desc = ioat_copy(dma, dest, src, tx->length, cb, tx,
+ flags);
+ else if (test->testkind == IOAT_TEST_FILL) {
+ fillpattern = *(uint64_t *)tx->buf[2*i];
+ desc = ioat_blockfill(dma, dest, fillpattern,
+ tx->length, cb, tx, flags);
+ } else if (test->testkind == IOAT_TEST_DMA_8K) {
+ bus_addr_t src2, dst2;
+
+ src2 = vtophys((vm_offset_t)tx->buf[2*i] + PAGE_SIZE);
+ dst2 = vtophys((vm_offset_t)tx->buf[2*i+1] + PAGE_SIZE);
+
+ desc = ioat_copy_8k_aligned(dma, dest, dst2, src, src2,
+ cb, tx, flags);
+ }
+ if (desc == NULL)
+ break;
+ }
+ if (test->testkind == IOAT_TEST_MEMCPY)
+ return;
+ ioat_release(dma);
+
+ /*
+ * We couldn't issue an IO -- either the device is being detached or
+ * the HW reset. Essentially spin until the device comes back up or
+ * our timer expires.
+ */
+ if (desc == NULL && tx->depth > 0) {
+ atomic_add_32(&test->status[IOAT_TEST_NO_DMA_ENGINE], tx->depth);
+ IT_LOCK();
+ TAILQ_REMOVE(&test->pend_q, tx, entry);
+ TAILQ_INSERT_HEAD(&test->free_q, tx, entry);
+ IT_UNLOCK();
+ }
+}
+
+static void
+ioat_dma_test(void *arg)
+{
+ struct ioat_softc *ioat;
+ struct ioat_test *test;
+ bus_dmaengine_t dmaengine;
+ uint32_t loops;
+ int index, rc, start, end, error;
+
+ test = arg;
+ memset(__DEVOLATILE(void *, test->status), 0, sizeof(test->status));
+
+ if (test->testkind == IOAT_TEST_DMA_8K &&
+ test->buffer_size != 2 * PAGE_SIZE) {
+ ioat_test_log(0, "Asked for 8k test and buffer size isn't 8k\n");
+ test->status[IOAT_TEST_INVALID_INPUT]++;
+ return;
+ }
+
+ if (test->buffer_size > 1024 * 1024) {
+ ioat_test_log(0, "Buffer size too large >1MB\n");
+ test->status[IOAT_TEST_NO_MEMORY]++;
+ return;
+ }
+
+ if (test->chain_depth * 2 > IOAT_MAX_BUFS) {
+ ioat_test_log(0, "Depth too large (> %u)\n",
+ (unsigned)IOAT_MAX_BUFS / 2);
+ test->status[IOAT_TEST_NO_MEMORY]++;
+ return;
+ }
+
+ if (btoc((uint64_t)test->buffer_size * test->chain_depth *
+ test->transactions) > (physmem / 4)) {
+ ioat_test_log(0, "Sanity check failed -- test would "
+ "use more than 1/4 of phys mem.\n");
+ test->status[IOAT_TEST_NO_MEMORY]++;
+ return;
+ }
+
+ if ((uint64_t)test->transactions * test->chain_depth > (1<<16)) {
+ ioat_test_log(0, "Sanity check failed -- test would "
+ "use more than available IOAT ring space.\n");
+ test->status[IOAT_TEST_NO_MEMORY]++;
+ return;
+ }
+
+ if (test->testkind >= IOAT_NUM_TESTKINDS) {
+ ioat_test_log(0, "Invalid kind %u\n",
+ (unsigned)test->testkind);
+ test->status[IOAT_TEST_INVALID_INPUT]++;
+ return;
+ }
+
+ dmaengine = ioat_get_dmaengine(test->channel_index, M_NOWAIT);
+ if (dmaengine == NULL) {
+ ioat_test_log(0, "Couldn't acquire dmaengine\n");
+ test->status[IOAT_TEST_NO_DMA_ENGINE]++;
+ return;
+ }
+ ioat = to_ioat_softc(dmaengine);
+
+ if (test->testkind == IOAT_TEST_FILL &&
+ (ioat->capabilities & IOAT_DMACAP_BFILL) == 0)
+ {
+ ioat_test_log(0,
+ "Hardware doesn't support block fill, aborting test\n");
+ test->status[IOAT_TEST_INVALID_INPUT]++;
+ goto out;
+ }
+
+ if (test->coalesce_period > ioat->intrdelay_max) {
+ ioat_test_log(0,
+ "Hardware doesn't support intrdelay of %u us.\n",
+ (unsigned)test->coalesce_period);
+ test->status[IOAT_TEST_INVALID_INPUT]++;
+ goto out;
+ }
+ error = ioat_set_interrupt_coalesce(dmaengine, test->coalesce_period);
+ if (error == ENODEV && test->coalesce_period == 0)
+ error = 0;
+ if (error != 0) {
+ ioat_test_log(0, "ioat_set_interrupt_coalesce: %d\n", error);
+ test->status[IOAT_TEST_INVALID_INPUT]++;
+ goto out;
+ }
+
+ if (test->zero_stats)
+ memset(&ioat->stats, 0, sizeof(ioat->stats));
+
+ if (test->testkind == IOAT_TEST_RAW_DMA) {
+ if (test->raw_is_virtual) {
+ test->raw_vtarget = (void *)test->raw_target;
+ test->raw_target = vtophys(test->raw_vtarget);
+ } else {
+ test->raw_vtarget = pmap_mapdev(test->raw_target,
+ test->buffer_size);
+ }
+ }
+
+ index = g_thread_index++;
+ TAILQ_INIT(&test->free_q);
+ TAILQ_INIT(&test->pend_q);
+
+ if (test->duration == 0)
+ ioat_test_log(1, "Thread %d: num_loops remaining: 0x%08x\n",
+ index, test->transactions);
+ else
+ ioat_test_log(1, "Thread %d: starting\n", index);
+
+ rc = ioat_test_prealloc_memory(test, index);
+ if (rc != 0) {
+ ioat_test_log(0, "prealloc_memory: %d\n", rc);
+ goto out;
+ }
+ wmb();
+
+ test->too_late = false;
+ start = ticks;
+ end = start + (((sbintime_t)test->duration * hz) / 1000);
+
+ for (loops = 0;; loops++) {
+ if (test->duration == 0 && loops >= test->transactions)
+ break;
+ else if (test->duration != 0 && time_after(ticks, end)) {
+ test->too_late = true;
+ break;
+ }
+
+ ioat_test_submit_1_tx(test, dmaengine);
+ }
+
+ ioat_test_log(1, "Test Elapsed: %d ticks (overrun %d), %d sec.\n",
+ ticks - start, ticks - end, (ticks - start) / hz);
+
+ IT_LOCK();
+ while (!TAILQ_EMPTY(&test->pend_q))
+ msleep(&test->free_q, &ioat_test_lk, 0, "ioattestcompl", hz);
+ IT_UNLOCK();
+
+ ioat_test_log(1, "Test Elapsed2: %d ticks (overrun %d), %d sec.\n",
+ ticks - start, ticks - end, (ticks - start) / hz);
+
+ ioat_test_release_memory(test);
+out:
+ if (test->testkind == IOAT_TEST_RAW_DMA && !test->raw_is_virtual)
+ pmap_unmapdev((vm_offset_t)test->raw_vtarget,
+ test->buffer_size);
+ ioat_put_dmaengine(dmaengine);
+}
+
+static int
+ioat_test_open(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+
+ return (0);
+}
+
+static int
+ioat_test_close(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+
+ return (0);
+}
+
+static int
+ioat_test_ioctl(struct cdev *dev, unsigned long cmd, caddr_t arg, int flag,
+ struct thread *td)
+{
+
+ switch (cmd) {
+ case IOAT_DMATEST:
+ ioat_dma_test(arg);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static struct cdevsw ioat_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = 0,
+ .d_open = ioat_test_open,
+ .d_close = ioat_test_close,
+ .d_ioctl = ioat_test_ioctl,
+ .d_name = "ioat_test",
+};
+
+static int
+enable_ioat_test(bool enable)
+{
+
+ mtx_assert(&Giant, MA_OWNED);
+
+ if (enable && g_ioat_cdev == NULL) {
+ g_ioat_cdev = make_dev(&ioat_cdevsw, 0, UID_ROOT, GID_WHEEL,
+ 0600, "ioat_test");
+ } else if (!enable && g_ioat_cdev != NULL) {
+ destroy_dev(g_ioat_cdev);
+ g_ioat_cdev = NULL;
+ }
+ return (0);
+}
+
+static int
+sysctl_enable_ioat_test(SYSCTL_HANDLER_ARGS)
+{
+ int error, enabled;
+
+ enabled = (g_ioat_cdev != NULL);
+ error = sysctl_handle_int(oidp, &enabled, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ enable_ioat_test(enabled);
+ return (0);
+}
+SYSCTL_PROC(_hw_ioat, OID_AUTO, enable_ioat_test, CTLTYPE_INT | CTLFLAG_RW,
+ 0, 0, sysctl_enable_ioat_test, "I",
+ "Non-zero: Enable the /dev/ioat_test device");
+
+void
+ioat_test_attach(void)
+{
+#ifdef notyet
+ char *val;
+
+ val = kern_getenv("hw.ioat.enable_ioat_test");
+ if (val != NULL && strcmp(val, "0") != 0) {
+#else
+ int val = 0;
+
+ TUNABLE_INT_FETCH("hw.ioat.enable_ioat_test", &val);
+ if (val != 0) {
+#endif
+ mtx_lock(&Giant);
+ enable_ioat_test(true);
+ mtx_unlock(&Giant);
+ }
+#ifdef notyet
+ freeenv(val);
+#endif
+}
+
+void
+ioat_test_detach(void)
+{
+
+ mtx_lock(&Giant);
+ enable_ioat_test(false);
+ mtx_unlock(&Giant);
+}
+
+static inline void
+_ioat_test_log(int verbosity, const char *fmt, ...)
+{
+ va_list argp;
+
+ if (verbosity > g_ioat_debug_level)
+ return;
+
+ va_start(argp, fmt);
+ vprintf(fmt, argp);
+ va_end(argp);
+}
diff --git a/sys/dev/ioat/ioat_test.h b/sys/dev/ioat/ioat_test.h
new file mode 100644
index 0000000..8ef521c
--- /dev/null
+++ b/sys/dev/ioat/ioat_test.h
@@ -0,0 +1,90 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+__FBSDID("$FreeBSD$");
+
+#ifndef __IOAT_TEST_H__
+#define __IOAT_TEST_H__
+
+enum ioat_res {
+ IOAT_TEST_OK = 0,
+ IOAT_TEST_NO_DMA_ENGINE,
+ IOAT_TEST_NO_MEMORY,
+ IOAT_TEST_MISCOMPARE,
+ IOAT_TEST_INVALID_INPUT,
+ IOAT_NUM_RES
+};
+
+enum ioat_test_kind {
+ IOAT_TEST_FILL = 0,
+ IOAT_TEST_DMA,
+ IOAT_TEST_RAW_DMA,
+ IOAT_TEST_DMA_8K,
+ IOAT_TEST_MEMCPY,
+ IOAT_NUM_TESTKINDS
+};
+
+struct test_transaction;
+
+struct ioat_test {
+ volatile uint32_t status[IOAT_NUM_RES];
+ uint32_t channel_index;
+
+ enum ioat_test_kind testkind;
+
+ /* HW max of 1MB */
+ uint32_t buffer_size;
+ uint32_t chain_depth;
+ uint32_t transactions;
+
+ /*
+ * If non-zero, duration is time in ms;
+ * If zero, bounded by 'transactions' above.
+ */
+ uint32_t duration;
+
+ /* If true, check for miscompares after a copy. */
+ bool verify;
+
+ /* DMA directly to/from some memory address */
+ uint64_t raw_target;
+ void *raw_vtarget;
+ bool raw_write;
+ bool raw_is_virtual;
+
+ bool zero_stats;
+ /* Configure coalesce period */
+ uint16_t coalesce_period;
+
+ /* Internal usage -- not test inputs */
+ TAILQ_HEAD(, test_transaction) free_q;
+ TAILQ_HEAD(, test_transaction) pend_q;
+ volatile bool too_late;
+};
+
+#define IOAT_DMATEST _IOWR('i', 0, struct ioat_test)
+
+#endif /* __IOAT_TEST_H__ */
diff --git a/sys/dev/isp/isp.c b/sys/dev/isp/isp.c
index ceb54ed..aa36453 100644
--- a/sys/dev/isp/isp.c
+++ b/sys/dev/isp/isp.c
@@ -3206,7 +3206,7 @@ isp_pdb_sync(ispsoftc_t *isp, int chan)
case FC_PORTDB_STATE_DEAD:
lp->state = FC_PORTDB_STATE_NIL;
isp_async(isp, ISPASYNC_DEV_GONE, chan, lp);
- if (lp->autologin == 0) {
+ if ((lp->portid & 0xffff00) != 0) {
(void) isp_plogx(isp, chan, lp->handle,
lp->portid,
PLOGX_FLG_CMD_LOGO |
@@ -3304,7 +3304,6 @@ isp_pdb_add_update(ispsoftc_t *isp, int chan, isp_pdb_t *pdb)
}
ISP_MEMZERO(lp, sizeof (fcportdb_t));
- lp->autologin = 1;
lp->probational = 0;
lp->state = FC_PORTDB_STATE_NEW;
lp->portid = lp->new_portid = pdb->portid;
@@ -3808,6 +3807,9 @@ fail:
goto fail;
}
+ if (lp->state == FC_PORTDB_STATE_ZOMBIE)
+ goto relogin;
+
/*
* See if we're still logged into it.
*
@@ -4697,6 +4699,8 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
tmf->tmf_tidhi = lp->portid >> 16;
tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan);
isp_put_24xx_tmf(isp, tmf, isp->isp_iocb);
+ if (isp->isp_dblev & ISP_LOGDEBUG1)
+ isp_print_bytes(isp, "TMF IOCB request", QENTRY_LEN, isp->isp_iocb);
MEMORYBARRIER(isp, SYNC_IFORDEV, 0, QENTRY_LEN, chan);
fcp->sendmarker = 1;
@@ -4713,6 +4717,8 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
break;
MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan);
+ if (isp->isp_dblev & ISP_LOGDEBUG1)
+ isp_print_bytes(isp, "TMF IOCB response", QENTRY_LEN, &((isp24xx_statusreq_t *)isp->isp_iocb)[1]);
sp = (isp24xx_statusreq_t *) local;
isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)isp->isp_iocb)[1], sp);
if (sp->req_completion_status == 0) {
@@ -4779,6 +4785,8 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
ab->abrt_tidhi = lp->portid >> 16;
ab->abrt_vpidx = ISP_GET_VPIDX(isp, chan);
isp_put_24xx_abrt(isp, ab, isp->isp_iocb);
+ if (isp->isp_dblev & ISP_LOGDEBUG1)
+ isp_print_bytes(isp, "AB IOCB quest", QENTRY_LEN, isp->isp_iocb);
MEMORYBARRIER(isp, SYNC_IFORDEV, 0, 2 * QENTRY_LEN, chan);
ISP_MEMZERO(&mbs, sizeof (mbs));
@@ -4794,6 +4802,8 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
break;
MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan);
+ if (isp->isp_dblev & ISP_LOGDEBUG1)
+ isp_print_bytes(isp, "AB IOCB response", QENTRY_LEN, &((isp24xx_abrt_t *)isp->isp_iocb)[1]);
isp_get_24xx_abrt(isp, &((isp24xx_abrt_t *)isp->isp_iocb)[1], ab);
if (ab->abrt_nphdl == ISP24XX_ABRT_OKAY) {
return (0);
@@ -6515,6 +6525,8 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
{
const char *reason;
uint8_t sts = sp->req_completion_status & 0xff;
+ fcparam *fcp = FCPARAM(isp, 0);
+ fcportdb_t *lp;
/*
* It was there (maybe)- treat as a selection timeout.
@@ -6532,8 +6544,8 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
* to force a re-login of this unit. If we're on fabric,
* then we'll have to log in again as a matter of course.
*/
- if (FCPARAM(isp, 0)->isp_topo == TOPO_NL_PORT ||
- FCPARAM(isp, 0)->isp_topo == TOPO_FL_PORT) {
+ if (fcp->isp_topo == TOPO_NL_PORT ||
+ fcp->isp_topo == TOPO_FL_PORT) {
mbreg_t mbs;
MBSINIT(&mbs, MBOX_INIT_LIP, MBLOGALL, 0);
if (ISP_CAP_2KLOGIN(isp)) {
@@ -6542,7 +6554,12 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
isp_mboxcmd_qnw(isp, &mbs, 1);
}
if (XS_NOERR(xs)) {
- XS_SETERR(xs, HBA_SELTIMEOUT);
+ lp = &fcp->portdb[XS_TGT(xs)];
+ if (lp->state == FC_PORTDB_STATE_ZOMBIE) {
+ *XS_STSP(xs) = SCSI_BUSY;
+ XS_SETERR(xs, HBA_TGTBSY);
+ } else
+ XS_SETERR(xs, HBA_SELTIMEOUT);
}
return;
}
@@ -6666,6 +6683,8 @@ isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp, XS_T *xs, long *
{
const char *reason;
uint8_t sts = sp->req_completion_status & 0xff;
+ fcparam *fcp = FCPARAM(isp, XS_CHANNEL(xs));
+ fcportdb_t *lp;
/*
* It was there (maybe)- treat as a selection timeout.
@@ -6683,7 +6702,12 @@ isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp, XS_T *xs, long *
* There is no MBOX_INIT_LIP for the 24XX.
*/
if (XS_NOERR(xs)) {
- XS_SETERR(xs, HBA_SELTIMEOUT);
+ lp = &fcp->portdb[XS_TGT(xs)];
+ if (lp->state == FC_PORTDB_STATE_ZOMBIE) {
+ *XS_STSP(xs) = SCSI_BUSY;
+ XS_SETERR(xs, HBA_TGTBSY);
+ } else
+ XS_SETERR(xs, HBA_SELTIMEOUT);
}
return;
}
diff --git a/sys/dev/isp/isp_freebsd.c b/sys/dev/isp/isp_freebsd.c
index 0473b1b..c6b8dc4 100644
--- a/sys/dev/isp/isp_freebsd.c
+++ b/sys/dev/isp/isp_freebsd.c
@@ -1363,7 +1363,7 @@ isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
* and status, don't do it again and do the status portion now.
*/
if (atp->sendst) {
- isp_prt(isp, ISP_LOGTINFO, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u",
+ isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u",
cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit);
xfrlen = 0; /* we already did the data transfer */
atp->sendst = 0;
@@ -2103,7 +2103,7 @@ isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
"%s: [0x%x] no state pointer for lun %jx or wildcard",
__func__, aep->at_rxid, (uintmax_t)lun);
if (lun == 0) {
- isp_endcmd(isp, aep, nphdl, SCSI_STATUS_BUSY, 0);
+ isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
} else {
isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
}
diff --git a/sys/dev/isp/isp_library.c b/sys/dev/isp/isp_library.c
index 48e0535..f186e50 100644
--- a/sys/dev/isp/isp_library.c
+++ b/sys/dev/isp/isp_library.c
@@ -437,8 +437,8 @@ isp_dump_portdb(ispsoftc_t *isp, int chan)
}
isp_gen_role_str(buf1, sizeof (buf1), lp->prli_word3);
isp_gen_role_str(buf2, sizeof (buf2), lp->new_prli_word3);
- isp_prt(isp, ISP_LOGALL, "Chan %d [%d]: hdl 0x%x %s al%d %s 0x%06x =>%s 0x%06x; WWNN 0x%08x%08x WWPN 0x%08x%08x",
- chan, i, lp->handle, dbs[lp->state], lp->autologin, buf1, lp->portid, buf2, lp->new_portid,
+ isp_prt(isp, ISP_LOGALL, "Chan %d [%d]: hdl 0x%x %s %s 0x%06x =>%s 0x%06x; WWNN 0x%08x%08x WWPN 0x%08x%08x",
+ chan, i, lp->handle, dbs[lp->state], buf1, lp->portid, buf2, lp->new_portid,
(uint32_t) (lp->node_wwn >> 32), (uint32_t) (lp->node_wwn), (uint32_t) (lp->port_wwn >> 32), (uint32_t) (lp->port_wwn));
}
}
diff --git a/sys/dev/isp/isp_target.c b/sys/dev/isp/isp_target.c
index c6af888..c5c277f 100644
--- a/sys/dev/isp/isp_target.c
+++ b/sys/dev/isp/isp_target.c
@@ -169,7 +169,7 @@ isp_target_notify(ispsoftc_t *isp, void *vptr, uint32_t *optrp)
* Check for and do something with commands whose
* IULEN extends past a single queue entry.
*/
- len = at7iop->at_ta_len & 0xfffff;
+ len = at7iop->at_ta_len & 0x0fff;
if (len > (QENTRY_LEN - 8)) {
len -= (QENTRY_LEN - 8);
isp_prt(isp, ISP_LOGINFO, "long IU length (%d) ignored", len);
@@ -539,13 +539,22 @@ isp_endcmd(ispsoftc_t *isp, ...)
} else if (code & ECMD_SVALID) {
cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS;
cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8);
- cto->rsp.m1.ct_resplen = cto->ct_senselen = min(16, MAXRESPLEN_24XX);
+ cto->ct_senselen = min(16, MAXRESPLEN_24XX);
ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
cto->rsp.m1.ct_resp[0] = 0xf0;
cto->rsp.m1.ct_resp[2] = (code >> 12) & 0xf;
cto->rsp.m1.ct_resp[7] = 8;
cto->rsp.m1.ct_resp[12] = (code >> 16) & 0xff;
cto->rsp.m1.ct_resp[13] = (code >> 24) & 0xff;
+ } else if (code & ECMD_RVALID) {
+ cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS;
+ cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8);
+ cto->rsp.m1.ct_resplen = 4;
+ ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
+ cto->rsp.m1.ct_resp[0] = (code >> 12) & 0xf;
+ cto->rsp.m1.ct_resp[1] = (code >> 16) & 0xff;
+ cto->rsp.m1.ct_resp[2] = (code >> 24) & 0xff;
+ cto->rsp.m1.ct_resp[3] = 0;
} else {
cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS;
}
@@ -764,6 +773,7 @@ isp_got_tmf_24xx(ispsoftc_t *isp, at7_entry_t *aep)
isp_notify_t notify;
static const char f1[] = "%s from PortID 0x%06x lun %x seq 0x%08x";
static const char f2[] = "unknown Task Flag 0x%x lun %x PortID 0x%x tag 0x%08x";
+ fcportdb_t *lp;
uint16_t chan;
uint32_t sid, did;
@@ -774,21 +784,23 @@ isp_got_tmf_24xx(ispsoftc_t *isp, at7_entry_t *aep)
notify.nt_tagval = aep->at_rxid;
notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32);
notify.nt_lreserved = aep;
- sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | (aep->at_hdr.s_id[2]);
-
- /* Channel has to derived from D_ID */
+ sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
- for (chan = 0; chan < isp->isp_nchan; chan++) {
- if (FCPARAM(isp, chan)->isp_portid == did) {
- break;
+ if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) {
+ /* Channel has to be derived from D_ID */
+ isp_find_chan_by_did(isp, did, &chan);
+ if (chan == ISP_NOCHAN) {
+ isp_prt(isp, ISP_LOGWARN, "%s: D_ID 0x%x not found on any channel", __func__, did);
+ isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0);
+ return;
}
+ } else {
+ chan = 0;
}
- if (chan == isp->isp_nchan) {
- isp_prt(isp, ISP_LOGWARN, "%s: D_ID 0x%x not found on any channel", __func__, did);
- /* just drop on the floor */
- return;
- }
- notify.nt_nphdl = NIL_HANDLE; /* unknown here */
+ if (isp_find_pdb_by_portid(isp, chan, sid, &lp))
+ notify.nt_nphdl = lp->handle;
+ else
+ notify.nt_nphdl = NIL_HANDLE;
notify.nt_sid = sid;
notify.nt_did = did;
notify.nt_channel = chan;
@@ -816,6 +828,7 @@ isp_got_tmf_24xx(ispsoftc_t *isp, at7_entry_t *aep)
} else {
isp_prt(isp, ISP_LOGWARN, f2, aep->at_cmnd.fcp_cmnd_task_management, notify.nt_lun, sid, aep->at_rxid);
notify.nt_ncode = NT_UNKNOWN;
+ isp_endcmd(isp, aep, notify.nt_nphdl, chan, ECMD_RVALID | (0x4 << 12), 0);
return;
}
isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify);
diff --git a/sys/dev/isp/ispvar.h b/sys/dev/isp/ispvar.h
index a97a04f..6f7f871 100644
--- a/sys/dev/isp/ispvar.h
+++ b/sys/dev/isp/ispvar.h
@@ -380,9 +380,6 @@ typedef struct {
uint16_t handle;
/*
- * A device is 'autologin' if the firmware automatically logs into
- * it (re-logins as needed). Basically, local private loop devices.
- *
* PRLI word 3 parameters contains role as well as other things.
*
* The state is the current state of this entry.
@@ -396,8 +393,7 @@ typedef struct {
*/
uint16_t prli_word3; /* PRLI parameters */
uint16_t new_prli_word3; /* Incoming new PRLI parameters */
- uint16_t : 11,
- autologin : 1, /* F/W does PLOGI/PLOGO */
+ uint16_t : 12,
probational : 1,
state : 3;
uint32_t : 6,
@@ -1147,7 +1143,8 @@ int isp_target_put_atio(ispsoftc_t *, void *);
*/
int isp_endcmd(ispsoftc_t *, ...);
#define ECMD_SVALID 0x100
-#define ECMD_TERMINATE 0x200
+#define ECMD_RVALID 0x200
+#define ECMD_TERMINATE 0x400
/*
* Handle an asynchronous event
diff --git a/sys/dev/mlx5/mlx5_en/en.h b/sys/dev/mlx5/mlx5_en/en.h
index b3433f7..c4415c1 100644
--- a/sys/dev/mlx5/mlx5_en/en.h
+++ b/sys/dev/mlx5/mlx5_en/en.h
@@ -393,6 +393,8 @@ struct mlx5e_params {
m(+1, u64 tx_coalesce_usecs, "tx_coalesce_usecs", "Limit in usec for joining tx packets") \
m(+1, u64 tx_coalesce_pkts, "tx_coalesce_pkts", "Maximum number of tx packets to join") \
m(+1, u64 tx_coalesce_mode, "tx_coalesce_mode", "0: EQE mode 1: CQE mode") \
+ m(+1, u64 tx_completion_fact, "tx_completion_fact", "1..MAX: Completion event ratio") \
+ m(+1, u64 tx_completion_fact_max, "tx_completion_fact_max", "Maximum completion event ratio") \
m(+1, u64 hw_lro, "hw_lro", "set to enable hw_lro") \
m(+1, u64 cqe_zipping, "cqe_zipping", "0 : CQE zipping disabled")
@@ -498,6 +500,17 @@ struct mlx5e_sq {
/* dirtied @xmit */
u16 pc __aligned(MLX5E_CACHELINE_SIZE);
u16 bf_offset;
+ u16 cev_counter; /* completion event counter */
+ u16 cev_factor; /* completion event factor */
+ u32 cev_next_state; /* next completion event state */
+#define MLX5E_CEV_STATE_INITIAL 0 /* timer not started */
+#define MLX5E_CEV_STATE_SEND_NOPS 1 /* send NOPs */
+#define MLX5E_CEV_STATE_HOLD_NOPS 2 /* don't send NOPs yet */
+ struct callout cev_callout;
+ union {
+ u32 d32[2];
+ u64 d64;
+ } doorbell;
struct mlx5e_sq_stats stats;
struct mlx5e_cq cq;
@@ -745,8 +758,7 @@ int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
static inline void
-mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5e_tx_wqe *wqe, int bf_sz)
+mlx5e_tx_notify_hw(struct mlx5e_sq *sq, u32 *wqe, int bf_sz)
{
u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
@@ -762,13 +774,13 @@ mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
wmb();
if (bf_sz) {
- __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+ __iowrite64_copy(sq->uar_bf_map + ofst, wqe, bf_sz);
/* flush the write-combining mapped buffer */
wmb();
} else {
- mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+ mlx5_write64(wqe, sq->uar_map + ofst, NULL);
}
sq->bf_offset ^= sq->bf_buf_size;
@@ -788,7 +800,8 @@ void mlx5e_create_ethtool(struct mlx5e_priv *);
void mlx5e_create_stats(struct sysctl_ctx_list *,
struct sysctl_oid_list *, const char *,
const char **, unsigned, u64 *);
-void mlx5e_send_nop(struct mlx5e_sq *, u32, bool);
+void mlx5e_send_nop(struct mlx5e_sq *, u32);
+void mlx5e_sq_cev_timeout(void *);
int mlx5e_refresh_channel_params(struct mlx5e_priv *);
#endif /* _MLX5_EN_H_ */
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c b/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
index f7993e9..45500d7 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
@@ -48,6 +48,45 @@ mlx5e_create_stats(struct sysctl_ctx_list *ctx,
}
}
+static void
+mlx5e_ethtool_sync_tx_completion_fact(struct mlx5e_priv *priv)
+{
+ /*
+ * Limit the maximum distance between completion events to
+ * half of the currently set TX queue size.
+ *
+ * The maximum number of queue entries a single IP packet can
+ * consume is given by MLX5_SEND_WQE_MAX_WQEBBS.
+ *
+ * The worst case max value is then given as below:
+ */
+ uint64_t max = priv->params_ethtool.tx_queue_size /
+ (2 * MLX5_SEND_WQE_MAX_WQEBBS);
+
+ /*
+ * Update the maximum completion factor value in case the
+ * tx_queue_size field changed. Ensure we don't overflow
+ * 16-bits.
+ */
+ if (max < 1)
+ max = 1;
+ else if (max > 65535)
+ max = 65535;
+ priv->params_ethtool.tx_completion_fact_max = max;
+
+ /*
+ * Verify that the current TX completion factor is within the
+ * given limits:
+ */
+ if (priv->params_ethtool.tx_completion_fact < 1)
+ priv->params_ethtool.tx_completion_fact = 1;
+ else if (priv->params_ethtool.tx_completion_fact > max)
+ priv->params_ethtool.tx_completion_fact = max;
+}
+
+#define MLX5_PARAM_OFFSET(n) \
+ __offsetof(struct mlx5e_priv, params_ethtool.n)
+
static int
mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
{
@@ -74,129 +113,222 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
error = ENXIO;
goto done;
}
- /* import RX coal time */
- if (priv->params_ethtool.rx_coalesce_usecs < 1)
- priv->params_ethtool.rx_coalesce_usecs = 0;
- else if (priv->params_ethtool.rx_coalesce_usecs >
- MLX5E_FLD_MAX(cqc, cq_period)) {
- priv->params_ethtool.rx_coalesce_usecs =
- MLX5E_FLD_MAX(cqc, cq_period);
- }
- priv->params.rx_cq_moderation_usec = priv->params_ethtool.rx_coalesce_usecs;
-
- /* import RX coal pkts */
- if (priv->params_ethtool.rx_coalesce_pkts < 1)
- priv->params_ethtool.rx_coalesce_pkts = 0;
- else if (priv->params_ethtool.rx_coalesce_pkts >
- MLX5E_FLD_MAX(cqc, cq_max_count)) {
- priv->params_ethtool.rx_coalesce_pkts =
- MLX5E_FLD_MAX(cqc, cq_max_count);
- }
- priv->params.rx_cq_moderation_pkts = priv->params_ethtool.rx_coalesce_pkts;
-
- /* import TX coal time */
- if (priv->params_ethtool.tx_coalesce_usecs < 1)
- priv->params_ethtool.tx_coalesce_usecs = 0;
- else if (priv->params_ethtool.tx_coalesce_usecs >
- MLX5E_FLD_MAX(cqc, cq_period)) {
- priv->params_ethtool.tx_coalesce_usecs =
- MLX5E_FLD_MAX(cqc, cq_period);
- }
- priv->params.tx_cq_moderation_usec = priv->params_ethtool.tx_coalesce_usecs;
-
- /* import TX coal pkts */
- if (priv->params_ethtool.tx_coalesce_pkts < 1)
- priv->params_ethtool.tx_coalesce_pkts = 0;
- else if (priv->params_ethtool.tx_coalesce_pkts >
- MLX5E_FLD_MAX(cqc, cq_max_count)) {
- priv->params_ethtool.tx_coalesce_pkts = MLX5E_FLD_MAX(cqc, cq_max_count);
- }
- priv->params.tx_cq_moderation_pkts = priv->params_ethtool.tx_coalesce_pkts;
-
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened) {
- u64 *xarg = priv->params_ethtool.arg + arg2;
-
- if (xarg == &priv->params_ethtool.tx_coalesce_pkts ||
- xarg == &priv->params_ethtool.rx_coalesce_pkts ||
- xarg == &priv->params_ethtool.tx_coalesce_usecs ||
- xarg == &priv->params_ethtool.rx_coalesce_usecs) {
- /* avoid downing and upping the network interface */
+
+ switch (MLX5_PARAM_OFFSET(arg[arg2])) {
+ case MLX5_PARAM_OFFSET(rx_coalesce_usecs):
+ /* import RX coal time */
+ if (priv->params_ethtool.rx_coalesce_usecs < 1)
+ priv->params_ethtool.rx_coalesce_usecs = 0;
+ else if (priv->params_ethtool.rx_coalesce_usecs >
+ MLX5E_FLD_MAX(cqc, cq_period)) {
+ priv->params_ethtool.rx_coalesce_usecs =
+ MLX5E_FLD_MAX(cqc, cq_period);
+ }
+ priv->params.rx_cq_moderation_usec =
+ priv->params_ethtool.rx_coalesce_usecs;
+
+ /* check to avoid down and up the network interface */
+ if (was_opened)
error = mlx5e_refresh_channel_params(priv);
- goto done;
+ break;
+
+ case MLX5_PARAM_OFFSET(rx_coalesce_pkts):
+ /* import RX coal pkts */
+ if (priv->params_ethtool.rx_coalesce_pkts < 1)
+ priv->params_ethtool.rx_coalesce_pkts = 0;
+ else if (priv->params_ethtool.rx_coalesce_pkts >
+ MLX5E_FLD_MAX(cqc, cq_max_count)) {
+ priv->params_ethtool.rx_coalesce_pkts =
+ MLX5E_FLD_MAX(cqc, cq_max_count);
}
- mlx5e_close_locked(priv->ifp);
- }
- /* import TX queue size */
- if (priv->params_ethtool.tx_queue_size <
- (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
- priv->params_ethtool.tx_queue_size =
- (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
- } else if (priv->params_ethtool.tx_queue_size >
- priv->params_ethtool.tx_queue_size_max) {
+ priv->params.rx_cq_moderation_pkts =
+ priv->params_ethtool.rx_coalesce_pkts;
+
+ /* check to avoid down and up the network interface */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_coalesce_usecs):
+ /* import TX coal time */
+ if (priv->params_ethtool.tx_coalesce_usecs < 1)
+ priv->params_ethtool.tx_coalesce_usecs = 0;
+ else if (priv->params_ethtool.tx_coalesce_usecs >
+ MLX5E_FLD_MAX(cqc, cq_period)) {
+ priv->params_ethtool.tx_coalesce_usecs =
+ MLX5E_FLD_MAX(cqc, cq_period);
+ }
+ priv->params.tx_cq_moderation_usec =
+ priv->params_ethtool.tx_coalesce_usecs;
+
+ /* check to avoid down and up the network interface */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_coalesce_pkts):
+ /* import TX coal pkts */
+ if (priv->params_ethtool.tx_coalesce_pkts < 1)
+ priv->params_ethtool.tx_coalesce_pkts = 0;
+ else if (priv->params_ethtool.tx_coalesce_pkts >
+ MLX5E_FLD_MAX(cqc, cq_max_count)) {
+ priv->params_ethtool.tx_coalesce_pkts =
+ MLX5E_FLD_MAX(cqc, cq_max_count);
+ }
+ priv->params.tx_cq_moderation_pkts =
+ priv->params_ethtool.tx_coalesce_pkts;
+
+ /* check to avoid down and up the network interface */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_queue_size):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import TX queue size */
+ if (priv->params_ethtool.tx_queue_size <
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+ priv->params_ethtool.tx_queue_size =
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ } else if (priv->params_ethtool.tx_queue_size >
+ priv->params_ethtool.tx_queue_size_max) {
+ priv->params_ethtool.tx_queue_size =
+ priv->params_ethtool.tx_queue_size_max;
+ }
+ /* store actual TX queue size */
+ priv->params.log_sq_size =
+ order_base_2(priv->params_ethtool.tx_queue_size);
priv->params_ethtool.tx_queue_size =
- priv->params_ethtool.tx_queue_size_max;
- }
- priv->params.log_sq_size =
- order_base_2(priv->params_ethtool.tx_queue_size);
+ 1 << priv->params.log_sq_size;
- /* import RX queue size */
- if (priv->params_ethtool.rx_queue_size <
- (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
- priv->params_ethtool.rx_queue_size =
- (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
- } else if (priv->params_ethtool.rx_queue_size >
- priv->params_ethtool.rx_queue_size_max) {
+ /* verify TX completion factor */
+ mlx5e_ethtool_sync_tx_completion_fact(priv);
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(rx_queue_size):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import RX queue size */
+ if (priv->params_ethtool.rx_queue_size <
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ priv->params_ethtool.rx_queue_size =
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ } else if (priv->params_ethtool.rx_queue_size >
+ priv->params_ethtool.rx_queue_size_max) {
+ priv->params_ethtool.rx_queue_size =
+ priv->params_ethtool.rx_queue_size_max;
+ }
+ /* store actual RX queue size */
+ priv->params.log_rq_size =
+ order_base_2(priv->params_ethtool.rx_queue_size);
priv->params_ethtool.rx_queue_size =
- priv->params_ethtool.rx_queue_size_max;
- }
- priv->params.log_rq_size =
- order_base_2(priv->params_ethtool.rx_queue_size);
-
- priv->params.min_rx_wqes = min_t (u16,
- priv->params_ethtool.rx_queue_size - 1,
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
-
- /* import number of channels */
- if (priv->params_ethtool.channels < 1)
- priv->params_ethtool.channels = 1;
- else if (priv->params_ethtool.channels >
- (u64) priv->mdev->priv.eq_table.num_comp_vectors) {
- priv->params_ethtool.channels =
- (u64) priv->mdev->priv.eq_table.num_comp_vectors;
- }
- priv->params.num_channels = priv->params_ethtool.channels;
-
- /* import RX mode */
- if (priv->params_ethtool.rx_coalesce_mode != 0)
- priv->params_ethtool.rx_coalesce_mode = 1;
- priv->params.rx_cq_moderation_mode = priv->params_ethtool.rx_coalesce_mode;
-
- /* import TX mode */
- if (priv->params_ethtool.tx_coalesce_mode != 0)
- priv->params_ethtool.tx_coalesce_mode = 1;
- priv->params.tx_cq_moderation_mode = priv->params_ethtool.tx_coalesce_mode;
-
- /* we always agree to turn off HW LRO - but not always to turn on */
- if (priv->params_ethtool.hw_lro != 0) {
- if ((priv->ifp->if_capenable & IFCAP_LRO) &&
- MLX5_CAP_ETH(priv->mdev, lro_cap)) {
- priv->params.hw_lro_en = 1;
- priv->params_ethtool.hw_lro = 1;
+ 1 << priv->params.log_rq_size;
+
+ /* update least number of RX WQEs */
+ priv->params.min_rx_wqes = min(
+ priv->params_ethtool.rx_queue_size - 1,
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(channels):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import number of channels */
+ if (priv->params_ethtool.channels < 1)
+ priv->params_ethtool.channels = 1;
+ else if (priv->params_ethtool.channels >
+ (u64) priv->mdev->priv.eq_table.num_comp_vectors) {
+ priv->params_ethtool.channels =
+ (u64) priv->mdev->priv.eq_table.num_comp_vectors;
+ }
+ priv->params.num_channels = priv->params_ethtool.channels;
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(rx_coalesce_mode):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import RX coalesce mode */
+ if (priv->params_ethtool.rx_coalesce_mode != 0)
+ priv->params_ethtool.rx_coalesce_mode = 1;
+ priv->params.rx_cq_moderation_mode =
+ priv->params_ethtool.rx_coalesce_mode;
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_coalesce_mode):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import TX coalesce mode */
+ if (priv->params_ethtool.tx_coalesce_mode != 0)
+ priv->params_ethtool.tx_coalesce_mode = 1;
+ priv->params.tx_cq_moderation_mode =
+ priv->params_ethtool.tx_coalesce_mode;
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(hw_lro):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import HW LRO mode */
+ if (priv->params_ethtool.hw_lro != 0) {
+ if ((priv->ifp->if_capenable & IFCAP_LRO) &&
+ MLX5_CAP_ETH(priv->mdev, lro_cap)) {
+ priv->params.hw_lro_en = 1;
+ priv->params_ethtool.hw_lro = 1;
+ } else {
+ priv->params.hw_lro_en = 0;
+ priv->params_ethtool.hw_lro = 0;
+ error = EINVAL;
+
+ if_printf(priv->ifp, "Can't enable HW LRO: "
+ "The HW or SW LRO feature is disabled\n");
+ }
} else {
priv->params.hw_lro_en = 0;
- priv->params_ethtool.hw_lro = 0;
- error = EINVAL;
-
- if_printf(priv->ifp, "Can't enable HW LRO: "
- "The HW or SW LRO feature is disabled");
}
- } else {
- priv->params.hw_lro_en = 0;
- }
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(cqe_zipping):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
- if (&priv->params_ethtool.arg[arg2] ==
- &priv->params_ethtool.cqe_zipping) {
+ /* import CQE zipping mode */
if (priv->params_ethtool.cqe_zipping &&
MLX5_CAP_GEN(priv->mdev, cqe_compression)) {
priv->params.cqe_zipping_en = true;
@@ -205,9 +337,27 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
priv->params.cqe_zipping_en = false;
priv->params_ethtool.cqe_zipping = 0;
}
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_completion_fact):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* verify parameter */
+ mlx5e_ethtool_sync_tx_completion_fact(priv);
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ default:
+ break;
}
- if (was_opened)
- mlx5e_open_locked(priv->ifp);
done:
PRIV_UNLOCK(priv);
return (error);
@@ -475,6 +625,7 @@ mlx5e_create_ethtool(struct mlx5e_priv *priv)
priv->params_ethtool.tx_coalesce_pkts = priv->params.tx_cq_moderation_pkts;
priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
priv->params_ethtool.cqe_zipping = priv->params.cqe_zipping_en;
+ mlx5e_ethtool_sync_tx_completion_fact(priv);
/* create root node */
node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index a76d32e..d71cbb3 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -850,7 +850,6 @@ mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq *rq)
{
int err;
- int i;
err = mlx5e_create_rq(c, param, rq);
if (err)
@@ -866,12 +865,6 @@ mlx5e_open_rq(struct mlx5e_channel *c,
c->rq.enabled = 1;
- /*
- * Test send queues, which will trigger
- * "mlx5e_post_rx_wqes()":
- */
- for (i = 0; i != c->num_tc; i++)
- mlx5e_send_nop(&c->sq[i], 1, true);
return (0);
err_disable_rq:
@@ -1185,24 +1178,89 @@ err_destroy_sq:
}
static void
-mlx5e_close_sq(struct mlx5e_sq *sq)
+mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
+{
+ /* fill up remainder with NOPs */
+ while (sq->cev_counter != 0) {
+ while (!mlx5e_sq_has_room_for(sq, 1)) {
+ if (can_sleep != 0) {
+ mtx_unlock(&sq->lock);
+ msleep(4);
+ mtx_lock(&sq->lock);
+ } else {
+ goto done;
+ }
+ }
+ /* send a single NOP */
+ mlx5e_send_nop(sq, 1);
+ wmb();
+ }
+done:
+ /* Check if we need to write the doorbell */
+ if (likely(sq->doorbell.d64 != 0)) {
+ mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
+ sq->doorbell.d64 = 0;
+ }
+ return;
+}
+
+void
+mlx5e_sq_cev_timeout(void *arg)
{
+ struct mlx5e_sq *sq = arg;
- /* ensure hw is notified of all pending wqes */
- if (mlx5e_sq_has_room_for(sq, 1))
- mlx5e_send_nop(sq, 1, true);
+ mtx_assert(&sq->lock, MA_OWNED);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ /* check next state */
+ switch (sq->cev_next_state) {
+ case MLX5E_CEV_STATE_SEND_NOPS:
+ /* fill TX ring with NOPs, if any */
+ mlx5e_sq_send_nops_locked(sq, 0);
+
+ /* check if completed */
+ if (sq->cev_counter == 0) {
+ sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
+ return;
+ }
+ break;
+ default:
+ /* send NOPs on next timeout */
+ sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
+ break;
+ }
+
+ /* restart timer */
+ callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}
static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{
+
+ mtx_lock(&sq->lock);
+ /* teardown event factor timer, if any */
+ sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
+ callout_stop(&sq->cev_callout);
+
+ /* send dummy NOPs in order to flush the transmit ring */
+ mlx5e_sq_send_nops_locked(sq, 1);
+ mtx_unlock(&sq->lock);
+
+ /* make sure it is safe to free the callout */
+ callout_drain(&sq->cev_callout);
+
+ /* error out remaining requests */
+ mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+
/* wait till SQ is empty */
+ mtx_lock(&sq->lock);
while (sq->cc != sq->pc) {
+ mtx_unlock(&sq->lock);
msleep(4);
sq->cq.mcq.comp(&sq->cq.mcq);
+ mtx_lock(&sq->lock);
}
+ mtx_unlock(&sq->lock);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
@@ -1412,24 +1470,13 @@ mlx5e_open_sqs(struct mlx5e_channel *c,
return (0);
err_close_sqs:
- for (tc--; tc >= 0; tc--) {
- mlx5e_close_sq(&c->sq[tc]);
+ for (tc--; tc >= 0; tc--)
mlx5e_close_sq_wait(&c->sq[tc]);
- }
return (err);
}
static void
-mlx5e_close_sqs(struct mlx5e_channel *c)
-{
- int tc;
-
- for (tc = 0; tc < c->num_tc; tc++)
- mlx5e_close_sq(&c->sq[tc]);
-}
-
-static void
mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
int tc;
@@ -1446,9 +1493,19 @@ mlx5e_chan_mtx_init(struct mlx5e_channel *c)
mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
for (tc = 0; tc < c->num_tc; tc++) {
- mtx_init(&c->sq[tc].lock, "mlx5tx", MTX_NETWORK_LOCK, MTX_DEF);
- mtx_init(&c->sq[tc].comp_lock, "mlx5comp", MTX_NETWORK_LOCK,
+ struct mlx5e_sq *sq = c->sq + tc;
+
+ mtx_init(&sq->lock, "mlx5tx", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&sq->comp_lock, "mlx5comp", MTX_NETWORK_LOCK,
MTX_DEF);
+
+ callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
+
+ sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
+
+ /* ensure the TX completion event factor is not zero */
+ if (sq->cev_factor == 0)
+ sq->cev_factor = 1;
}
}
@@ -1529,7 +1586,6 @@ mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return (0);
err_close_sqs:
- mlx5e_close_sqs(c);
mlx5e_close_sqs_wait(c);
err_close_rx_cq:
@@ -1554,7 +1610,6 @@ mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
if (c == NULL)
return;
mlx5e_close_rq(&c->rq);
- mlx5e_close_sqs(c);
}
static void
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
index 483a7e1..29c8b4b 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
@@ -28,8 +28,20 @@
#include "en.h"
#include <machine/atomic.h>
+static inline bool
+mlx5e_do_send_cqe(struct mlx5e_sq *sq)
+{
+ sq->cev_counter++;
+ /* interleave the CQEs */
+ if (sq->cev_counter >= sq->cev_factor) {
+ sq->cev_counter = 0;
+ return (1);
+ }
+ return (0);
+}
+
void
-mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt, bool notify_hw)
+mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
u16 pi = sq->pc & sq->wq.sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
@@ -38,14 +50,18 @@ mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt, bool notify_hw)
wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ if (mlx5e_do_send_cqe(sq))
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ else
+ wqe->ctrl.fm_ce_se = 0;
+
+ /* Copy data for doorbell */
+ memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
sq->mbuf[pi].mbuf = NULL;
sq->mbuf[pi].num_bytes = 0;
sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
sq->pc += sq->mbuf[pi].num_wqebbs;
- if (notify_hw)
- mlx5e_tx_notify_hw(sq, wqe, 0);
}
#if (__FreeBSD_version >= 1100000)
@@ -206,7 +222,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
pi = ((~sq->pc) & sq->wq.sz_m1);
if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
/* Send one multi NOP message instead of many */
- mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS, false);
+ mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
pi = ((~sq->pc) & sq->wq.sz_m1);
if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
m_freem(mb);
@@ -340,7 +356,13 @@ skip_dma:
wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ if (mlx5e_do_send_cqe(sq))
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ else
+ wqe->ctrl.fm_ce_se = 0;
+
+ /* Copy data for doorbell */
+ memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
/* Store pointer to mbuf */
sq->mbuf[pi].mbuf = mb;
@@ -351,8 +373,6 @@ skip_dma:
if (mb != NULL)
bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map, BUS_DMASYNC_PREWRITE);
- mlx5e_tx_notify_hw(sq, wqe, 0);
-
sq->stats.packets++;
return (0);
@@ -374,9 +394,10 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
*/
sqcc = sq->cc;
- while (budget--) {
+ while (budget > 0) {
struct mlx5_cqe64 *cqe;
struct mbuf *mb;
+ u16 x;
u16 ci;
cqe = mlx5e_get_cqe(&sq->cq);
@@ -385,24 +406,29 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
mlx5_cqwq_pop(&sq->cq.wq);
- ci = sqcc & sq->wq.sz_m1;
- mb = sq->mbuf[ci].mbuf;
- sq->mbuf[ci].mbuf = NULL; /* Safety clear */
+ /* update budget according to the event factor */
+ budget -= sq->cev_factor;
- if (mb == NULL) {
- if (sq->mbuf[ci].num_bytes == 0) {
- /* NOP */
- sq->stats.nop++;
- }
- } else {
- bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);
+ for (x = 0; x != sq->cev_factor; x++) {
+ ci = sqcc & sq->wq.sz_m1;
+ mb = sq->mbuf[ci].mbuf;
+ sq->mbuf[ci].mbuf = NULL; /* Safety clear */
- /* Free transmitted mbuf */
- m_freem(mb);
+ if (mb == NULL) {
+ if (sq->mbuf[ci].num_bytes == 0) {
+ /* NOP */
+ sq->stats.nop++;
+ }
+ } else {
+ bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);
+
+ /* Free transmitted mbuf */
+ m_freem(mb);
+ }
+ sqcc += sq->mbuf[ci].num_wqebbs;
}
- sqcc += sq->mbuf[ci].num_wqebbs;
}
mlx5_cqwq_update_db_record(&sq->cq.wq);
@@ -450,6 +476,23 @@ mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
}
+ /* Check if we need to write the doorbell */
+ if (likely(sq->doorbell.d64 != 0)) {
+ mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
+ sq->doorbell.d64 = 0;
+ }
+ /*
+ * Check if we need to start the event timer which flushes the
+ * transmit ring on timeout:
+ */
+ if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
+ sq->cev_factor != 1)) {
+ /* start the timer */
+ mlx5e_sq_cev_timeout(sq);
+ } else {
+ /* don't send NOPs yet */
+ sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
+ }
return (err);
}
diff --git a/sys/dev/mpr/mpr_sas.c b/sys/dev/mpr/mpr_sas.c
index 64c0d07..45c0bbd 100644
--- a/sys/dev/mpr/mpr_sas.c
+++ b/sys/dev/mpr/mpr_sas.c
@@ -2469,11 +2469,20 @@ mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
/*
- * Since these are generally external (i.e. hopefully
- * transient transport-related) errors, retry these without
- * decrementing the retry count.
+ * These can sometimes be transient transport-related
+ * errors, and sometimes persistent drive-related errors.
+ * We used to retry these without decrementing the retry
+ * count by returning CAM_REQUEUE_REQ. Unfortunately, if
+ * we hit a persistent drive problem that returns one of
+ * these error codes, we would retry indefinitely. So,
+ * return CAM_REQ_CMP_ERROR so that we decrement the retry
+ * count and avoid infinite retries. We're taking the
+ * potential risk of flagging false failures in the event
+ * of a topology-related error (e.g. a SAS expander problem
+ * causes a command addressed to a drive to fail), but
+ * avoiding getting into an infinite retry loop.
*/
- mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
+ mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
mprsas_log_command(cm, MPR_INFO,
"terminated ioc %x scsi %x state %x xfer %u\n",
le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
diff --git a/sys/dev/mps/mps_sas.c b/sys/dev/mps/mps_sas.c
index 4fbdbb2..f3ea8d3 100644
--- a/sys/dev/mps/mps_sas.c
+++ b/sys/dev/mps/mps_sas.c
@@ -2408,11 +2408,20 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
/*
- * Since these are generally external (i.e. hopefully
- * transient transport-related) errors, retry these without
- * decrementing the retry count.
+ * These can sometimes be transient transport-related
+ * errors, and sometimes persistent drive-related errors.
+ * We used to retry these without decrementing the retry
+ * count by returning CAM_REQUEUE_REQ. Unfortunately, if
+ * we hit a persistent drive problem that returns one of
+ * these error codes, we would retry indefinitely. So,
+ * return CAM_REQ_CMP_ERROR so that we decrement the retry
+ * count and avoid infinite retries. We're taking the
+ * potential risk of flagging false failures in the event
+ * of a topology-related error (e.g. a SAS expander problem
+ * causes a command addressed to a drive to fail), but
+ * avoiding getting into an infinite retry loop.
*/
- mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
+ mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
mpssas_log_command(cm, MPS_INFO,
"terminated ioc %x scsi %x state %x xfer %u\n",
le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
diff --git a/sys/dev/mrsas/mrsas.c b/sys/dev/mrsas/mrsas.c
index 6fa4639..0db859a 100644
--- a/sys/dev/mrsas/mrsas.c
+++ b/sys/dev/mrsas/mrsas.c
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/types.h>
+#include <sys/sysent.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
@@ -63,6 +64,7 @@ static d_write_t mrsas_write;
static d_ioctl_t mrsas_ioctl;
static d_poll_t mrsas_poll;
+static void mrsas_ich_startup(void *arg);
static struct mrsas_mgmt_info mrsas_mgmt_info;
static struct mrsas_ident *mrsas_find_ident(device_t);
static int mrsas_setup_msix(struct mrsas_softc *sc);
@@ -80,7 +82,8 @@ static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
-static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
+static void megasas_setup_jbod_map(struct mrsas_softc *sc);
+static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
@@ -104,8 +107,9 @@ int mrsas_ioc_init(struct mrsas_softc *sc);
int mrsas_bus_scan(struct mrsas_softc *sc);
int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
-int mrsas_reset_ctrl(struct mrsas_softc *sc);
-int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
+int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
+int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
+int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *cmd);
@@ -182,6 +186,10 @@ MRSAS_CTLR_ID device_table[] = {
{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
+ {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
+ {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
+ {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
+ {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
{0, 0, 0, 0, NULL}
};
@@ -553,6 +561,7 @@ mrsas_get_seq_num(struct mrsas_softc *sc,
{
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
+ u_int8_t do_ocr = 1, retcode = 0;
cmd = mrsas_get_mfi_cmd(sc);
@@ -580,16 +589,24 @@ mrsas_get_seq_num(struct mrsas_softc *sc,
dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
- mrsas_issue_blocked_cmd(sc, cmd);
+ retcode = mrsas_issue_blocked_cmd(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
+ do_ocr = 0;
/*
* Copy the data back into callers buffer
*/
memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
mrsas_free_evt_log_info_cmd(sc);
- mrsas_release_mfi_cmd(cmd);
- return 0;
+dcmd_timeout:
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
+ return retcode;
}
@@ -812,6 +829,8 @@ mrsas_attach(device_t dev)
struct mrsas_softc *sc = device_get_softc(dev);
uint32_t cmd, bar, error;
+ memset(sc, 0, sizeof(struct mrsas_softc));
+
/* Look up our softc and initialize its fields. */
sc->mrsas_dev = dev;
sc->device_id = pci_get_device(dev);
@@ -851,12 +870,6 @@ mrsas_attach(device_t dev)
mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
- /*
- * Intialize a counting Semaphore to take care no. of concurrent
- * IOCTLs
- */
- sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION);
-
/* Intialize linked list */
TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
@@ -865,15 +878,6 @@ mrsas_attach(device_t dev)
sc->io_cmds_highwater = 0;
- /* Create a /dev entry for this device. */
- sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
- GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
- device_get_unit(dev));
- if (device_get_unit(dev) == 0)
- make_dev_alias(sc->mrsas_cdev, "megaraid_sas_ioctl_node");
- if (sc->mrsas_cdev)
- sc->mrsas_cdev->si_drv1 = sc;
-
sc->adprecovery = MRSAS_HBA_OPERATIONAL;
sc->UnevenSpanSupport = 0;
@@ -883,7 +887,7 @@ mrsas_attach(device_t dev)
if (mrsas_init_fw(sc) != SUCCESS) {
goto attach_fail_fw;
}
- /* Register SCSI mid-layer */
+ /* Register mrsas to CAM layer */
if ((mrsas_cam_attach(sc) != SUCCESS)) {
goto attach_fail_cam;
}
@@ -891,38 +895,28 @@ mrsas_attach(device_t dev)
if (mrsas_setup_irq(sc) != SUCCESS) {
goto attach_fail_irq;
}
- /* Enable Interrupts */
- mrsas_enable_intr(sc);
-
error = mrsas_kproc_create(mrsas_ocr_thread, sc,
&sc->ocr_thread, 0, 0, "mrsas_ocr%d",
device_get_unit(sc->mrsas_dev));
if (error) {
- printf("Error %d starting rescan thread\n", error);
- goto attach_fail_irq;
- }
- mrsas_setup_sysctl(sc);
-
- /* Initiate AEN (Asynchronous Event Notification) */
-
- if (mrsas_start_aen(sc)) {
- printf("Error: start aen failed\n");
- goto fail_start_aen;
+ device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
+ goto attach_fail_ocr_thread;
}
/*
- * Add this controller to mrsas_mgmt_info structure so that it can be
- * exported to management applications
+ * After FW initialization and OCR thread creation
+ * we will defer the cdev creation, AEN setup on ICH callback
*/
- if (device_get_unit(dev) == 0)
- memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
-
- mrsas_mgmt_info.count++;
- mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
- mrsas_mgmt_info.max_index++;
-
- return (0);
+ sc->mrsas_ich.ich_func = mrsas_ich_startup;
+ sc->mrsas_ich.ich_arg = sc;
+ if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
+ device_printf(sc->mrsas_dev, "Config hook is already established\n");
+ }
+ mrsas_setup_sysctl(sc);
+ return SUCCESS;
-fail_start_aen:
+attach_fail_ocr_thread:
+ if (sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
attach_fail_irq:
mrsas_teardown_intr(sc);
attach_fail_cam:
@@ -940,10 +934,7 @@ attach_fail_fw:
mtx_destroy(&sc->mpt_cmd_pool_lock);
mtx_destroy(&sc->mfi_cmd_pool_lock);
mtx_destroy(&sc->raidmap_lock);
- /* Destroy the counting semaphore created for Ioctl */
- sema_destroy(&sc->ioctl_count_sema);
attach_fail:
- destroy_dev(sc->mrsas_cdev);
if (sc->reg_res) {
bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
sc->reg_res_id, sc->reg_res);
@@ -952,6 +943,63 @@ attach_fail:
}
/*
+ * Interrupt config hook
+ */
+static void
+mrsas_ich_startup(void *arg)
+{
+ struct mrsas_softc *sc = (struct mrsas_softc *)arg;
+
+ /*
+ * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
+ */
+ sema_init(&sc->ioctl_count_sema,
+ MRSAS_MAX_MFI_CMDS - 5,
+ IOCTL_SEMA_DESCRIPTION);
+
+ /* Create a /dev entry for mrsas controller. */
+ sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
+ GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
+ device_get_unit(sc->mrsas_dev));
+
+ if (device_get_unit(sc->mrsas_dev) == 0) {
+ make_dev_alias_p(MAKEDEV_CHECKNAME,
+ &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
+ "megaraid_sas_ioctl_node");
+ }
+ if (sc->mrsas_cdev)
+ sc->mrsas_cdev->si_drv1 = sc;
+
+ /*
+ * Add this controller to mrsas_mgmt_info structure so that it can be
+ * exported to management applications
+ */
+ if (device_get_unit(sc->mrsas_dev) == 0)
+ memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
+
+ mrsas_mgmt_info.count++;
+ mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
+ mrsas_mgmt_info.max_index++;
+
+ /* Enable Interrupts */
+ mrsas_enable_intr(sc);
+
+ /* Initiate AEN (Asynchronous Event Notification) */
+ if (mrsas_start_aen(sc)) {
+ device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
+ "Further events from the controller will not be communicated.\n"
+ "Either there is some problem in the controller"
+ "or the controller does not support AEN.\n"
+ "Please contact to the SUPPORT TEAM if the problem persists\n");
+ }
+ if (sc->mrsas_ich.ich_arg != NULL) {
+ device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
+ config_intrhook_disestablish(&sc->mrsas_ich);
+ sc->mrsas_ich.ich_arg = NULL;
+ }
+}
+
+/*
* mrsas_detach: De-allocates and teardown resources
* input: pointer to device struct
*
@@ -969,6 +1017,8 @@ mrsas_detach(device_t dev)
sc->remove_in_progress = 1;
/* Destroy the character device so no other IOCTL will be handled */
+ if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
+ destroy_dev(sc->mrsas_linux_emulator_cdev);
destroy_dev(sc->mrsas_cdev);
/*
@@ -989,7 +1039,7 @@ mrsas_detach(device_t dev)
i++;
if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
mrsas_dprint(sc, MRSAS_INFO,
- "[%2d]waiting for ocr to be finished\n", i);
+ "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
}
pause("mr_shutdown", hz);
}
@@ -1065,7 +1115,14 @@ mrsas_free_mem(struct mrsas_softc *sc)
if (sc->ld_drv_map[i] != NULL)
free(sc->ld_drv_map[i], M_MRSAS);
}
-
+ for (i = 0; i < 2; i++) {
+ if (sc->jbodmap_phys_addr[i])
+ bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
+ if (sc->jbodmap_mem[i] != NULL)
+ bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
+ if (sc->jbodmap_tag[i] != NULL)
+ bus_dma_tag_destroy(sc->jbodmap_tag[i]);
+ }
/*
* Free version buffer memroy
*/
@@ -1227,9 +1284,7 @@ mrsas_teardown_intr(struct mrsas_softc *sc)
static int
mrsas_suspend(device_t dev)
{
- struct mrsas_softc *sc;
-
- sc = device_get_softc(dev);
+ /* This will be filled when the driver will have hibernation support */
return (0);
}
@@ -1242,9 +1297,7 @@ mrsas_suspend(device_t dev)
static int
mrsas_resume(device_t dev)
{
- struct mrsas_softc *sc;
-
- sc = device_get_softc(dev);
+ /* This will be filled when the driver will have hibernation support */
return (0);
}
@@ -1315,9 +1368,7 @@ mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
i++;
if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
mrsas_dprint(sc, MRSAS_INFO,
- "[%2d]waiting for "
- "OCR to be finished %d\n", i,
- sc->ocr_thread_active);
+ "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
}
pause("mr_ioctl", hz);
}
@@ -1481,7 +1532,7 @@ mrsas_isr(void *arg)
* perform the appropriate action. Before we return, we clear the response
* interrupt.
*/
-static int
+int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
Mpi2ReplyDescriptorsUnion_t *desc;
@@ -1578,7 +1629,11 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
if (sc->msix_enable) {
if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY))
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53))
mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
((MSIxIndex & 0x7) << 24) |
sc->last_reply_idx[MSIxIndex]);
@@ -1600,7 +1655,11 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
/* Clear response interrupt */
if (sc->msix_enable) {
if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY)) {
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53)) {
mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
((MSIxIndex & 0x7) << 24) |
sc->last_reply_idx[MSIxIndex]);
@@ -1684,9 +1743,9 @@ mrsas_alloc_mem(struct mrsas_softc *sc)
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- MRSAS_MAX_IO_SIZE, /* maxsize */
- MRSAS_MAX_SGL, /* nsegments */
- MRSAS_MAX_IO_SIZE, /* maxsegsize */
+ MAXPHYS, /* maxsize */
+ sc->max_num_sge, /* nsegments */
+ MAXPHYS, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->mrsas_parent_tag /* tag */
@@ -1883,9 +1942,9 @@ mrsas_alloc_mem(struct mrsas_softc *sc)
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
- MRSAS_MAX_IO_SIZE,
- MRSAS_MAX_SGL,
- MRSAS_MAX_IO_SIZE,
+ MAXPHYS,
+ sc->max_num_sge, /* nsegments */
+ MAXPHYS,
BUS_DMA_ALLOCNOW,
busdma_lock_mutex,
&sc->io_lock,
@@ -1987,6 +2046,78 @@ ABORT:
return (1);
}
+/**
+ * megasas_setup_jbod_map - setup jbod map for FP seq_number.
+ * @sc: Adapter soft state
+ *
+ * Return 0 on success.
+ */
+void
+megasas_setup_jbod_map(struct mrsas_softc *sc)
+{
+ int i;
+ uint32_t pd_seq_map_sz;
+
+ pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+ (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
+
+ if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
+ sc->use_seqnum_jbod_fp = 0;
+ return;
+ }
+ if (sc->jbodmap_mem[0])
+ goto skip_alloc;
+
+ for (i = 0; i < 2; i++) {
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 4, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ pd_seq_map_sz,
+ 1,
+ pd_seq_map_sz,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->jbodmap_tag[i])) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate jbod map tag.\n");
+ return;
+ }
+ if (bus_dmamem_alloc(sc->jbodmap_tag[i],
+ (void **)&sc->jbodmap_mem[i],
+ BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate jbod map memory.\n");
+ return;
+ }
+ bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
+
+ if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
+ sc->jbodmap_mem[i], pd_seq_map_sz,
+ mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
+ BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
+ return;
+ }
+ if (!sc->jbodmap_mem[i]) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate memory for jbod map.\n");
+ sc->use_seqnum_jbod_fp = 0;
+ return;
+ }
+ }
+
+skip_alloc:
+ if (!megasas_sync_pd_seq_num(sc, false) &&
+ !megasas_sync_pd_seq_num(sc, true))
+ sc->use_seqnum_jbod_fp = 1;
+ else
+ sc->use_seqnum_jbod_fp = 0;
+
+ device_printf(sc->mrsas_dev, "Jbod map is supported\n");
+}
+
/*
* mrsas_init_fw: Initialize Firmware
* input: Adapter soft state
@@ -2086,18 +2217,28 @@ mrsas_init_fw(struct mrsas_softc *sc)
if (sc->secure_jbod_support)
device_printf(sc->mrsas_dev, "FW supports SED \n");
+ if (sc->use_seqnum_jbod_fp)
+ device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
+
if (mrsas_setup_raidmap(sc) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
- return (1);
+ device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
+ "There seems to be some problem in the controller\n"
+ "Please contact to the SUPPORT TEAM if the problem persists\n");
}
+ megasas_setup_jbod_map(sc);
+
/* For pass-thru, get PD/LD list and controller info */
memset(sc->pd_list, 0,
MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
- mrsas_get_pd_list(sc);
-
+ if (mrsas_get_pd_list(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Get PD list failed.\n");
+ return (1);
+ }
memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
- mrsas_get_ld_list(sc);
-
+ if (mrsas_get_ld_list(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
+ return (1);
+ }
/*
* Compute the max allowed sectors per IO: The controller info has
* two limits on max sectors. Driver should use the minimum of these
@@ -2147,7 +2288,7 @@ int
mrsas_init_adapter(struct mrsas_softc *sc)
{
uint32_t status;
- u_int32_t max_cmd;
+ u_int32_t max_cmd, scratch_pad_2;
int ret;
int i = 0;
@@ -2166,13 +2307,33 @@ mrsas_init_adapter(struct mrsas_softc *sc)
sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
- sc->chain_frames_alloc_sz = 1024 * max_cmd;
+ scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad_2));
+ /*
+ * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
+ * Firmware support extended IO chain frame which is 4 time more
+ * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
+ * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
+ */
+ if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
+ sc->max_chain_frame_sz =
+ ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
+ * MEGASAS_1MB_IO;
+ else
+ sc->max_chain_frame_sz =
+ ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
+ * MEGASAS_256K_IO;
+
+ sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
- sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
+ sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
+ mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
+ sc->max_num_sge, sc->max_chain_frame_sz);
+
/* Used for pass thru MFI frame (DCMD) */
sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
@@ -2297,7 +2458,11 @@ mrsas_ioc_init(struct mrsas_softc *sc)
/* driver support Extended MSIX */
if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY)) {
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53)) {
init_frame->driver_operations.
mfi_capabilities.support_additional_msix = 1;
}
@@ -2310,6 +2475,8 @@ mrsas_ioc_init(struct mrsas_softc *sc)
init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
+ if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
+ init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
init_frame->queue_info_new_phys_addr_lo = phys_addr;
init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
@@ -2412,7 +2579,7 @@ mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
for (i = 0; i < max_cmd; i++) {
cmd = sc->mpt_cmd_list[i];
offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
- chain_offset = 1024 * i;
+ chain_offset = sc->max_chain_frame_sz * i;
sense_offset = MRSAS_SENSE_LEN * i;
memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
cmd->index = i + 1;
@@ -2623,16 +2790,20 @@ mrsas_ocr_thread(void *arg)
/* Sleep for 1 second and check the queue status */
msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
"mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
- if (sc->remove_in_progress) {
+ if (sc->remove_in_progress ||
+ sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
mrsas_dprint(sc, MRSAS_OCR,
- "Exit due to shutdown from %s\n", __func__);
+ "Exit due to %s from %s\n",
+ sc->remove_in_progress ? "Shutdown" :
+ "Hardware critical error", __func__);
break;
}
fw_status = mrsas_read_reg(sc,
offsetof(mrsas_reg_set, outbound_scratch_pad));
fw_state = fw_status & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
- device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
+ device_printf(sc->mrsas_dev, "%s started due to %s!\n",
+ sc->disableOnlineCtrlReset ? "Kill Adapter" : "OCR",
sc->do_timedout_reset ? "IO Timeout" :
"FW fault detected");
mtx_lock_spin(&sc->ioctl_lock);
@@ -2640,7 +2811,7 @@ mrsas_ocr_thread(void *arg)
sc->reset_count++;
mtx_unlock_spin(&sc->ioctl_lock);
mrsas_xpt_freeze(sc);
- mrsas_reset_ctrl(sc);
+ mrsas_reset_ctrl(sc, sc->do_timedout_reset);
mrsas_xpt_release(sc);
sc->reset_in_progress = 0;
sc->do_timedout_reset = 0;
@@ -2687,14 +2858,14 @@ mrsas_reset_reply_desc(struct mrsas_softc *sc)
* OCR, Re-fire Managment command and move Controller to Operation state.
*/
int
-mrsas_reset_ctrl(struct mrsas_softc *sc)
+mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
{
int retval = SUCCESS, i, j, retry = 0;
u_int32_t host_diag, abs_state, status_reg, reset_adapter;
union ccb *ccb;
struct mrsas_mfi_cmd *mfi_cmd;
struct mrsas_mpt_cmd *mpt_cmd;
- MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ union mrsas_evt_class_locale class_locale;
if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
device_printf(sc->mrsas_dev,
@@ -2704,14 +2875,16 @@ mrsas_reset_ctrl(struct mrsas_softc *sc)
mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
mrsas_disable_intr(sc);
- DELAY(1000 * 1000);
+ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
+ sc->mrsas_fw_fault_check_delay * hz);
/* First try waiting for commands to complete */
- if (mrsas_wait_for_outstanding(sc)) {
+ if (mrsas_wait_for_outstanding(sc, reset_reason)) {
mrsas_dprint(sc, MRSAS_OCR,
"resetting adapter from %s.\n",
__func__);
/* Now return commands back to the CAM layer */
+ mtx_unlock(&sc->sim_lock);
for (i = 0; i < sc->max_fw_cmds; i++) {
mpt_cmd = sc->mpt_cmd_list[i];
if (mpt_cmd->ccb_ptr) {
@@ -2721,6 +2894,7 @@ mrsas_reset_ctrl(struct mrsas_softc *sc)
mrsas_atomic_dec(&sc->fw_outstanding);
}
}
+ mtx_lock(&sc->sim_lock);
status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad));
@@ -2815,31 +2989,17 @@ mrsas_reset_ctrl(struct mrsas_softc *sc)
mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
continue;
}
- /* Re-fire management commands */
for (j = 0; j < sc->max_fw_cmds; j++) {
mpt_cmd = sc->mpt_cmd_list[j];
if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
- if (mfi_cmd->frame->dcmd.opcode ==
- MR_DCMD_LD_MAP_GET_INFO) {
- mrsas_release_mfi_cmd(mfi_cmd);
- mrsas_release_mpt_cmd(mpt_cmd);
- } else {
- req_desc = mrsas_get_request_desc(sc,
- mfi_cmd->cmd_id.context.smid - 1);
- mrsas_dprint(sc, MRSAS_OCR,
- "Re-fire command DCMD opcode 0x%x index %d\n ",
- mfi_cmd->frame->dcmd.opcode, j);
- if (!req_desc)
- device_printf(sc->mrsas_dev,
- "Cannot build MPT cmd.\n");
- else
- mrsas_fire_cmd(sc, req_desc->addr.u.low,
- req_desc->addr.u.high);
- }
+ mrsas_release_mfi_cmd(mfi_cmd);
+ mrsas_release_mpt_cmd(mpt_cmd);
}
}
+ sc->aen_cmd = NULL;
+
/* Reset load balance info */
memset(sc->load_balance_info, 0,
sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
@@ -2852,10 +3012,37 @@ mrsas_reset_ctrl(struct mrsas_softc *sc)
if (!mrsas_get_map_info(sc))
mrsas_sync_map_info(sc);
+ megasas_setup_jbod_map(sc);
+
+ memset(sc->pd_list, 0,
+ MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+ if (mrsas_get_pd_list(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Get PD list failed from OCR.\n"
+ "Will get the latest PD LIST after OCR on event.\n");
+ }
+ memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
+ if (mrsas_get_ld_list(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Get LD lsit failed from OCR.\n"
+ "Will get the latest LD LIST after OCR on event.\n");
+ }
mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
mrsas_enable_intr(sc);
sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+ /* Register AEN with FW for last sequence number */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ if (mrsas_register_aen(sc, sc->last_seq_num,
+ class_locale.word)) {
+ device_printf(sc->mrsas_dev,
+ "ERROR: AEN registration FAILED from OCR !!! "
+ "Further events from the controller cannot be notified."
+ "Either there is some problem in the controller"
+ "or the controller does not support AEN.\n"
+ "Please contact to the SUPPORT TEAM if the problem persists\n");
+ }
/* Adapter reset completed successfully */
device_printf(sc->mrsas_dev, "Reset successful\n");
retval = SUCCESS;
@@ -2887,7 +3074,7 @@ void
mrsas_kill_hba(struct mrsas_softc *sc)
{
sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
- pause("mrsas_kill_hba", 1000);
+ DELAY(1000 * 1000);
mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
MFI_STOP_ADP);
@@ -2933,7 +3120,7 @@ mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
* completed.
*/
int
-mrsas_wait_for_outstanding(struct mrsas_softc *sc)
+mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
int i, outstanding, retval = 0;
u_int32_t fw_state, count, MSIxIndex;
@@ -2955,6 +3142,12 @@ mrsas_wait_for_outstanding(struct mrsas_softc *sc)
retval = 1;
goto out;
}
+ if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "DCMD IO TIMEOUT detected, will reset adapter.\n");
+ retval = 1;
+ goto out;
+ }
outstanding = mrsas_atomic_read(&sc->fw_outstanding);
if (!outstanding)
goto out;
@@ -3012,6 +3205,7 @@ static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
int retcode = 0;
+ u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
@@ -3041,15 +3235,26 @@ mrsas_get_ctrl_info(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
- if (!mrsas_issue_polled(sc, cmd))
- memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
else
- retcode = 1;
+ memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
+ do_ocr = 0;
mrsas_update_ext_vd_details(sc);
+ sc->use_seqnum_jbod_fp =
+ sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
+
+dcmd_timeout:
mrsas_free_ctlr_info_cmd(sc);
- mrsas_release_mfi_cmd(cmd);
+
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
return (retcode);
}
@@ -3168,7 +3373,7 @@ mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
struct mrsas_header *frame_hdr = &cmd->frame->hdr;
u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
- int i, retcode = 0;
+ int i, retcode = SUCCESS;
frame_hdr->cmd_status = 0xFF;
frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
@@ -3191,12 +3396,12 @@ mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
break;
}
}
- if (frame_hdr->cmd_status != 0) {
- if (frame_hdr->cmd_status == 0xFF)
- device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
- else
- device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
- retcode = 1;
+ if (frame_hdr->cmd_status == 0xFF) {
+ device_printf(sc->mrsas_dev, "DCMD timed out after %d "
+ "seconds from %s\n", max_wait, __func__);
+ device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
+ cmd->frame->dcmd.opcode);
+ retcode = ETIMEDOUT;
}
return (retcode);
}
@@ -3290,7 +3495,12 @@ mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cm
io_req = mpt_cmd->io_request;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53)) {
pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
sgl_ptr_end += sc->max_sge_in_main_msg - 1;
@@ -3307,7 +3517,7 @@ mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cm
mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
- mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
+ mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
return (0);
}
@@ -3325,10 +3535,10 @@ mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
unsigned long total_time = 0;
- int retcode = 0;
+ int retcode = SUCCESS;
/* Initialize cmd_status */
- cmd->cmd_status = ECONNREFUSED;
+ cmd->cmd_status = 0xFF;
/* Build MPT-MFI command for issue to FW */
if (mrsas_issue_dcmd(sc, cmd)) {
@@ -3338,18 +3548,30 @@ mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
sc->chan = (void *)&cmd;
while (1) {
- if (cmd->cmd_status == ECONNREFUSED) {
+ if (cmd->cmd_status == 0xFF) {
tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
} else
break;
- total_time++;
- if (total_time >= max_wait) {
- device_printf(sc->mrsas_dev,
- "Internal command timed out after %d seconds.\n", max_wait);
- retcode = 1;
- break;
+
+ if (!cmd->sync_cmd) { /* cmd->sync_cmd will be set for an
+ * IOCTL command */
+ total_time++;
+ if (total_time >= max_wait) {
+ device_printf(sc->mrsas_dev,
+ "Internal command timed out after %d seconds.\n", max_wait);
+ retcode = 1;
+ break;
+ }
}
}
+
+ if (cmd->cmd_status == 0xFF) {
+ device_printf(sc->mrsas_dev, "DCMD timed out after %d "
+ "seconds from %s\n", max_wait, __func__);
+ device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
+ cmd->frame->dcmd.opcode);
+ retcode = ETIMEDOUT;
+ }
return (retcode);
}
@@ -3400,6 +3622,7 @@ mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd
(cmd->frame->dcmd.mbox.b[1] == 1)) {
sc->fast_path_io = 0;
mtx_lock(&sc->raidmap_lock);
+ sc->map_update_cmd = NULL;
if (cmd_status != 0) {
if (cmd_status != MFI_STAT_NOT_FOUND)
device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
@@ -3423,6 +3646,28 @@ mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd
cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
sc->mrsas_aen_triggered = 0;
}
+ /* FW has an updated PD sequence */
+ if ((cmd->frame->dcmd.opcode ==
+ MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
+ (cmd->frame->dcmd.mbox.b[0] == 1)) {
+
+ mtx_lock(&sc->raidmap_lock);
+ sc->jbod_seq_cmd = NULL;
+ mrsas_release_mfi_cmd(cmd);
+
+ if (cmd_status == MFI_STAT_OK) {
+ sc->pd_seq_map_id++;
+ /* Re-register a pd sync seq num cmd */
+ if (megasas_sync_pd_seq_num(sc, true))
+ sc->use_seqnum_jbod_fp = 0;
+ } else {
+ sc->use_seqnum_jbod_fp = 0;
+ device_printf(sc->mrsas_dev,
+ "Jbod map sync failed, status=%x\n", cmd_status);
+ }
+ mtx_unlock(&sc->raidmap_lock);
+ break;
+ }
/* See if got an event notification */
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
mrsas_complete_aen(sc, cmd);
@@ -3454,7 +3699,7 @@ mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
cmd->cmd_status = cmd->frame->io.cmd_status;
- if (cmd->cmd_status == ECONNREFUSED)
+ if (cmd->cmd_status == 0xFF)
cmd->cmd_status = 0;
sc->chan = (void *)&cmd;
@@ -3485,9 +3730,10 @@ mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
}
if (sc->aen_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
-
if (sc->map_update_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
+ if (sc->jbod_seq_cmd)
+ mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
dcmd = &cmd->frame->dcmd;
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -3549,6 +3795,85 @@ mrsas_flush_cache(struct mrsas_softc *sc)
return;
}
+int
+megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
+{
+ int retcode = 0;
+ u_int8_t do_ocr = 1;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ uint32_t pd_seq_map_sz;
+ struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+ bus_addr_t pd_seq_h;
+
+ pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+ (sizeof(struct MR_PD_CFG_SEQ) *
+ (MAX_PHYSICAL_DEVICES - 1));
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc for ld map info cmd.\n");
+ return 1;
+ }
+ dcmd = &cmd->frame->dcmd;
+
+ pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
+ pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
+ if (!pd_sync) {
+ device_printf(sc->mrsas_dev,
+ "Failed to alloc mem for jbod map info.\n");
+ mrsas_release_mfi_cmd(cmd);
+ return (ENOMEM);
+ }
+ memset(pd_sync, 0, pd_seq_map_sz);
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = (pd_seq_map_sz);
+ dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
+ dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
+
+ if (pend) {
+ dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->flags = (MFI_FRAME_DIR_WRITE);
+ sc->jbod_seq_cmd = cmd;
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev,
+ "Fail to send sync map info command.\n");
+ return 1;
+ } else
+ return 0;
+ } else
+ dcmd->flags = MFI_FRAME_DIR_READ;
+
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
+
+ if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
+ device_printf(sc->mrsas_dev,
+ "driver supports max %d JBOD, but FW reports %d\n",
+ MAX_PHYSICAL_DEVICES, pd_sync->count);
+ retcode = -EINVAL;
+ }
+ if (!retcode)
+ sc->pd_seq_map_id++;
+ do_ocr = 0;
+
+dcmd_timeout:
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
+ return (retcode);
+}
+
/*
* mrsas_get_map_info: Load and validate RAID map input:
* Adapter instance soft state
@@ -3618,14 +3943,11 @@ mrsas_get_ld_map_info(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
dcmd->sgl.sge32[0].length = sc->current_map_sz;
- if (!mrsas_issue_polled(sc, cmd))
- retcode = 0;
- else {
- device_printf(sc->mrsas_dev,
- "Fail to send get LD map info cmd.\n");
- retcode = 1;
- }
- mrsas_release_mfi_cmd(cmd);
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
return (retcode);
}
@@ -3710,6 +4032,7 @@ static int
mrsas_get_pd_list(struct mrsas_softc *sc)
{
int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
+ u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
struct MR_PD_LIST *pd_list_mem;
@@ -3731,6 +4054,8 @@ mrsas_get_pd_list(struct mrsas_softc *sc)
device_printf(sc->mrsas_dev,
"Cannot alloc dmamap for get PD list cmd\n");
mrsas_release_mfi_cmd(cmd);
+ mrsas_free_tmp_dcmd(tcmd);
+ free(tcmd, M_MRSAS);
return (ENOMEM);
} else {
pd_list_mem = tcmd->tmp_dcmd_mem;
@@ -3751,15 +4076,14 @@ mrsas_get_pd_list(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
- if (!mrsas_issue_polled(sc, cmd))
- retcode = 0;
- else
- retcode = 1;
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
/* Get the instance PD list */
pd_count = MRSAS_MAX_PD;
pd_addr = pd_list_mem->addr;
- if (retcode == 0 && pd_list_mem->count < pd_count) {
+ if (pd_list_mem->count < pd_count) {
memset(sc->local_pd_list, 0,
MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
@@ -3770,15 +4094,22 @@ mrsas_get_pd_list(struct mrsas_softc *sc)
MR_PD_STATE_SYSTEM;
pd_addr++;
}
+ /*
+ * Use a mutex/spinlock if the pd_list component size increases beyond
+ * 32 bits.
+ */
+ memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
+ do_ocr = 0;
}
- /*
- * Use mutext/spinlock if pd_list component size increase more than
- * 32 bit.
- */
- memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
+dcmd_timeout:
mrsas_free_tmp_dcmd(tcmd);
- mrsas_release_mfi_cmd(cmd);
free(tcmd, M_MRSAS);
+
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
return (retcode);
}
@@ -3794,6 +4125,7 @@ static int
mrsas_get_ld_list(struct mrsas_softc *sc)
{
int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
+ u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
struct MR_LD_LIST *ld_list_mem;
@@ -3814,6 +4146,8 @@ mrsas_get_ld_list(struct mrsas_softc *sc)
device_printf(sc->mrsas_dev,
"Cannot alloc dmamap for get LD list cmd\n");
mrsas_release_mfi_cmd(cmd);
+ mrsas_free_tmp_dcmd(tcmd);
+ free(tcmd, M_MRSAS);
return (ENOMEM);
} else {
ld_list_mem = tcmd->tmp_dcmd_mem;
@@ -3835,18 +4169,16 @@ mrsas_get_ld_list(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
dcmd->pad_0 = 0;
- if (!mrsas_issue_polled(sc, cmd))
- retcode = 0;
- else
- retcode = 1;
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
#if VD_EXT_DEBUG
printf("Number of LDs %d\n", ld_list_mem->ldCount);
#endif
/* Get the instance LD list */
- if ((retcode == 0) &&
- (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) {
+ if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
sc->CurLdCount = ld_list_mem->ldCount;
memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
@@ -3855,10 +4187,17 @@ mrsas_get_ld_list(struct mrsas_softc *sc)
sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
}
}
+ do_ocr = 0;
}
+dcmd_timeout:
mrsas_free_tmp_dcmd(tcmd);
- mrsas_release_mfi_cmd(cmd);
free(tcmd, M_MRSAS);
+
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
return (retcode);
}
@@ -4014,7 +4353,7 @@ mrsas_aen_handler(struct mrsas_softc *sc)
union mrsas_evt_class_locale class_locale;
int doscan = 0;
u_int32_t seq_num;
- int error;
+ int error, fail_aen = 0;
if (sc == NULL) {
printf("invalid instance!\n");
@@ -4023,13 +4362,19 @@ mrsas_aen_handler(struct mrsas_softc *sc)
if (sc->evt_detail_mem) {
switch (sc->evt_detail_mem->code) {
case MR_EVT_PD_INSERTED:
- mrsas_get_pd_list(sc);
- mrsas_bus_scan_sim(sc, sc->sim_1);
+ fail_aen = mrsas_get_pd_list(sc);
+ if (!fail_aen)
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ else
+ goto skip_register_aen;
doscan = 0;
break;
case MR_EVT_PD_REMOVED:
- mrsas_get_pd_list(sc);
- mrsas_bus_scan_sim(sc, sc->sim_1);
+ fail_aen = mrsas_get_pd_list(sc);
+ if (!fail_aen)
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ else
+ goto skip_register_aen;
doscan = 0;
break;
case MR_EVT_LD_OFFLINE:
@@ -4039,8 +4384,11 @@ mrsas_aen_handler(struct mrsas_softc *sc)
doscan = 0;
break;
case MR_EVT_LD_CREATED:
- mrsas_get_ld_list(sc);
- mrsas_bus_scan_sim(sc, sc->sim_0);
+ fail_aen = mrsas_get_ld_list(sc);
+ if (!fail_aen)
+ mrsas_bus_scan_sim(sc, sc->sim_0);
+ else
+ goto skip_register_aen;
doscan = 0;
break;
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
@@ -4057,12 +4405,19 @@ mrsas_aen_handler(struct mrsas_softc *sc)
return;
}
if (doscan) {
- mrsas_get_pd_list(sc);
- mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
- mrsas_bus_scan_sim(sc, sc->sim_1);
- mrsas_get_ld_list(sc);
- mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
- mrsas_bus_scan_sim(sc, sc->sim_0);
+ fail_aen = mrsas_get_pd_list(sc);
+ if (!fail_aen) {
+ mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ } else
+ goto skip_register_aen;
+
+ fail_aen = mrsas_get_ld_list(sc);
+ if (!fail_aen) {
+ mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
+ mrsas_bus_scan_sim(sc, sc->sim_0);
+ } else
+ goto skip_register_aen;
}
seq_num = sc->evt_detail_mem->seq_num + 1;
@@ -4082,6 +4437,9 @@ mrsas_aen_handler(struct mrsas_softc *sc)
if (error)
device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
+skip_register_aen:
+ return;
+
}
diff --git a/sys/dev/mrsas/mrsas.h b/sys/dev/mrsas/mrsas.h
index 1cfe89e..2c2a341 100644
--- a/sys/dev/mrsas/mrsas.h
+++ b/sys/dev/mrsas/mrsas.h
@@ -80,6 +80,10 @@ __FBSDID("$FreeBSD$");
#define MRSAS_TBOLT 0x005b
#define MRSAS_INVADER 0x005d
#define MRSAS_FURY 0x005f
+#define MRSAS_INTRUDER 0x00ce
+#define MRSAS_INTRUDER_24 0x00cf
+#define MRSAS_CUTLASS_52 0x0052
+#define MRSAS_CUTLASS_53 0x0053
#define MRSAS_PCI_BAR0 0x10
#define MRSAS_PCI_BAR1 0x14
#define MRSAS_PCI_BAR2 0x1C
@@ -102,7 +106,7 @@ __FBSDID("$FreeBSD$");
*/
#define BYTE_ALIGNMENT 1
#define MRSAS_MAX_NAME_LENGTH 32
-#define MRSAS_VERSION "06.707.04.03-fbsd"
+#define MRSAS_VERSION "06.709.07.00-fbsd"
#define MRSAS_ULONG_MAX 0xFFFFFFFFFFFFFFFF
#define MRSAS_DEFAULT_TIMEOUT 0x14 /* Temporarily set */
#define DONE 0
@@ -166,7 +170,9 @@ typedef struct _RAID_CONTEXT {
u_int8_t numSGE;
u_int16_t configSeqNum;
u_int8_t spanArm;
- u_int8_t resvd2[3];
+ u_int8_t priority; /* 0x1D MR_PRIORITY_RANGE */
+ u_int8_t numSGEExt; /* 0x1E 1M IO support */
+ u_int8_t resvd2; /* 0x1F */
} RAID_CONTEXT;
@@ -577,6 +583,7 @@ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
#define MRSAS_MAX_PD_CHANNELS 1
@@ -863,6 +870,22 @@ struct IO_REQUEST_INFO {
u_int8_t pd_after_lb;
};
+/*
+ * define MR_PD_CFG_SEQ structure for system PDs
+ */
+struct MR_PD_CFG_SEQ {
+ u_int16_t seqNum;
+ u_int16_t devHandle;
+ u_int8_t reserved[4];
+} __packed;
+
+struct MR_PD_CFG_SEQ_NUM_SYNC {
+ u_int32_t size;
+ u_int32_t count;
+ struct MR_PD_CFG_SEQ seq[1];
+} __packed;
+
+
typedef struct _MR_LD_TARGET_SYNC {
u_int8_t targetId;
u_int8_t reserved;
@@ -1223,7 +1246,7 @@ enum MR_EVT_ARGS {
/*
* Thunderbolt (and later) Defines
*/
-#define MRSAS_MAX_SZ_CHAIN_FRAME 1024
+#define MEGASAS_CHAIN_FRAME_SZ_MIN 1024
#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
#define MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
#define MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
@@ -1301,10 +1324,13 @@ typedef enum _REGION_TYPE {
#define MRSAS_SCSI_MAX_CMDS 8
#define MRSAS_SCSI_MAX_CDB_LEN 16
#define MRSAS_SCSI_SENSE_BUFFERSIZE 96
-#define MRSAS_MAX_SGL 70
-#define MRSAS_MAX_IO_SIZE (256 * 1024)
#define MRSAS_INTERNAL_CMDS 32
+#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000
+#define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0
+#define MEGASAS_256K_IO 128
+#define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4)
+
/* Request types */
#define MRSAS_REQ_TYPE_INTERNAL_CMD 0x0
#define MRSAS_REQ_TYPE_AEN_FETCH 0x1
@@ -1927,7 +1953,12 @@ struct mrsas_ctrl_info {
u_int32_t supportCacheBypassModes:1;
u_int32_t supportSecurityonJBOD:1;
u_int32_t discardCacheDuringLDDelete:1;
- u_int32_t reserved:12;
+ u_int32_t supportTTYLogCompression:1;
+ u_int32_t supportCPLDUpdate:1;
+ u_int32_t supportDiskCacheSettingForSysPDs:1;
+ u_int32_t supportExtendedSSCSize:1;
+ u_int32_t useSeqNumJbodFP:1;
+ u_int32_t reserved:7;
} adapterOperations3;
u_int8_t pad[0x800 - 0x7EC]; /* 0x7EC */
@@ -2001,7 +2032,9 @@ typedef union _MFI_CAPABILITIES {
u_int32_t support_ndrive_r1_lb:1;
u_int32_t support_core_affinity:1;
u_int32_t security_protocol_cmds_fw:1;
- u_int32_t reserved:25;
+ u_int32_t support_ext_queue_depth:1;
+ u_int32_t support_ext_io_size:1;
+ u_int32_t reserved:23;
} mfi_capabilities;
u_int32_t reg;
} MFI_CAPABILITIES;
@@ -2435,6 +2468,12 @@ struct mrsas_irq_context {
uint32_t MSIxIndex;
};
+enum MEGASAS_OCR_REASON {
+ FW_FAULT_OCR = 0,
+ SCSIIO_TIMEOUT_OCR = 1,
+ MFI_DCMD_TIMEOUT_OCR = 2,
+};
+
/* Controller management info added to support Linux Emulator */
#define MAX_MGMT_ADAPTERS 1024
@@ -2611,6 +2650,8 @@ typedef struct _MRSAS_DRV_PCI_INFORMATION {
struct mrsas_softc {
device_t mrsas_dev;
struct cdev *mrsas_cdev;
+ struct intr_config_hook mrsas_ich;
+ struct cdev *mrsas_linux_emulator_cdev;
uint16_t device_id;
struct resource *reg_res;
int reg_res_id;
@@ -2669,6 +2710,7 @@ struct mrsas_softc {
int msix_enable;
uint32_t msix_reg_offset[16];
uint8_t mask_interrupts;
+ uint16_t max_chain_frame_sz;
struct mrsas_mpt_cmd **mpt_cmd_list;
struct mrsas_mfi_cmd **mfi_cmd_list;
TAILQ_HEAD(, mrsas_mpt_cmd) mrsas_mpt_cmd_list_head;
@@ -2691,7 +2733,9 @@ struct mrsas_softc {
u_int8_t chain_offset_mfi_pthru;
u_int32_t map_sz;
u_int64_t map_id;
+ u_int64_t pd_seq_map_id;
struct mrsas_mfi_cmd *map_update_cmd;
+ struct mrsas_mfi_cmd *jbod_seq_cmd;
struct mrsas_mfi_cmd *aen_cmd;
u_int8_t fast_path_io;
void *chan;
@@ -2702,6 +2746,12 @@ struct mrsas_softc {
u_int8_t do_timedout_reset;
u_int32_t reset_in_progress;
u_int32_t reset_count;
+
+ bus_dma_tag_t jbodmap_tag[2];
+ bus_dmamap_t jbodmap_dmamap[2];
+ void *jbodmap_mem[2];
+ bus_addr_t jbodmap_phys_addr[2];
+
bus_dma_tag_t raidmap_tag[2];
bus_dmamap_t raidmap_dmamap[2];
void *raidmap_mem[2];
@@ -2745,6 +2795,7 @@ struct mrsas_softc {
LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
u_int8_t secure_jbod_support;
+ u_int8_t use_seqnum_jbod_fp;
u_int8_t max256vdSupport;
u_int16_t fw_supported_vd_count;
u_int16_t fw_supported_pd_count;
diff --git a/sys/dev/mrsas/mrsas_cam.c b/sys/dev/mrsas/mrsas_cam.c
index 08f9002..a0d1323 100644
--- a/sys/dev/mrsas/mrsas_cam.c
+++ b/sys/dev/mrsas/mrsas_cam.c
@@ -65,11 +65,14 @@ int
mrsas_map_request(struct mrsas_softc *sc,
struct mrsas_mpt_cmd *cmd, union ccb *ccb);
int
-mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb);
int
-mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb, struct cam_sim *sim);
+mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb);
+int
+mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb, u_int32_t device_id,
@@ -121,6 +124,7 @@ mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
extern u_int8_t
megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
u_int64_t block, u_int32_t count);
+extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
/*
@@ -341,7 +345,7 @@ mrsas_action(struct cam_sim *sim, union ccb *ccb)
else
ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
#if (__FreeBSD_version > 704000)
- ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
+ ccb->cpi.maxio = sc->max_num_sge * MRSAS_PAGE_SIZE;
#endif
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
@@ -392,7 +396,7 @@ mrsas_scsiio_timeout(void *data)
callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
mrsas_scsiio_timeout, cmd);
#endif
- sc->do_timedout_reset = 1;
+ sc->do_timedout_reset = SCSIIO_TIMEOUT_OCR;
if (sc->ocr_thread_active)
wakeup(&sc->ocr_chan);
}
@@ -415,6 +419,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
struct ccb_scsiio *csio = &(ccb->csio);
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u_int8_t cmd_type;
if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
ccb->ccb_h.status = CAM_REQ_CMP;
@@ -458,7 +463,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
ccb_h->status = CAM_REQ_INVALID;
goto done;
case CAM_DATA_VADDR:
- if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
mrsas_release_mpt_cmd(cmd);
ccb_h->status = CAM_REQ_TOO_BIG;
goto done;
@@ -468,6 +473,11 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
cmd->data = csio->data_ptr;
break;
case CAM_DATA_BIO:
+ if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_TOO_BIG;
+ goto done;
+ }
cmd->length = csio->dxfer_len;
if (cmd->length)
cmd->data = csio->data_ptr;
@@ -479,7 +489,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
#else
if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Virtual data address */
if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
- if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
mrsas_release_mpt_cmd(cmd);
ccb_h->status = CAM_REQ_TOO_BIG;
goto done;
@@ -517,19 +527,44 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
mtx_lock(&sc->raidmap_lock);
/* Check for IO type READ-WRITE targeted for Logical Volume */
- if (mrsas_find_io_type(sim, ccb) == READ_WRITE_LDIO) {
+ cmd_type = mrsas_find_io_type(sim, ccb);
+ switch (cmd_type) {
+ case READ_WRITE_LDIO:
/* Build READ-WRITE IO for Logical Volume */
- if (mrsas_build_ldio(sc, cmd, ccb)) {
- device_printf(sc->mrsas_dev, "Build LDIO failed.\n");
+ if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
+ device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
mtx_unlock(&sc->raidmap_lock);
return (1);
}
- } else {
- if (mrsas_build_dcdb(sc, cmd, ccb, sim)) {
- device_printf(sc->mrsas_dev, "Build DCDB failed.\n");
+ break;
+ case NON_READ_WRITE_LDIO:
+ /* Build NON READ-WRITE IO for Logical Volume */
+ if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
+ device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
mtx_unlock(&sc->raidmap_lock);
return (1);
}
+ break;
+ case READ_WRITE_SYSPDIO:
+ case NON_READ_WRITE_SYSPDIO:
+ if (sc->secure_jbod_support &&
+ (cmd_type == NON_READ_WRITE_SYSPDIO)) {
+ /* Build NON-RW IO for JBOD */
+ if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
+ device_printf(sc->mrsas_dev,
+ "Build SYSPDIO failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return (1);
+ }
+ } else {
+ /* Build RW IO for JBOD */
+ if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
+ device_printf(sc->mrsas_dev,
+ "Build SYSPDIO failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return (1);
+ }
+ }
}
mtx_unlock(&sc->raidmap_lock);
@@ -614,7 +649,10 @@ mrsas_get_mpt_cmd(struct mrsas_softc *sc)
if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
+ } else {
+ goto out;
}
+
memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
cmd->data = NULL;
cmd->length = 0;
@@ -622,8 +660,9 @@ mrsas_get_mpt_cmd(struct mrsas_softc *sc)
cmd->error_code = 0;
cmd->load_balance = 0;
cmd->ccb_ptr = NULL;
- mtx_unlock(&sc->mpt_cmd_pool_lock);
+out:
+ mtx_unlock(&sc->mpt_cmd_pool_lock);
return cmd;
}
@@ -668,7 +707,7 @@ mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
}
/*
- * mrsas_build_ldio: Builds an LDIO command
+ * mrsas_build_ldio_rw: Builds an LDIO command
* input: Adapter instance soft state
* Pointer to command packet
* Pointer to CCB
@@ -677,7 +716,7 @@ mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
* built successfully, otherwise it returns a 1.
*/
int
-mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb)
{
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
@@ -701,12 +740,18 @@ mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
io_request->DataLength = cmd->length;
if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
- if (cmd->sge_count > MRSAS_MAX_SGL) {
+ if (cmd->sge_count > sc->max_num_sge) {
device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
"max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
return (FAIL);
}
+ /*
+ * numSGE stores the lower 8 bits of sge_count; numSGEExt stores
+ * the higher 8 bits of sge_count.
+ */
io_request->RaidContext.numSGE = cmd->sge_count;
+ io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
+
} else {
device_printf(sc->mrsas_dev, "Data map/load failed.\n");
return (FAIL);
@@ -832,7 +877,12 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53)) {
if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
@@ -861,7 +911,12 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
cmd->request_desc->SCSIIO.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53)) {
if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
@@ -879,78 +934,141 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
}
/*
- * mrsas_build_dcdb: Builds an DCDB command
+ * mrsas_build_ldio_nonrw: Builds an LDIO command
* input: Adapter instance soft state
* Pointer to command packet
* Pointer to CCB
*
- * This function builds the DCDB inquiry command. It returns 0 if the command
- * is built successfully, otherwise it returns a 1.
+ * This function builds the LDIO command packet. It returns 0 if the command is
+ * built successfully, otherwise it returns a 1.
*/
int
-mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb, struct cam_sim *sim)
+mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb)
{
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
u_int32_t device_id;
- MR_DRV_RAID_MAP_ALL *map_ptr;
MRSAS_RAID_SCSI_IO_REQUEST *io_request;
io_request = cmd->io_request;
device_id = ccb_h->target_id;
- map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
- /*
- * Check if this is RW for system PD or
- * it's a NON RW for sys PD and there is NO secure jbod FW support
- */
- if (cam_sim_bus(sim) == 1 &&
- sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) {
+ /* FW path for LD Non-RW (SCSI management commands) */
+ io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->DevHandle =
- map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
- io_request->RaidContext.RAIDFlags =
- MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
- MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
- cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
- cmd->request_desc->SCSIIO.MSIxIndex =
- sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
-
- if (sc->secure_jbod_support && (mrsas_find_io_type(sim, ccb) == NON_READ_WRITE_SYSPDIO)) {
- /* system pd firmware path */
- io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- } else {
- /* system pd fast path */
- io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
- io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
- io_request->RaidContext.regLockFlags = 0;
- io_request->RaidContext.regLockRowLBA = 0;
- io_request->RaidContext.regLockLength = 0;
-
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
- MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->LUN[1] = ccb_h->target_lun & 0xF;
+ io_request->DataLength = cmd->length;
- /*
- * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
- * Because the NON RW cmds will now go via FW Queue
- * and not the Exception queue
- */
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
- io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
+ if (cmd->sge_count > sc->max_num_sge) {
+ device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
+ "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
+ return (1);
}
+ /*
+ * numSGE store lower 8 bit of sge_count. numSGEExt store
+ * higher 8 bit of sge_count
+ */
+ io_request->RaidContext.numSGE = cmd->sge_count;
+ io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
} else {
- /* FW path for SysPD or LD Non-RW (SCSI management commands) */
+ device_printf(sc->mrsas_dev, "Data map/load failed.\n");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * mrsas_build_syspdio: Builds an DCDB command
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
+ *
+ * This function builds the DCDB inquiry command. It returns 0 if the command
+ * is built successfully, otherwise it returns a 1.
+ */
+int
+mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
+{
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ u_int32_t device_id;
+ MR_DRV_RAID_MAP_ALL *local_map_ptr;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+ struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+
+ pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
+
+ io_request = cmd->io_request;
+ device_id = ccb_h->target_id;
+ local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
+ io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ io_request->RaidContext.regLockFlags = 0;
+ io_request->RaidContext.regLockRowLBA = 0;
+ io_request->RaidContext.regLockLength = 0;
+
+ /* If FW supports PD sequence number */
+ if (sc->use_seqnum_jbod_fp &&
+ sc->pd_list[device_id].driveType == 0x00) {
+ //printf("Using Drv seq num\n");
+ io_request->RaidContext.VirtualDiskTgtId = device_id + 255;
+ io_request->RaidContext.configSeqNum = pd_sync->seq[device_id].seqNum;
+ io_request->DevHandle = pd_sync->seq[device_id].devHandle;
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.nseg = 0x1;
+ } else if (sc->fast_path_io) {
+ //printf("Using LD RAID map\n");
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.configSeqNum = 0;
+ local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
+ io_request->DevHandle =
+ local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ } else {
+ //printf("Using FW PATH\n");
+ /* Want to send all IO via FW path */
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.configSeqNum = 0;
+ io_request->DevHandle = 0xFFFF;
+ }
+
+ cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
+
+ if (!fp_possible) {
+ /* system pd firmware path */
io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = device_id;
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.timeoutValue =
+ local_map_ptr->raidMap.fpPdIoTimeoutSec;
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ } else {
+ /* system pd fast path */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->RaidContext.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec;
+
+ /*
+ * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
+ * Because the NON RW cmds will now go via FW Queue
+ * and not the Exception queue
+ */
+ io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
- io_request->RaidContext.VirtualDiskTgtId = device_id;
io_request->LUN[1] = ccb_h->target_lun & 0xF;
io_request->DataLength = cmd->length;
@@ -960,7 +1078,12 @@ mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
"max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
return (1);
}
+ /*
+ * numSGE store lower 8 bit of sge_count. numSGEExt store
+ * higher 8 bit of sge_count
+ */
io_request->RaidContext.numSGE = cmd->sge_count;
+ io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
} else {
device_printf(sc->mrsas_dev, "Data map/load failed.\n");
return (1);
@@ -1069,7 +1192,12 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
io_request = cmd->io_request;
sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53)) {
pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
sgl_ptr_end += sc->max_sge_in_main_msg - 1;
@@ -1080,7 +1208,12 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
sgl_ptr->Address = segs[i].ds_addr;
sgl_ptr->Length = segs[i].ds_len;
sgl_ptr->Flags = 0;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53)) {
if (i == nseg - 1)
sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
}
@@ -1090,7 +1223,12 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
(nseg > sc->max_sge_in_main_msg)) {
pMpi25IeeeSgeChain64_t sg_chain;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53)) {
if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
!= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
cmd->io_request->ChainOffset = sc->chain_offset_io_request;
@@ -1099,7 +1237,12 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
} else
cmd->io_request->ChainOffset = sc->chain_offset_io_request;
sg_chain = sgl_ptr;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53))
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
else
sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
@@ -1170,9 +1313,16 @@ mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
static void
mrsas_cam_poll(struct cam_sim *sim)
{
+ int i;
struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
- mrsas_isr((void *)sc);
+ if (sc->msix_vectors != 0){
+ for (i=0; i<sc->msix_vectors; i++){
+ mrsas_complete_cmd(sc, i);
+ }
+ } else {
+ mrsas_complete_cmd(sc, 0);
+ }
}
/*
diff --git a/sys/dev/mrsas/mrsas_fp.c b/sys/dev/mrsas/mrsas_fp.c
index 7ae5662..f83d52b 100644
--- a/sys/dev/mrsas/mrsas_fp.c
+++ b/sys/dev/mrsas/mrsas_fp.c
@@ -749,7 +749,12 @@ mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripR
u_int32_t logArm, rowMod, armQ, arm;
u_int8_t do_invader = 0;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53))
do_invader = 1;
/* Get row and span from io_info for Uneven Span IO. */
@@ -960,7 +965,12 @@ MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
regSize += stripSize;
}
pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53))
pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
else
pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
@@ -1309,12 +1319,6 @@ mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len,
cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
break;
- case 12:
- cdb[5] = (u_int8_t)(start_blk & 0xff);
- cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
- cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
- cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
- break;
case 16:
cdb[9] = (u_int8_t)(start_blk & 0xff);
cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
@@ -1451,7 +1455,12 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
u_int32_t rowMod, armQ, arm, logArm;
u_int8_t do_invader = 0;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53))
do_invader = 1;
row = mega_div64_32(stripRow, raid->rowDataSize);
diff --git a/sys/dev/mrsas/mrsas_ioctl.c b/sys/dev/mrsas/mrsas_ioctl.c
index 3c4dbf9..4939156 100644
--- a/sys/dev/mrsas/mrsas_ioctl.c
+++ b/sys/dev/mrsas/mrsas_ioctl.c
@@ -138,6 +138,11 @@ mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
kern_sge32 = (struct mrsas_sge32 *)
((unsigned long)cmd->frame + user_ioc->sgl_off);
+ memset(ioctl_data_tag, 0, (sizeof(bus_dma_tag_t) * MAX_IOCTL_SGE));
+ memset(ioctl_data_dmamap, 0, (sizeof(bus_dmamap_t) * MAX_IOCTL_SGE));
+ memset(ioctl_data_mem, 0, (sizeof(void *) * MAX_IOCTL_SGE));
+ memset(ioctl_data_phys_addr, 0, (sizeof(bus_addr_t) * MAX_IOCTL_SGE));
+
/*
* For each user buffer, create a mirror buffer and copy in
*/
@@ -246,7 +251,14 @@ mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
* cmd to the SCSI mid-layer
*/
cmd->sync_cmd = 1;
- mrsas_issue_blocked_cmd(sc, cmd);
+ ret = mrsas_issue_blocked_cmd(sc, cmd);
+ if (ret == ETIMEDOUT) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "IOCTL command is timed out, initiating OCR\n");
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ ret = EAGAIN;
+ goto out;
+ }
cmd->sync_cmd = 0;
/*
@@ -435,6 +447,17 @@ mrsas_create_frame_pool(struct mrsas_softc *sc)
device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
return (ENOMEM);
}
+ /*
+ * For MFI controllers.
+ * max_num_sge = 60
+ * max_sge_sz = 16 byte (sizeof megasas_sge_skinny)
+ * Totl 960 byte (15 MFI frame of 64 byte)
+ *
+ * Fusion adapter require only 3 extra frame.
+ * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
+ * max_sge_sz = 12 byte (sizeof megasas_sge64)
+ * Total 192 byte (3 MFI frame of 64 byte)
+ */
memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
cmd->frame->io.context = cmd->index;
cmd->frame->io.pad_0 = 0;
diff --git a/sys/dev/sfxge/common/hunt_ev.c b/sys/dev/sfxge/common/ef10_ev.c
index 1a41b49..58582b2 100644
--- a/sys/dev/sfxge/common/hunt_ev.c
+++ b/sys/dev/sfxge/common/ef10_ev.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,7 @@ __FBSDID("$FreeBSD$");
#include "mcdi_mon.h"
#endif
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_QSTATS
#define EFX_EV_QSTAT_INCR(_eep, _stat) \
@@ -92,8 +92,7 @@ efx_mcdi_init_evq(
__in unsigned int instance,
__in efsys_mem_t *esmp,
__in size_t nevs,
- __in uint32_t irq,
- __out_opt uint32_t *irqp)
+ __in uint32_t irq)
{
efx_mcdi_req_t req;
uint8_t payload[
@@ -175,8 +174,7 @@ efx_mcdi_init_evq(
goto fail3;
}
- if (irqp != NULL)
- *irqp = MCDI_OUT_DWORD(req, INIT_EVQ_OUT_IRQ);
+ /* NOTE: ignore the returned IRQ param as firmware does not set it. */
return (0);
@@ -209,7 +207,7 @@ efx_mcdi_fini_evq(
MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
- efx_mcdi_execute(enp, &req);
+ efx_mcdi_execute_quiet(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
@@ -275,12 +273,14 @@ ef10_ev_qcreate(
eep->ee_drv_gen = ef10_ev_drv_gen;
eep->ee_mcdi = ef10_ev_mcdi;
+ /* Set up the event queue */
+ irq = index; /* INIT_EVQ expects function-relative vector number */
+
/*
- * Set up the event queue
- * NOTE: ignore the returned IRQ param as firmware does not set it.
+ * Interrupts may be raised for events immediately after the queue is
+ * created. See bug58606.
*/
- irq = index; /* INIT_EVQ expects function-relative vector number */
- if ((rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, NULL)) != 0)
+ if ((rc = efx_mcdi_init_evq(enp, index, esmp, n, irq)) != 0)
goto fail3;
return (0);
@@ -871,7 +871,9 @@ ef10_ev_mcdi(
*/
enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
- EFSYS_PROBE1(tx_descq_err, uint32_t, MCDI_EV_FIELD(eqp, DATA));
+ EFSYS_PROBE2(tx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
/* Inform the driver that a reset is required. */
eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
@@ -911,7 +913,9 @@ ef10_ev_mcdi(
*/
enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
- EFSYS_PROBE1(rx_descq_err, uint32_t, MCDI_EV_FIELD(eqp, DATA));
+ EFSYS_PROBE2(rx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
/* Inform the driver that a reset is required. */
eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
@@ -985,4 +989,4 @@ ef10_ev_rxlabel_fini(
eersp->eers_rx_mask = 0;
}
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_filter.c b/sys/dev/sfxge/common/ef10_filter.c
index eaa0720..505e386 100644
--- a/sys/dev/sfxge/common/hunt_filter.c
+++ b/sys/dev/sfxge/common/ef10_filter.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,7 @@ __FBSDID("$FreeBSD$");
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_FILTER
@@ -353,7 +353,7 @@ efx_mcdi_filter_op_delete(
MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo);
MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi);
- efx_mcdi_execute(enp, &req);
+ efx_mcdi_execute_quiet(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
@@ -970,7 +970,7 @@ ef10_filter_supported_filters(
{
efx_rc_t rc;
- if ((rc = efx_mcdi_get_parser_disp_info(enp, list, length) != 0))
+ if ((rc = efx_mcdi_get_parser_disp_info(enp, list, length)) != 0)
goto fail1;
return (0);
@@ -982,19 +982,15 @@ fail1:
}
static __checkReturn efx_rc_t
-ef10_filter_unicast_refresh(
+ef10_filter_insert_unicast(
__in efx_nic_t *enp,
__in_ecount(6) uint8_t const *addr,
- __in boolean_t all_unicst,
__in efx_filter_flag_t filter_flags)
{
ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
efx_filter_spec_t spec;
efx_rc_t rc;
- if (all_unicst == B_TRUE)
- goto use_uc_def;
-
/* Insert the filter for the local station address */
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
@@ -1002,77 +998,82 @@ ef10_filter_unicast_refresh(
efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr);
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
- &eftp->eft_unicst_filter_index);
- if (rc != 0) {
- /*
- * Fall back to an unknown filter. We may be able to subscribe
- * to it even if we couldn't insert the unicast filter.
- */
- goto use_uc_def;
- }
- eftp->eft_unicst_filter_set = B_TRUE;
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
return (0);
-use_uc_def:
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_unicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flag_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
/* Insert the unknown unicast filter */
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
eftp->eft_default_rxq);
efx_filter_spec_set_uc_def(&spec);
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
- &eftp->eft_unicst_filter_index);
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
if (rc != 0)
goto fail1;
- eftp->eft_unicst_filter_set = B_TRUE;
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
return (0);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- if (eftp->eft_unicst_filter_set != B_FALSE) {
- (void) ef10_filter_delete_internal(enp,
- eftp->eft_unicst_filter_index);
-
- eftp->eft_unicst_filter_set = B_FALSE;
- }
-
return (rc);
}
static __checkReturn efx_rc_t
-ef10_filter_multicast_refresh(
+ef10_filter_insert_multicast_list(
__in efx_nic_t *enp,
__in boolean_t mulcst,
- __in boolean_t all_mulcst,
__in boolean_t brdcst,
__in_ecount(6*count) uint8_t const *addrs,
- __in int count,
- __in efx_filter_flag_t filter_flags)
+ __in uint32_t count,
+ __in efx_filter_flag_t filter_flags,
+ __in boolean_t rollback)
{
ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
efx_filter_spec_t spec;
uint8_t addr[6];
- unsigned i;
+ uint32_t i;
+ uint32_t filter_index;
+ uint32_t filter_count;
efx_rc_t rc;
- if (all_mulcst == B_TRUE)
- goto use_mc_def;
-
if (mulcst == B_FALSE)
count = 0;
if (count + (brdcst ? 1 : 0) >
EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) {
- /* Too many MAC addresses; use unknown multicast filter */
- goto use_mc_def;
+ /* Too many MAC addresses */
+ rc = EINVAL;
+ goto fail1;
}
/* Insert/renew multicast address list filters */
- eftp->eft_mulcst_filter_count = count;
- for (i = 0; i < eftp->eft_mulcst_filter_count; i++) {
+ filter_count = 0;
+ for (i = 0; i < count; i++) {
efx_filter_spec_init_rx(&spec,
EFX_FILTER_PRI_AUTO,
filter_flags,
@@ -1083,16 +1084,21 @@ ef10_filter_multicast_refresh(
&addrs[i * EFX_MAC_ADDR_LEN]);
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
- &eftp->eft_mulcst_filter_indexes[i]);
- if (rc != 0) {
- /* Rollback, then use unknown multicast filter */
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
goto rollback;
}
+
}
if (brdcst == B_TRUE) {
/* Insert/renew broadcast address filter */
- eftp->eft_mulcst_filter_count++;
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
eftp->eft_default_rxq);
@@ -1102,28 +1108,47 @@ ef10_filter_multicast_refresh(
addr);
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
- &eftp->eft_mulcst_filter_indexes[
- eftp->eft_mulcst_filter_count - 1]);
- if (rc != 0) {
- /* Rollback, then use unknown multicast filter */
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
goto rollback;
}
}
+ eftp->eft_mulcst_filter_count = filter_count;
+ eftp->eft_using_all_mulcst = B_FALSE;
+
return (0);
rollback:
- /*
- * Rollback by removing any filters we have inserted
- * before inserting the unknown multicast filter.
- */
+ /* Remove any filters we have inserted */
+ i = filter_count;
while (i--) {
(void) ef10_filter_delete_internal(enp,
eftp->eft_mulcst_filter_indexes[i]);
}
eftp->eft_mulcst_filter_count = 0;
-use_mc_def:
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_multicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flag_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
/* Insert the unknown multicast filter */
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
@@ -1136,6 +1161,7 @@ use_mc_def:
goto fail1;
eftp->eft_mulcst_filter_count = 1;
+ eftp->eft_using_all_mulcst = B_TRUE;
/*
* FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic.
@@ -1147,12 +1173,25 @@ fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
+}
+
+static void
+ef10_filter_remove_old(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ uint32_t i;
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ (void) ef10_filter_delete_internal(enp, i);
+ }
+ }
}
static __checkReturn efx_rc_t
-hunt_filter_get_workarounds(
+ef10_filter_get_workarounds(
__in efx_nic_t *enp)
{
efx_nic_cfg_t *encp = &enp->en_nic_cfg;
@@ -1202,13 +1241,14 @@ ef10_filter_reconfigure(
__in boolean_t all_mulcst,
__in boolean_t brdcst,
__in_ecount(6*count) uint8_t const *addrs,
- __in int count)
+ __in uint32_t count)
{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
efx_filter_flag_t filter_flags;
unsigned i;
- int all_unicst_rc;
- int all_mulcst_rc;
+ efx_rc_t all_unicst_rc = 0;
+ efx_rc_t all_mulcst_rc = 0;
efx_rc_t rc;
if (table->eft_default_rxq == NULL) {
@@ -1218,11 +1258,12 @@ ef10_filter_reconfigure(
* filters must be removed (ignore errors in case the MC
* has rebooted, which removes hardware filters).
*/
- if (table->eft_unicst_filter_set != B_FALSE) {
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
(void) ef10_filter_delete_internal(enp,
- table->eft_unicst_filter_index);
- table->eft_unicst_filter_set = B_FALSE;
+ table->eft_unicst_filter_indexes[i]);
}
+ table->eft_unicst_filter_count = 0;
+
for (i = 0; i < table->eft_mulcst_filter_count; i++) {
(void) ef10_filter_delete_internal(enp,
table->eft_mulcst_filter_indexes[i]);
@@ -1238,27 +1279,39 @@ ef10_filter_reconfigure(
filter_flags = 0;
/* Mark old filters which may need to be removed */
- if (table->eft_unicst_filter_set != B_FALSE) {
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
ef10_filter_set_entry_auto_old(table,
- table->eft_unicst_filter_index);
+ table->eft_unicst_filter_indexes[i]);
}
for (i = 0; i < table->eft_mulcst_filter_count; i++) {
ef10_filter_set_entry_auto_old(table,
table->eft_mulcst_filter_indexes[i]);
}
- /* Insert or renew unicast filters */
- if ((all_unicst_rc = ef10_filter_unicast_refresh(enp, mac_addr,
- all_unicst, filter_flags)) != 0) {
- if (all_unicst == B_FALSE) {
- rc = all_unicst_rc;
+ /*
+ * Insert or renew unicast filters.
+ *
+ * Frimware does not perform chaining on unicast filters. As traffic is
+ * therefore only delivered to the first matching filter, we should
+ * always insert the specific filter for our MAC address, to try and
+ * ensure we get that traffic.
+ *
+ * (If the filter for our MAC address has already been inserted by
+ * another function, we won't receive traffic sent to us, even if we
+ * insert a unicast mismatch filter. To prevent traffic stealing, this
+ * therefore relies on the privilege model only allowing functions to
+ * insert filters for their own MAC address unless explicitly given
+ * additional privileges by the user. This also means that, even on a
+ * priviliged function, inserting a unicast mismatch filter may not
+ * catch all traffic in multi PCI function scenarios.)
+ */
+ table->eft_unicst_filter_count = 0;
+ rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags);
+ if (all_unicst || (rc != 0)) {
+ all_unicst_rc = ef10_filter_insert_all_unicast(enp,
+ filter_flags);
+ if ((rc != 0) && (all_unicst_rc != 0))
goto fail1;
- }
- /* Retry without all_unicast flag */
- rc = ef10_filter_unicast_refresh(enp, mac_addr,
- B_FALSE, filter_flags);
- if (rc != 0)
- goto fail2;
}
/*
@@ -1266,43 +1319,94 @@ ef10_filter_reconfigure(
* filters, and can only be enabled or disabled when the hardware filter
* table is empty.
*
+ * Chained multicast filters require support from the datapath firmware,
+ * and may not be available (e.g. low-latency variants or old Huntington
+ * firmware).
+ *
* Firmware will reset (FLR) functions which have inserted filters in
* the hardware filter table when the workaround is enabled/disabled.
* Functions without any hardware filters are not reset.
*
* Re-check if the workaround is enabled after adding unicast hardware
- * filters. This ensures that encp->enc_workaround_bug26807 matches the
+ * filters. This ensures that encp->enc_bug26807_workaround matches the
* firmware state, and that later changes to enable/disable the
* workaround will result in this function seeing a reset (FLR).
*
- * FIXME: On Medford mulicast chaining should always be on.
+ * In common-code drivers, we only support multiple PCI function
+ * scenarios with firmware that supports multicast chaining, so we can
+ * assume it is enabled for such cases and hence simplify the filter
+ * insertion logic. Firmware that does not support multicast chaining
+ * does not support multiple PCI function configurations either, so
+ * filter insertion is much simpler and the same strategies can still be
+ * used.
*/
- if ((rc = hunt_filter_get_workarounds(enp)) != 0)
- goto fail3;
+ if ((rc = ef10_filter_get_workarounds(enp)) != 0)
+ goto fail2;
+
+ if ((table->eft_using_all_mulcst != all_mulcst) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is enabled, so traffic that matches
+ * more than one multicast filter will be replicated and
+ * delivered to multiple recipients. To avoid this duplicate
+ * delivery, remove old multicast filters before inserting new
+ * multicast filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
/* Insert or renew multicast filters */
- if ((all_mulcst_rc = ef10_filter_multicast_refresh(enp, mulcst,
- all_mulcst, brdcst,
- addrs, count, filter_flags)) != 0) {
- if (all_mulcst == B_FALSE) {
- rc = all_mulcst_rc;
- goto fail4;
+ if (all_mulcst == B_TRUE) {
+ /*
+ * Insert the all multicast filter. If that fails, try to insert
+ * all of our multicast filters (but without rollback on
+ * failure).
+ */
+ all_mulcst_rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (all_mulcst_rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp, B_TRUE,
+ brdcst, addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail3;
}
- /* Retry without all_mulcast flag */
- rc = ef10_filter_multicast_refresh(enp, mulcst,
- B_FALSE, brdcst,
- addrs, count, filter_flags);
- if (rc != 0)
- goto fail5;
- }
+ } else {
+ /*
+ * Insert filters for multicast addresses.
+ * If any insertion fails, then rollback and try to insert the
+ * all multicast filter instead.
+ * If that also fails, try to insert all of the multicast
+ * filters (but without rollback on failure).
+ */
+ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst,
+ addrs, count, filter_flags, B_TRUE);
+ if (rc != 0) {
+ if ((table->eft_using_all_mulcst == B_FALSE) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is on, so remove
+ * old filters before inserting the multicast
+ * all filter to avoid duplicate delivery caused
+ * by packets matching multiple filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
- /* Remove old filters which were not renewed */
- for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
- if (ef10_filter_entry_is_auto_old(table, i)) {
- (void) ef10_filter_delete_internal(enp, i);
+ rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp,
+ mulcst, brdcst,
+ addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail4;
+ }
}
}
+ /* Remove old filters which were not renewed */
+ ef10_filter_remove_old(enp);
+
/* report if any optional flags were rejected */
if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) ||
((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) {
@@ -1311,8 +1415,6 @@ ef10_filter_reconfigure(
return (rc);
-fail5:
- EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
fail3:
@@ -1377,4 +1479,4 @@ ef10_filter_default_rxq_clear(
#endif /* EFSYS_OPT_FILTER */
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/ef10_impl.h b/sys/dev/sfxge/common/ef10_impl.h
index 0c687ae..3b850d2 100644
--- a/sys/dev/sfxge/common/ef10_impl.h
+++ b/sys/dev/sfxge/common/ef10_impl.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2015 Solarflare Communications Inc.
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,993 @@ extern "C" {
#define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS
#endif
+/*
+ * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
+ * possibly be increased, or the write size reported by newer firmware used
+ * instead.
+ */
+#define EF10_NVRAM_CHUNK 0x80
+
+/* Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary
+ */
+#define EF10_RX_WPTR_ALIGN 8
+
+/*
+ * Max byte offset into the packet the TCP header must start for the hardware
+ * to be able to parse the packet correctly.
+ */
+#define EF10_TCP_HEADER_OFFSET_LIMIT 208
+
+/* Invalid RSS context handle */
+#define EF10_RSS_CONTEXT_INVALID (0xffffffff)
+
+
+/* EV */
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep);
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+#endif /* EFSYS_OPT_QSTATS */
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label);
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label);
+
+/* INTR */
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp);
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp);
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+ef10_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp);
+
+
+/* MAC */
+
+extern __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+
+/* MCDI */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern void
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_NVRAM */
+
+
+/* PHY */
+
+typedef struct ef10_link_state_s {
+ uint32_t els_adv_cap_mask;
+ uint32_t els_lp_cap_mask;
+ unsigned int els_fcntl;
+ efx_link_mode_t els_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t els_loopback;
+#endif
+ boolean_t els_mac_up;
+} ef10_link_state_t;
+
+extern void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+/* TX */
+
+extern __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_tx_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+extern void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t vlan_tci,
+ __out efx_desc_t *edp);
+
+
+#if EFSYS_OPT_QSTATS
+
+extern void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+typedef uint32_t efx_piobuf_handle_t;
+
+#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1)
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index);
+
+
+/* VPD */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+ef10_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+extern __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#if EFSYS_OPT_RX_SCALE
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+extern void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_FILTER
+
+typedef struct ef10_filter_handle_s {
+ uint32_t efh_lo;
+ uint32_t efh_hi;
+} ef10_filter_handle_t;
+
+typedef struct ef10_filter_entry_s {
+ uintptr_t efe_spec; /* pointer to filter spec plus busy bit */
+ ef10_filter_handle_t efe_handle;
+} ef10_filter_entry_t;
+
+/*
+ * BUSY flag indicates that an update is in progress.
+ * AUTO_OLD flag is used to mark and sweep MAC packet filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1U
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U
+#define EFX_EF10_FILTER_FLAGS 3U
+
+/*
+ * Size of the hash table used by the driver. Doesn't need to be the
+ * same size as the hardware's table.
+ */
+#define EFX_EF10_FILTER_TBL_ROWS 8192
+
+/* Only need to allow for one directed and one unknown unicast filter */
+#define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2
+
+/* Allow for the broadcast address to be added to the multicast list */
+#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1)
+
+typedef struct ef10_filter_table_s {
+ ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
+ efx_rxq_t * eft_default_rxq;
+ boolean_t eft_using_rss;
+ uint32_t eft_unicst_filter_indexes[
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX];
+ boolean_t eft_unicst_filter_count;
+ uint32_t eft_mulcst_filter_indexes[
+ EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
+ uint32_t eft_mulcst_filter_count;
+ boolean_t eft_using_all_mulcst;
+} ef10_filter_table_t;
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out uint32_t *list,
+ __out size_t *length);
+
+extern __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+extern void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss);
+
+extern void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp);
+
extern __checkReturn efx_rc_t
efx_mcdi_get_port_assignment(
__in efx_nic_t *enp,
@@ -53,7 +1040,13 @@ efx_mcdi_get_port_assignment(
extern __checkReturn efx_rc_t
efx_mcdi_get_port_modes(
__in efx_nic_t *enp,
- __out uint32_t *modesp);
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp);
extern __checkReturn efx_rc_t
efx_mcdi_get_mac_address_pf(
@@ -68,7 +1061,9 @@ efx_mcdi_get_mac_address_vf(
extern __checkReturn efx_rc_t
efx_mcdi_get_clock(
__in efx_nic_t *enp,
- __out uint32_t *sys_freqp);
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp);
+
extern __checkReturn efx_rc_t
efx_mcdi_get_vector_cfg(
diff --git a/sys/dev/sfxge/common/hunt_intr.c b/sys/dev/sfxge/common/ef10_intr.c
index 7a4293c..2bcc9fb 100644
--- a/sys/dev/sfxge/common/hunt_intr.c
+++ b/sys/dev/sfxge/common/ef10_intr.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
__checkReturn efx_rc_t
ef10_intr_init(
@@ -197,4 +197,4 @@ ef10_intr_fini(
_NOTE(ARGUNUSED(enp))
}
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_mac.c b/sys/dev/sfxge/common/ef10_mac.c
index a36a11a..ba80089 100644
--- a/sys/dev/sfxge/common/hunt_mac.c
+++ b/sys/dev/sfxge/common/ef10_mac.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
__checkReturn efx_rc_t
ef10_mac_poll(
@@ -199,6 +199,53 @@ fail1:
return (rc);
}
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_get(
+ __in efx_nic_t *enp,
+ __out size_t *mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_V2_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_V2_OUT_LEN;
+
+ /*
+ * With MC_CMD_SET_MAC_EXT_IN_CONTROL set to 0, this just queries the
+ * MTU. This should always be supported on Medford, but it is not
+ * supported on older Huntington firmware.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_CONTROL, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ if (req.emr_out_length_used < MC_CMD_SET_MAC_V2_OUT_MTU_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *mtu = MCDI_OUT_DWORD(req, SET_MAC_V2_OUT_MTU);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
__checkReturn efx_rc_t
ef10_mac_pdu_set(
__in efx_nic_t *enp)
@@ -230,6 +277,24 @@ fail1:
return (rc);
}
+ __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mtu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
__checkReturn efx_rc_t
ef10_mac_reconfigure(
__in efx_nic_t *enp)
@@ -314,7 +379,7 @@ ef10_mac_multicast_list_set(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
@@ -390,7 +455,7 @@ ef10_mac_loopback_set(
__in efx_loopback_type_t loopback_type)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
efx_loopback_type_t old_loopback_type;
efx_link_mode_t old_loopback_link_mode;
efx_rc_t rc;
@@ -749,4 +814,4 @@ ef10_mac_stats_update(
#endif /* EFSYS_OPT_MAC_STATS */
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_mcdi.c b/sys/dev/sfxge/common/ef10_mcdi.c
index f39e977..f14d35f 100644
--- a/sys/dev/sfxge/common/hunt_mcdi.c
+++ b/sys/dev/sfxge/common/ef10_mcdi.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -213,6 +213,7 @@ ef10_mcdi_poll_reboot(
* this can be handled by common code drivers (and reworked to
* support Siena too).
*/
+ _NOTE(CONSTANTCONDITION)
if (B_FALSE) {
rc = EIO;
goto fail1;
@@ -287,7 +288,6 @@ ef10_mcdi_feature_supported(
default:
rc = ENOTSUP;
goto fail1;
- break;
}
return (0);
diff --git a/sys/dev/sfxge/common/ef10_nic.c b/sys/dev/sfxge/common/ef10_nic.c
new file mode 100644
index 0000000..c607f2d
--- /dev/null
+++ b/sys/dev/sfxge/common/ef10_nic.c
@@ -0,0 +1,1697 @@
+/*-
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+ MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
+ MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /*
+ * Require only Modes and DefaultMode fields, unless the current mode
+ * was requested (CurrentMode field was added for Medford).
+ */
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ if ((current_modep != NULL) && (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
+
+ if (current_modep != NULL) {
+ *current_modep = MCDI_OUT_DWORD(req,
+ GET_PORT_MODES_OUT_CURRENT_MODE);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ switch (port_mode) {
+ case TLV_PORT_MODE_10G:
+ bandwidth = 10000;
+ break;
+ case TLV_PORT_MODE_10G_10G:
+ bandwidth = 10000 * 2;
+ break;
+ case TLV_PORT_MODE_10G_10G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
+ bandwidth = 10000 * 4;
+ break;
+ case TLV_PORT_MODE_40G:
+ bandwidth = 40000;
+ break;
+ case TLV_PORT_MODE_40G_40G:
+ bandwidth = 40000 * 2;
+ break;
+ case TLV_PORT_MODE_40G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_40G:
+ bandwidth = 40000 + (10000 * 2);
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_alloc(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+ MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
+ VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
+ enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_free(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
+ MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
+ MC_CMD_GET_CLOCK_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CLOCK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
+ if (*sys_freqp == 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
+ if (*dpcpu_freqp == 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
+ MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (vec_basep != NULL)
+ *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
+ if (pf_nvecp != NULL)
+ *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
+ if (vf_nvecp != NULL)
+ *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out uint32_t *flagsp,
+ __out uint32_t *flags2p)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ *flags2p = 0;
+ else
+ *flags2p = MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_vis(
+ __in efx_nic_t *enp,
+ __in uint32_t min_vi_count,
+ __in uint32_t max_vi_count,
+ __out uint32_t *vi_basep,
+ __out uint32_t *vi_countp,
+ __out uint32_t *vi_shiftp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
+ MC_CMD_ALLOC_VIS_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (vi_countp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_VIS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_VIS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
+ *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
+
+ /* Report VI_SHIFT if available (always zero for Huntington) */
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
+ *vi_shiftp = 0;
+ else
+ *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_vis(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_FREE_VIS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+	/* Ignore EALREADY (no allocated VIs, so nothing to free) */
+ if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_piobuf(
+ __in efx_nic_t *enp,
+ __out efx_piobuf_handle_t *handlep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
+ MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (handlep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_piobuf(
+ __in efx_nic_t *enp,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
+ MC_CMD_FREE_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FREE_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_link_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_LINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_unlink_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_nic_alloc_piobufs(
+ __in efx_nic_t *enp,
+ __in uint32_t max_piobuf_count)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(max_piobuf_count, <=,
+ EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
+
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+
+ for (i = 0; i < max_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
+ goto fail1;
+
+ enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
+ enp->en_arch.ef10.ena_piobuf_count++;
+ }
+
+ return;
+
+fail1:
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+
+static void
+ef10_nic_free_piobufs(
+ __in efx_nic_t *enp)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+/* Sub-allocate a block from a piobuf */
+ __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
+ uint32_t blk_per_buf;
+ uint32_t buf, blk;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(bufnump);
+ EFSYS_ASSERT(handlep);
+ EFSYS_ASSERT(blknump);
+ EFSYS_ASSERT(offsetp);
+ EFSYS_ASSERT(sizep);
+
+ if ((edcp->edc_pio_alloc_size == 0) ||
+ (enp->en_arch.ef10.ena_piobuf_count == 0)) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
+
+ for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
+ uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
+
+ if (~(*map) == 0)
+ continue;
+
+ EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
+ for (blk = 0; blk < blk_per_buf; blk++) {
+ if ((*map & (1u << blk)) == 0) {
+ *map |= (1u << blk);
+ goto done;
+ }
+ }
+ }
+ rc = ENOMEM;
+ goto fail2;
+
+done:
+ *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
+ *bufnump = buf;
+ *blknump = blk;
+ *sizep = edcp->edc_pio_alloc_size;
+ *offsetp = blk * (*sizep);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Free a piobuf sub-allocated block */
+ __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum)
+{
+ uint32_t *map;
+ efx_rc_t rc;
+
+ if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
+ (blknum >= (8 * sizeof (*map)))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
+ if ((*map & (1u << blknum)) == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+ *map &= ~(1u << blknum);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ return (efx_mcdi_link_piobuf(enp, vi_index, handle));
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ return (efx_mcdi_unlink_piobuf(enp, vi_index));
+}
+
+ __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t flags;
+ uint32_t flags2;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_capabilities(enp, &flags, &flags2)) != 0)
+ goto fail1;
+
+#define CAP_FLAG(flags1, field) \
+ ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+#define CAP_FLAG2(flags2, field) \
+ ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+ /*
+ * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
+ * We only support the 14 byte prefix here.
+ */
+ if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ encp->enc_rx_prefix_size = 14;
+
+ /* Check if the firmware supports TSO */
+ encp->enc_fw_assisted_tso_enabled =
+ CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports FATSOv2 */
+ encp->enc_fw_assisted_tso_v2_enabled =
+ CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware has vadapter/vport/vswitch support */
+ encp->enc_datapath_cap_evb =
+ CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports VLAN insertion */
+ encp->enc_hw_tx_insert_vlan_enabled =
+ CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports RX event batching */
+ encp->enc_rx_batching_enabled =
+ CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
+
+ if (encp->enc_rx_batching_enabled)
+ encp->enc_rx_batch_max = 16;
+
+ /* Check if the firmware supports disabling scatter on RXQs */
+ encp->enc_rx_disable_scatter_supported =
+ CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports set mac with running filters */
+ encp->enc_allow_set_mac_with_installed_filters =
+ CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
+ B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
+ * specifying which parameters to configure.
+ */
+ encp->enc_enhanced_set_mac_supported =
+ CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
+
+#undef CAP_FLAG
+#undef CAP_FLAG2
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#define EF10_LEGACY_PF_PRIVILEGE_MASK \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
+
+#define EF10_LEGACY_VF_PRIVILEGE_MASK 0
+
+
+ __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t mask;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
+ &mask)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /* Fallback for old firmware without privilege mask support */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ /* Assume PF has admin privilege */
+ mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
+ } else {
+ /* VF is always unprivileged by default */
+ mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
+ }
+ }
+
+ *maskp = mask;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * The external port mapping is a one-based numbering of the external
+ * connectors on the board. It does not distinguish off-board separated
+ * outputs such as multi-headed cables.
+ * The number of ports that map to each external port connector
+ * on the board is determined by the chip family and the port modes to
+ * which the NIC can be configured. The mapping table lists modes with
+ * port numbering requirements in increasing order.
+ */
+static struct {
+ efx_family_t family;
+ uint32_t modes_mask;
+ uint32_t stride;
+} __ef10_external_port_mappings[] = {
+ /* Supported modes requiring 1 output per port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G),
+ 1
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G),
+ 1
+ },
+ /* Supported modes requiring 2 outputs per port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G),
+ 2
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G),
+ 2
+ },
+ /* Supported modes requiring 4 outputs per port */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
+ 4
+ },
+};
+
+ __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp)
+{
+ efx_rc_t rc;
+ int i;
+ uint32_t port_modes;
+ uint32_t matches;
+ uint32_t stride = 1; /* default 1-1 mapping */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) {
+ /* No port mode information available - use default mapping */
+ goto out;
+ }
+
+ /*
+ * Infer the internal port -> external port mapping from
+ * the possible port modes for this NIC.
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
+ if (__ef10_external_port_mappings[i].family !=
+ enp->en_family)
+ continue;
+ matches = (__ef10_external_port_mappings[i].modes_mask &
+ port_modes);
+ if (matches != 0) {
+ stride = __ef10_external_port_mappings[i].stride;
+ port_modes &= ~matches;
+ }
+ }
+
+ if (port_modes != 0) {
+ /* Some advertised modes are not supported */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+out:
+ /*
+ * Scale as required by last matched mode and then convert to
+ * one-based numbering
+ */
+ *external_portp = (uint8_t)(port / stride) + 1;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read and clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ if (rc != EACCES)
+ goto fail2;
+
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail3;
+
+ if ((rc = enop->eno_board_cfg(enp)) != 0)
+ if (rc != EACCES)
+ goto fail4;
+
+ /*
+ * Set default driver config limits (based on board config).
+ *
+ * FIXME: For now allocate a fixed number of VIs which is likely to be
+ * sufficient and small enough to allow multiple functions on the same
+ * port.
+ */
+ edcp->edc_min_vi_count = edcp->edc_max_vi_count =
+ MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
+
+ /* The client driver must configure and enable PIO buffer support */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail5;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail6;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
+ /* Unprivileged functions do not have access to sensors */
+ if (rc != EACCES)
+ goto fail7;
+ }
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail7:
+ EFSYS_PROBE(fail7);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail6:
+ EFSYS_PROBE(fail6);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail5:
+ EFSYS_PROBE(fail5);
+#endif
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Record the driver's requested resource limits in the NIC's driver
+ * configuration (en_drv_cfg), clamping each EVQ/RXQ/TXQ count to the
+ * hardware limits discovered in en_nic_cfg.  PIO limits are optional:
+ * if they are absent or inconsistent, PIO is simply disabled rather
+ * than failing the call.  Returns EINVAL only when edlp is NULL.
+ */
+ __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_evq_count, max_evq_count;
+ uint32_t min_rxq_count, max_rxq_count;
+ uint32_t min_txq_count, max_txq_count;
+ efx_rc_t rc;
+
+ if (edlp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get minimum required and maximum usable VI limits */
+ min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
+ min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
+ min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_min_vi_count =
+ MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
+
+ max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
+ max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
+ max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_max_vi_count =
+ MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
+
+ /*
+ * Check limits for sub-allocated piobuf blocks.
+ * PIO is optional, so don't fail if the limits are incorrect.
+ */
+ if ((encp->enc_piobuf_size == 0) ||
+ (encp->enc_piobuf_limit == 0) ||
+ (edlp->edl_min_pio_alloc_size == 0) ||
+ (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
+ /* Disable PIO */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+ } else {
+ uint32_t blk_size, blk_count, blks_per_piobuf;
+
+ /* Sub-allocation block size: at least the hardware minimum */
+ blk_size =
+ MAX(edlp->edl_min_pio_alloc_size,
+ encp->enc_piobuf_min_alloc_size);
+
+ blks_per_piobuf = encp->enc_piobuf_size / blk_size;
+ EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
+
+ blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
+
+ /* A zero max pio alloc count means unlimited */
+ if ((edlp->edl_max_pio_alloc_count > 0) &&
+ (edlp->edl_max_pio_alloc_count < blk_count)) {
+ blk_count = edlp->edl_max_pio_alloc_count;
+ }
+
+ edcp->edc_pio_alloc_size = blk_size;
+ /* Round up: enough piobufs to cover blk_count blocks */
+ edcp->edc_max_piobuf_count =
+ (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Reset this PCIe function by issuing an MCDI ENTITY_RESET request.
+ *
+ * Any pending firmware assertion state is read and cleared first
+ * (via efx_mcdi_read_assertion / efx_mcdi_exit_assertion_handler),
+ * since this routine is the recovery path for BADASSERT failures.
+ * On success the RXQ/TXQ DMA error flags are cleared from
+ * en_reset_flags.
+ */
+ __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ /* Single buffer reused for both MCDI request and response */
+ uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN)];
+ efx_rc_t rc;
+
+ /* ef10_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ /* Clear RX/TX DMA queue errors */
+ enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Initialize the NIC for use: allocate VI resources (EVQ+RXQ+TXQ),
+ * optionally allocate and link on-chip PIO buffers, compute the UC/WC
+ * BAR memory-map split, and allocate a vAdaptor on the upstream
+ * vPort/pPort.  Uses the limits previously recorded by
+ * ef10_nic_set_drv_limits() in en_drv_cfg.
+ */
+ __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_vi_count, max_vi_count;
+ uint32_t vi_count, vi_base, vi_shift;
+ uint32_t i;
+ uint32_t retry;
+ uint32_t delay_us;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ /* Allocate (optional) on-chip PIO buffers */
+ ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
+
+ /*
+ * For best performance, PIO writes should use a write-combined
+ * (WC) memory mapping. Using a separate WC mapping for the PIO
+ * aperture of each VI would be a burden to drivers (and not
+ * possible if the host page size is >4Kbyte).
+ *
+ * To avoid this we use a single uncached (UC) mapping for VI
+ * register access, and a single WC mapping for extra VIs used
+ * for PIO writes.
+ *
+ * Each piobuf must be linked to a VI in the WC mapping, and to
+ * each VI that is using a sub-allocated block from the piobuf.
+ */
+ min_vi_count = edcp->edc_min_vi_count;
+ max_vi_count =
+ edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Ensure that the previously attached driver's VIs are freed */
+ if ((rc = efx_mcdi_free_vis(enp)) != 0)
+ goto fail2;
+
+ /*
+ * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
+ * fails then retrying the request for fewer VI resources may succeed.
+ */
+ vi_count = 0;
+ if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
+ &vi_base, &vi_count, &vi_shift)) != 0)
+ goto fail3;
+
+ EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
+
+ if (vi_count < min_vi_count) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ enp->en_arch.ef10.ena_vi_base = vi_base;
+ enp->en_arch.ef10.ena_vi_count = vi_count;
+ enp->en_arch.ef10.ena_vi_shift = vi_shift;
+
+ if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
+ /* Not enough extra VIs to map piobufs */
+ ef10_nic_free_piobufs(enp);
+ }
+
+ /* The highest VIs (if any) are reserved for PIO writes */
+ enp->en_arch.ef10.ena_pio_write_vi_base =
+ vi_count - enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Save UC memory mapping details */
+ enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_pio_write_vi_base);
+ } else {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_vi_count);
+ }
+
+ /* Save WC memory mapping details */
+ enp->en_arch.ef10.ena_wc_mem_map_offset =
+ enp->en_arch.ef10.ena_uc_mem_map_offset +
+ enp->en_arch.ef10.ena_uc_mem_map_size;
+
+ enp->en_arch.ef10.ena_wc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_piobuf_count);
+
+ /* Link piobufs to extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_link_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i,
+ enp->en_arch.ef10.ena_piobuf_handle[i]);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ /*
+ * NOTE(review): rc from the link loop above is not re-checked, so a
+ * link failure falls through to the vAdaptor allocation.  PIO is
+ * optional, so this looks deliberate — confirm against the vendor
+ * reference driver.
+ */
+
+ /*
+ * Allocate a vAdaptor attached to our upstream vPort/pPort.
+ *
+ * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
+ * driver has yet to bring up the EVB port. See bug 56147. In this case,
+ * retry the request several times after waiting a while. The wait time
+ * between retries starts small (10ms) and exponentially increases.
+ * Total wait time is a little over two seconds. Retry logic in the
+ * client driver may mean this whole loop is repeated if it continues to
+ * fail.
+ */
+ retry = 0;
+ delay_us = 10000;
+ while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
+ (rc != ENOENT)) {
+ /*
+ * Do not retry alloc for PF, or for other errors on
+ * a VF.
+ */
+ goto fail5;
+ }
+
+ /* VF startup before PF is ready. Retry allocation. */
+ if (retry > 5) {
+ /* Too many attempts */
+ rc = EINVAL;
+ goto fail6;
+ }
+ EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
+ EFSYS_SLEEP(delay_us);
+ retry++;
+ /* Exponential backoff, capped at 500ms per wait */
+ if (delay_us < 500000)
+ delay_us <<= 2;
+ }
+
+ enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+
+ /* Piobufs were allocated after fail1's point; release them here */
+ ef10_nic_free_piobufs(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Report the number of VIs available to the client driver.  VIs above
+ * ena_pio_write_vi_base are reserved for PIO writes and excluded.
+ */
+ __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Report VIs that the client driver can use.
+ * Do not include VIs used for PIO buffer writes.
+ */
+ *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
+
+ return (0);
+}
+
+/*
+ * Return the offset and size within the function BAR of the requested
+ * memory region: EFX_REGION_VI (uncached VI registers) or
+ * EFX_REGION_PIO_WRITE_VI (write-combined piobuf aperture), as computed
+ * by ef10_nic_init().  Returns EINVAL for any other region.
+ */
+ __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * TODO: Specify host memory mapping alignment and granularity
+ * in efx_drv_limits_t so that they can be taken into account
+ * when allocating extra VIs for PIO writes.
+ */
+ switch (region) {
+ case EFX_REGION_VI:
+ /* UC mapped memory BAR region for VI registers */
+ *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
+ break;
+
+ case EFX_REGION_PIO_WRITE_VI:
+ /* WC mapped memory BAR region for piobuf writes */
+ *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Tear down resources acquired by ef10_nic_init(): free the vAdaptor,
+ * unlink and free piobufs, and release all VIs.  Errors are ignored
+ * (best-effort teardown); rc from the unlink loop only stops further
+ * unlink attempts.
+ */
+ void
+ef10_nic_fini(
+ __in efx_nic_t *enp)
+{
+ uint32_t i;
+ efx_rc_t rc;
+
+ (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
+ enp->en_vport_id = 0;
+
+ /* Unlink piobufs from extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_unlink_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ ef10_nic_free_piobufs(enp);
+
+ (void) efx_mcdi_free_vis(enp);
+ enp->en_arch.ef10.ena_vi_count = 0;
+}
+
+/*
+ * Undo ef10_nic_probe(): release monitor statistics state (if
+ * configured) and detach the driver from the firmware.
+ */
+ void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+/*
+ * Diagnostic register test.  Currently a placeholder that always
+ * succeeds: the B_FALSE branch is unreachable and exists only to keep
+ * the fail-path structure (and lint annotations) consistent with the
+ * rest of the file until a real test is implemented (see FIXMEs).
+ */
+ __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(enp))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_nvram.c b/sys/dev/sfxge/common/ef10_nvram.c
index dd471f4..7f93df3 100644
--- a/sys/dev/sfxge/common/hunt_nvram.c
+++ b/sys/dev/sfxge/common/ef10_nvram.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,7 @@ __FBSDID("$FreeBSD$");
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
@@ -48,14 +48,34 @@ typedef struct tlv_cursor_s {
uint32_t *limit; /* Last dword of data block */
} tlv_cursor_t;
+typedef struct nvram_partition_s {
+ uint16_t type;
+ uint8_t chip_select;
+ uint8_t flags;
+ /*
+ * The full length of the NVRAM partition.
+ * This is different from tlv_partition_header.total_length,
+ * which can be smaller.
+ */
+ uint32_t length;
+ uint32_t erase_size;
+ uint32_t *data;
+ tlv_cursor_t tlv_cursor;
+} nvram_partition_t;
+
+
static __checkReturn efx_rc_t
tlv_validate_state(
- __in tlv_cursor_t *cursor);
+ __inout tlv_cursor_t *cursor);
-/*
- * Operations on TLV formatted partition data.
- */
+/* Initialize an empty TLV chain: a single TLV_TAG_END terminator dword. */
+static void
+tlv_init_block(
+ __out uint32_t *block)
+{
+ *block = __CPU_TO_LE_32(TLV_TAG_END);
+}
+
static uint32_t
tlv_tag(
__in tlv_cursor_t *cursor)
@@ -122,9 +142,9 @@ tlv_next_item_ptr(
return (cursor->current + TLV_DWORD_COUNT(length));
}
-static efx_rc_t
+static __checkReturn efx_rc_t
tlv_advance(
- __in tlv_cursor_t *cursor)
+ __inout tlv_cursor_t *cursor)
{
efx_rc_t rc;
@@ -177,7 +197,7 @@ fail1:
static efx_rc_t
tlv_find(
- __in tlv_cursor_t *cursor,
+ __inout tlv_cursor_t *cursor,
__in uint32_t tag)
{
efx_rc_t rc;
@@ -194,7 +214,7 @@ tlv_find(
static __checkReturn efx_rc_t
tlv_validate_state(
- __in tlv_cursor_t *cursor)
+ __inout tlv_cursor_t *cursor)
{
efx_rc_t rc;
@@ -242,31 +262,49 @@ static efx_rc_t
tlv_init_cursor(
__out tlv_cursor_t *cursor,
__in uint32_t *block,
- __in uint32_t *limit)
+ __in uint32_t *limit,
+ __in uint32_t *current)
{
cursor->block = block;
cursor->limit = limit;
- cursor->current = cursor->block;
+ cursor->current = current;
cursor->end = NULL;
return (tlv_validate_state(cursor));
}
-static efx_rc_t
+static __checkReturn efx_rc_t
tlv_init_cursor_from_size(
__out tlv_cursor_t *cursor,
- __in uint8_t *block,
+ __in_bcount(size)
+ uint8_t *block,
__in size_t size)
{
uint32_t *limit;
limit = (uint32_t *)(block + size - sizeof (uint32_t));
- return (tlv_init_cursor(cursor, (uint32_t *)block, limit));
+ return (tlv_init_cursor(cursor, (uint32_t *)block,
+ limit, (uint32_t *)block));
}
-static efx_rc_t
+static __checkReturn efx_rc_t
+tlv_init_cursor_at_offset(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size,
+ __in size_t offset)
+{
+ uint32_t *limit;
+ uint32_t *current;
+ limit = (uint32_t *)(block + size - sizeof (uint32_t));
+ current = (uint32_t *)(block + offset);
+ return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current));
+}
+
+static __checkReturn efx_rc_t
tlv_require_end(
- __in tlv_cursor_t *cursor)
+ __inout tlv_cursor_t *cursor)
{
uint32_t *pos;
efx_rc_t rc;
@@ -290,7 +328,7 @@ fail1:
static size_t
tlv_block_length_used(
- __in tlv_cursor_t *cursor)
+ __inout tlv_cursor_t *cursor)
{
efx_rc_t rc;
@@ -311,8 +349,34 @@ fail1:
return (0);
}
+/*
+ * Return a pointer to the end tag of the last valid TLV segment in the
+ * cursor's block.  Falls back to the block start if no segment has a
+ * valid end tag.
+ */
+static uint32_t *
+tlv_last_segment_end(
+ __in tlv_cursor_t *cursor)
+{
+ tlv_cursor_t segment_cursor;
+ uint32_t *last_segment_end = cursor->block;
+ uint32_t *segment_start = cursor->block;
-static __checkReturn uint32_t *
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the pointer to its end tag.
+ */
+ for (;;) {
+ if (tlv_init_cursor(&segment_cursor, segment_start,
+ cursor->limit, segment_start) != 0)
+ break;
+ if (tlv_require_end(&segment_cursor) != 0)
+ break;
+ last_segment_end = segment_cursor.end;
+ /* Next segment starts immediately after this end tag */
+ segment_start = segment_cursor.end + 1;
+ }
+
+ return (last_segment_end);
+}
+
+
+static uint32_t *
tlv_write(
__in tlv_cursor_t *cursor,
__in uint32_t tag,
@@ -338,12 +402,14 @@ tlv_write(
static __checkReturn efx_rc_t
tlv_insert(
- __in tlv_cursor_t *cursor,
+ __inout tlv_cursor_t *cursor,
__in uint32_t tag,
- __in uint8_t *data,
+ __in_bcount(size)
+ uint8_t *data,
__in size_t size)
{
unsigned int delta;
+ uint32_t *last_segment_end;
efx_rc_t rc;
if ((rc = tlv_validate_state(cursor)) != 0)
@@ -357,15 +423,17 @@ tlv_insert(
goto fail3;
}
+ last_segment_end = tlv_last_segment_end(cursor);
+
delta = TLV_DWORD_COUNT(size);
- if (cursor->end + 1 + delta > cursor->limit) {
+ if (last_segment_end + 1 + delta > cursor->limit) {
rc = ENOSPC;
goto fail4;
}
/* Move data up: new space at cursor->current */
memmove(cursor->current + delta, cursor->current,
- (cursor->end + 1 - cursor->current) * sizeof (uint32_t));
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
/* Adjust the end pointer */
cursor->end += delta;
@@ -388,16 +456,61 @@ fail1:
}
+/*
+ * Delete the TLV item at the cursor, shuffling later items down and
+ * zeroing the vacated dwords at the end of the chain.  Deleting the
+ * TLV_TAG_END terminator itself is rejected with EINVAL.
+ */
 static __checkReturn efx_rc_t
+tlv_delete(
+ __inout tlv_cursor_t *cursor)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* delta = number of dwords occupied by the item being removed */
+ delta = TLV_DWORD_COUNT(tlv_length(cursor));
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail3;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ /* Shuffle things down, destroying the item at cursor->current */
+ memmove(cursor->current, cursor->current + delta,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+ /* Zero the new space at the end of the TLV chain */
+ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t));
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
tlv_modify(
- __in tlv_cursor_t *cursor,
+ __inout tlv_cursor_t *cursor,
__in uint32_t tag,
- __in uint8_t *data,
+ __in_bcount(size)
+ uint8_t *data,
__in size_t size)
{
uint32_t *pos;
unsigned int old_ndwords;
unsigned int new_ndwords;
unsigned int delta;
+ uint32_t *last_segment_end;
efx_rc_t rc;
if ((rc = tlv_validate_state(cursor)) != 0)
@@ -418,19 +531,21 @@ tlv_modify(
if ((rc = tlv_require_end(cursor)) != 0)
goto fail4;
+ last_segment_end = tlv_last_segment_end(cursor);
+
if (new_ndwords > old_ndwords) {
/* Expand space used for TLV item */
delta = new_ndwords - old_ndwords;
pos = cursor->current + old_ndwords;
- if (cursor->end + 1 + delta > cursor->limit) {
+ if (last_segment_end + 1 + delta > cursor->limit) {
rc = ENOSPC;
goto fail5;
}
/* Move up: new space at (cursor->current + old_ndwords) */
memmove(pos + delta, pos,
- (cursor->end + 1 - pos) * sizeof (uint32_t));
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
/* Adjust the end pointer */
cursor->end += delta;
@@ -442,10 +557,11 @@ tlv_modify(
/* Move down: remove words at (cursor->current + new_ndwords) */
memmove(pos, pos + delta,
- (cursor->end + 1 - pos) * sizeof (uint32_t));
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
/* Zero the new space at the end of the TLV chain */
- memset(cursor->end + 1 - delta, 0, delta * sizeof (uint32_t));
+ memset(last_segment_end + 1 - delta, 0,
+ delta * sizeof (uint32_t));
/* Adjust the end pointer */
cursor->end -= delta;
@@ -470,9 +586,82 @@ fail1:
return (rc);
}
-/* Validate TLV formatted partition contents (before writing to flash) */
+/*
+ * Sum (mod 2^32) the little-endian dwords of the partition's used TLV
+ * data.  Used to maintain the partition trailer checksum.
+ */
+static uint32_t checksum_tlv_partition(
+ __in nvram_partition_t *partition)
+{
+ tlv_cursor_t *cursor;
+ uint32_t *ptr;
+ uint32_t *end;
+ uint32_t csum;
+ size_t len;
+
+ cursor = &partition->tlv_cursor;
+ len = tlv_block_length_used(cursor);
+ /* Used length must be a whole number of dwords */
+ EFSYS_ASSERT3U((len & 3), ==, 0);
+
+ csum = 0;
+ ptr = partition->data;
+ end = &ptr[len >> 2];
+
+ while (ptr < end)
+ csum += __LE_TO_CPU_32(*ptr++);
+
+ return (csum);
+}
+
+/*
+ * After modifying a partition's TLV contents, rewrite the header's
+ * total_length, bump the generation count in both header and trailer,
+ * and adjust the trailer checksum so the partition sums correctly.
+ * Returns EFAULT if the header tag or used length is invalid.
+ */
+static __checkReturn efx_rc_t
+tlv_update_partition_len_and_cks(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+ nvram_partition_t partition;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t new_len;
+
+ /*
+ * We just modified the partition, so the total length may not be
+ * valid. Don't use tlv_find(), which performs some sanity checks
+ * that may fail here.
+ */
+ partition.data = cursor->block;
+ memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor));
+ header = (struct tlv_partition_header *)partition.data;
+ /* Sanity check. */
+ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ new_len = tlv_block_length_used(&partition.tlv_cursor);
+ if (new_len == 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ header->total_length = __CPU_TO_LE_32(new_len);
+ /* Ensure the modified partition always has a new generation count. */
+ header->generation = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(header->generation) + 1);
+
+ /* Trailer sits at the end of the used data, before the final dword */
+ trailer = (struct tlv_partition_trailer *)((uint8_t *)header +
+ new_len - sizeof (*trailer) - sizeof (uint32_t));
+ trailer->generation = header->generation;
+ /* Compensate checksum so the whole partition sums to the old value */
+ trailer->checksum = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(trailer->checksum) -
+ checksum_tlv_partition(&partition));
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
__checkReturn efx_rc_t
-efx_nvram_tlv_validate(
+ef10_nvram_buffer_validate(
__in efx_nic_t *enp,
__in uint32_t partn,
__in_bcount(partn_size) caddr_t partn_data,
@@ -568,6 +757,384 @@ fail1:
return (rc);
}
+
+
+/*
+ * Initialize a buffer as an empty TLV partition of the given type:
+ * fill with 0xff, write an end tag, insert a PARTITION_HEADER and
+ * PARTITION_TRAILER, then fix up length/checksum and validate the
+ * result.  Returns EINVAL if the buffer cannot hold header + trailer.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ uint32_t *buf = (uint32_t *)partn_data;
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header header;
+ struct tlv_partition_trailer trailer;
+
+ unsigned min_buf_size = sizeof (struct tlv_partition_header) +
+ sizeof (struct tlv_partition_trailer);
+ if (partn_size < min_buf_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* 0xff matches erased-flash contents */
+ memset(buf, 0xff, partn_size);
+
+ tlv_init_block(buf);
+ if ((rc = tlv_init_cursor(&cursor, buf,
+ (uint32_t *)((uint8_t *)buf + partn_size),
+ buf)) != 0) {
+ goto fail2;
+ }
+
+ header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER);
+ /* Value length excludes the 8-byte tag+length prefix */
+ header.length = __CPU_TO_LE_32(sizeof (header) - 8);
+ header.type_id = __CPU_TO_LE_16(partn_type);
+ header.preset = 0;
+ header.generation = __CPU_TO_LE_32(1);
+ header.total_length = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(
+ &cursor, TLV_TAG_PARTITION_HEADER,
+ (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0)
+ goto fail3;
+ if ((rc = tlv_advance(&cursor)) != 0)
+ goto fail4;
+
+ trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER);
+ trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8);
+ trailer.generation = header.generation;
+ trailer.checksum = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER,
+ (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0)
+ goto fail5;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail6;
+
+ /* Check that the partition is valid. */
+ if ((rc = ef10_nvram_buffer_validate(enp, partn_type,
+ partn_data, partn_size)) != 0)
+ goto fail7;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Byte distance from base to position (both are dword pointers). */
+static uint32_t
+byte_offset(
+ __in uint32_t *position,
+ __in uint32_t *base)
+{
+ return (uint32_t)((uint8_t *)position - (uint8_t *)base);
+}
+
+/*
+ * Find the byte offset of the first item after the partition header.
+ * Fails if the buffer does not start with a PARTITION_HEADER tag or
+ * has no end tag.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp)
+{
+ /* Read past partition header to find start address of the first key */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+
+ /* Sanity: the chain must be properly terminated */
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Find the total used space in the buffer, in bytes, including the
+ * final END tag, by walking successive TLV segments.  The 'offset'
+ * parameter is unused.  Returns EINVAL if even the first segment is
+ * corrupt.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp)
+{
+ /* Read to end of partition */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+ uint32_t *segment_used;
+
+ _NOTE(ARGUNUSED(offset))
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ segment_used = cursor.block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the used space including that end tag.
+ */
+ while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ if (tlv_require_end(&cursor) != 0) {
+ if (segment_used == cursor.block) {
+ /*
+ * First segment is corrupt, so there is
+ * no valid data in partition.
+ */
+ rc = EINVAL;
+ goto fail2;
+ }
+ break;
+ }
+ segment_used = cursor.end + 1;
+
+ /* Advance the cursor to the start of the next segment */
+ cursor.current = segment_used;
+ }
+ /* Return space used (including the END tag) */
+ *endp = (segment_used - cursor.block) * sizeof (uint32_t);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Locate the next non-header, non-trailer TLV item starting at the
+ * given byte offset.  On success returns B_TRUE with the item's byte
+ * offset and total length (tag + length + value, padded to dwords);
+ * returns B_FALSE if no such item exists or the cursor is invalid.
+ */
+ __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp)
+{
+ /* Find TLV at offset and return key start and length */
+ tlv_cursor_t cursor;
+ uint8_t *key;
+ uint32_t tag;
+
+ if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset) != 0) {
+ return (B_FALSE);
+ }
+
+ while ((key = tlv_item(&cursor)) != NULL) {
+ tag = tlv_tag(&cursor);
+ /* Skip partition metadata items */
+ if (tag == TLV_TAG_PARTITION_HEADER ||
+ tag == TLV_TAG_PARTITION_TRAILER) {
+ if (tlv_advance(&cursor) != 0) {
+ break;
+ }
+ continue;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * Copy the value of the TLV item at the given byte offset into itemp,
+ * returning the value length in *lengthp.  Fails with ENOSPC if the
+ * destination (item_max_size) or the caller-stated length is smaller
+ * than the item's value.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ uint32_t item_length;
+
+ if (item_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail2;
+ }
+
+ item_length = tlv_length(&cursor);
+ if (length < item_length) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ /* Copy out only the item's value, not the tag/length prefix */
+ memcpy(itemp, tlv_value(&cursor), item_length);
+
+ *lengthp = item_length;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Insert a new TLV item at the given byte offset, returning the
+ * inserted item's total length (tag + length + value, dword padded)
+ * in *lengthp.
+ *
+ * NOTE(review): the tag is hard-coded to TLV_TAG_LICENSE, so this
+ * buffer API appears specific to license partitions — confirm before
+ * reusing it for other item types.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length);
+
+ if (rc != 0) {
+ goto fail2;
+ }
+
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Delete the TLV item at the given byte offset.  The 'length' and
+ * 'end' parameters are accepted for interface symmetry but unused;
+ * tlv_delete() determines the item extent itself.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ _NOTE(ARGUNUSED(length, end))
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ if ((rc = tlv_delete(&cursor)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Finalize a modified buffer before writing to flash: verify the TLV
+ * chain is terminated, then refresh the partition header length,
+ * generation count and trailer checksum.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail2;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
/*
* Read and validate a segment from a partition. A segment is a complete
* tlv chain between PARTITION_HEADER and PARTITION_END tags. There may
@@ -1113,7 +1680,7 @@ ef10_nvram_segment_write_tlv(
* Read the segment from NVRAM into the segment_data buffer and validate
* it, returning if it does not validate. This is not a failure unless
* this is the first segment in a partition. In this case the caller
- * must propogate the error.
+ * must propagate the error.
*/
status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp,
*seg_datap, *src_remain_lenp);
@@ -1789,4 +2356,4 @@ ef10_nvram_partn_rw_finish(
#endif /* EFSYS_OPT_NVRAM */
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/ef10_phy.c b/sys/dev/sfxge/common/ef10_phy.c
new file mode 100644
index 0000000..77b2846
--- /dev/null
+++ b/sys/dev/sfxge/common/ef10_phy.c
@@ -0,0 +1,477 @@
+/*-
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static void
+mcdi_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_40000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+mcdi_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 40000 && fd)
+ *link_modep = EFX_LINK_40000FDX;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_GENERATE)
+ *fcntlp = EFX_FCNTL_GENERATE;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+
+ void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_40G:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ mcdi_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = ef10_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &elsp->els_adv_cap_mask);
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &elsp->els_lp_cap_mask);
+
+ mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &elsp->els_link_mode, &elsp->els_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+	/* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN)];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ efx_rc_t rc;
+
+ if (~encp->enc_func_flags & EFX_NIC_FUNC_LINKCTRL)
+ goto out;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+	/* Too many fields for the POPULATE macros, so insert this afterwards */
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ case EFX_LINK_40000FDX:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+ __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ /* TBD: no stats support in firmware yet */
+ _NOTE(ARGUNUSED(enp, esmp))
+ memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat));
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_rx.c b/sys/dev/sfxge/common/ef10_rx.c
index 984d48d..3bbee26 100644
--- a/sys/dev/sfxge/common/hunt_rx.c
+++ b/sys/dev/sfxge/common/ef10_rx.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
static __checkReturn efx_rc_t
@@ -50,14 +50,15 @@ efx_mcdi_init_rxq(
{
efx_mcdi_req_t req;
uint8_t payload[
- MAX(MC_CMD_INIT_RXQ_IN_LEN(EFX_RXQ_NBUFS(EFX_RXQ_MAXNDESCS)),
- MC_CMD_INIT_RXQ_OUT_LEN)];
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_RXQ_NBUFS(EFX_RXQ_MAXNDESCS))];
int npages = EFX_RXQ_NBUFS(size);
int i;
efx_qword_t *dma_addr;
uint64_t addr;
efx_rc_t rc;
+ /* If this changes, then the payload size might need to change. */
+ EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0);
EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);
(void) memset(payload, 0, sizeof (payload));
@@ -127,7 +128,7 @@ efx_mcdi_fini_rxq(
MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
- efx_mcdi_execute(enp, &req);
+ efx_mcdi_execute_quiet(enp, &req);
if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
rc = req.emr_rc;
@@ -249,7 +250,7 @@ efx_mcdi_rss_context_free(
MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
- efx_mcdi_execute(enp, &req);
+ efx_mcdi_execute_quiet(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
@@ -601,6 +602,8 @@ ef10_rx_prefix_pktlen(
__in uint8_t *buffer,
__out uint16_t *lengthp)
{
+ _NOTE(ARGUNUSED(enp))
+
/*
* The RX pseudo-header contains the packet length, excluding the
* pseudo-header. If the hardware receive datapath was operating in
@@ -619,6 +622,8 @@ ef10_rx_prefix_hash(
__in efx_rx_hash_alg_t func,
__in uint8_t *buffer)
{
+ _NOTE(ARGUNUSED(enp))
+
switch (func) {
case EFX_RX_HASHALG_TOEPLITZ:
return (buffer[0] |
@@ -745,7 +750,7 @@ ef10_rx_qcreate(
efx_rc_t rc;
boolean_t disable_scatter;
- _NOTE(ARGUNUSED(erp))
+ _NOTE(ARGUNUSED(id, erp))
EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
@@ -823,4 +828,4 @@ ef10_rx_fini(
#endif /* EFSYS_OPT_RX_SCALE */
}
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/ef10_tlv_layout.h b/sys/dev/sfxge/common/ef10_tlv_layout.h
index 80364ce..1360f5e 100644
--- a/sys/dev/sfxge/common/ef10_tlv_layout.h
+++ b/sys/dev/sfxge/common/ef10_tlv_layout.h
@@ -54,8 +54,10 @@
* where:
*
* - L is a location, indicating where this tag is expected to be found:
- * 0 for static configuration, or 1 for dynamic configuration. Other
- * values are reserved.
+ * 0: static configuration
+ * 1: dynamic configuration
+ * 2: firmware internal use
+ * 3: license partition
*
* - TTT is a type, which is just a unique value. The same type value
* might appear in both locations, indicating a relationship between
@@ -774,7 +776,40 @@ struct tlv_pcie_link_settings {
uint16_t width; /* Number of lanes */
};
-#define TLV_TAG_LICENSE (0x20800000)
+/* TX event merging config.
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins, and the global timeout for
+ * empty queues.
+ */
+#define TLV_TAG_TX_EVENT_MERGING_CONFIG (0x10210000)
+struct tlv_tx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_TX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+ uint32_t qempty_timeout_ns; /* Medford only */
+};
+#define TLV_TX_EVENT_MERGING_MAX_EVENTS_DEFAULT 7
+#define TLV_TX_EVENT_MERGING_TIMEOUT_NS_DEFAULT 1400
+#define TLV_TX_EVENT_MERGING_QEMPTY_TIMEOUT_NS_DEFAULT 700
+
+/* Tx vFIFO Low latency configuration
+ *
+ * To keep the desired booting behaviour for the switch, it just requires to
+ * know if the low latency mode is enabled.
+ */
+
+#define TLV_TAG_TX_VFIFO_ULL_MODE       (0x10270000)
+struct tlv_tx_vfifo_ull_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_TX_VFIFO_ULL_MODE_DEFAULT 0
+};
+
+#define TLV_TAG_LICENSE (0x30800000)
typedef struct tlv_license {
uint32_t tag;
@@ -782,4 +817,104 @@ typedef struct tlv_license {
uint8_t data[];
} tlv_license_t;
+/* TSA NIC IP address configuration
+ *
+ * Sets the TSA NIC IP address statically via configuration tool or dynamically
+ * via DHCP via snooping based on the mode selection (0=Static, 1=DHCP, 2=Snoop)
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000)
+
+#define TLV_TSAN_IP_MODE_STATIC (0)
+#define TLV_TSAN_IP_MODE_DHCP (1)
+#define TLV_TSAN_IP_MODE_SNOOP (2)
+typedef struct tlv_tsan_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+ uint32_t ip;
+ uint32_t netmask;
+ uint32_t gateway;
+ uint32_t port;
+ uint32_t bind_retry;
+ uint32_t bind_bkout;
+} tlv_tsan_config_t;
+
+/* TSA Controller IP address configuration
+ *
+ * Sets the TSA Controller IP address statically via configuration tool
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000)
+
+#define TLV_MAX_TSACS (4)
+typedef struct tlv_tsac_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t num_tsacs;
+ uint32_t ip[TLV_MAX_TSACS];
+ uint32_t port[TLV_MAX_TSACS];
+} tlv_tsac_config_t;
+
+/* Binding ticket
+ *
+ * Sets the TSA NIC binding ticket used for binding process between the TSA NIC
+ * and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_BINDING_TICKET (0x10240000)
+
+typedef struct tlv_binding_ticket {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_binding_ticket_t;
+
+/* Solarflare private key
+ *
+ * Sets the Solarflare private key used for signing during the binding process
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_PIK_SF (0x10250000)
+
+typedef struct tlv_pik_sf {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_pik_sf_t;
+
+/* CA root certificate
+ *
+ * Sets the CA root certificate used for TSA Controller verification during
+ * TLS connection setup between the TSA NIC and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000)
+
+typedef struct tlv_ca_root_cert {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_ca_root_cert_t;
+
#endif /* CI_MGMT_TLV_LAYOUT_H */
diff --git a/sys/dev/sfxge/common/hunt_tx.c b/sys/dev/sfxge/common/ef10_tx.c
index baa7444..a4729a1 100755
--- a/sys/dev/sfxge/common/hunt_tx.c
+++ b/sys/dev/sfxge/common/ef10_tx.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_QSTATS
#define EFX_TX_QSTAT_INCR(_etp, _stat) \
@@ -149,7 +149,7 @@ efx_mcdi_fini_txq(
MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
- efx_mcdi_execute(enp, &req);
+ efx_mcdi_execute_quiet(enp, &req);
if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
rc = req.emr_rc;
@@ -195,6 +195,7 @@ ef10_tx_qcreate(
efx_qword_t desc;
efx_rc_t rc;
+ _NOTE(ARGUNUSED(id))
if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
esmp)) != 0)
@@ -569,7 +570,7 @@ ef10_tx_qdesc_dma_create(
}
void
-hunt_tx_qdesc_tso_create(
+ef10_tx_qdesc_tso_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
__in uint32_t tcp_seq,
@@ -648,6 +649,7 @@ ef10_tx_qpace(
/* FIXME */
_NOTE(ARGUNUSED(etp, ns))
+ _NOTE(CONSTANTCONDITION)
if (B_FALSE) {
rc = ENOTSUP;
goto fail1;
@@ -707,4 +709,4 @@ ef10_tx_qstats_update(
#endif /* EFSYS_OPT_QSTATS */
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_vpd.c b/sys/dev/sfxge/common/ef10_vpd.c
index 41b4b83..961f3b6 100644
--- a/sys/dev/sfxge/common/hunt_vpd.c
+++ b/sys/dev/sfxge/common/ef10_vpd.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,7 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_VPD
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#include "ef10_tlv_layout.h"
@@ -74,7 +74,7 @@ ef10_vpd_init(
tag, &svpd, &svpd_size);
if (rc != 0) {
if (rc == EACCES) {
- /* Unpriviledged functions cannot access VPD */
+ /* Unprivileged functions cannot access VPD */
goto out;
}
goto fail1;
@@ -332,8 +332,11 @@ ef10_vpd_get(
/* And then from the provided data buffer */
if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
- evvp->evv_keyword, &offset, &length)) != 0)
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
goto fail2;
+ }
evvp->evv_length = length;
memcpy(evvp->evv_value, data + offset, length);
@@ -458,6 +461,6 @@ ef10_vpd_fini(
}
}
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
#endif /* EFSYS_OPT_VPD */
diff --git a/sys/dev/sfxge/common/efsys.h b/sys/dev/sfxge/common/efsys.h
index 14238ad..aef9a07 100644
--- a/sys/dev/sfxge/common/efsys.h
+++ b/sys/dev/sfxge/common/efsys.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
@@ -236,11 +236,9 @@ sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
#define EFSYS_OPT_NAMES 1
-#define EFSYS_OPT_FALCON 0
-#define EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE 0
#define EFSYS_OPT_SIENA 1
#define EFSYS_OPT_HUNTINGTON 1
-#define EFSYS_OPT_MEDFORD 0
+#define EFSYS_OPT_MEDFORD 1
#ifdef DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
@@ -251,39 +249,22 @@ sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
#define EFSYS_OPT_MCDI_LOGGING 0
#define EFSYS_OPT_MCDI_PROXY_AUTH 0
-#define EFSYS_OPT_MAC_FALCON_GMAC 0
-#define EFSYS_OPT_MAC_FALCON_XMAC 0
#define EFSYS_OPT_MAC_STATS 1
#define EFSYS_OPT_LOOPBACK 0
-#define EFSYS_OPT_MON_NULL 0
-#define EFSYS_OPT_MON_LM87 0
-#define EFSYS_OPT_MON_MAX6647 0
#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0
-#define EFSYS_OPT_PHY_NULL 0
-#define EFSYS_OPT_PHY_QT2022C2 0
-#define EFSYS_OPT_PHY_SFX7101 0
-#define EFSYS_OPT_PHY_TXC43128 0
-#define EFSYS_OPT_PHY_SFT9001 0
-#define EFSYS_OPT_PHY_QT2025C 0
#define EFSYS_OPT_PHY_STATS 1
-#define EFSYS_OPT_PHY_PROPS 0
-#define EFSYS_OPT_PHY_BIST 0
#define EFSYS_OPT_BIST 1
#define EFSYS_OPT_PHY_LED_CONTROL 1
#define EFSYS_OPT_PHY_FLAGS 0
#define EFSYS_OPT_VPD 1
#define EFSYS_OPT_NVRAM 1
-#define EFSYS_OPT_NVRAM_FALCON_BOOTROM 0
-#define EFSYS_OPT_NVRAM_SFT9001 0
-#define EFSYS_OPT_NVRAM_SFX7101 0
#define EFSYS_OPT_BOOTCFG 0
-#define EFSYS_OPT_PCIE_TUNE 0
#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_WOL 1
#define EFSYS_OPT_RX_SCALE 1
@@ -297,6 +278,8 @@ sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
#define EFSYS_OPT_LICENSING 0
+#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
+
/* ID */
typedef struct __efsys_identifier_s efsys_identifier_t;
@@ -1129,22 +1112,6 @@ typedef struct efsys_lock_s {
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
-/* PREEMPT */
-
-#define EFSYS_PREEMPT_DISABLE(_state) \
- do { \
- (_state) = (_state); \
- critical_enter(); \
- _NOTE(CONSTANTCONDITION) \
- } while (B_FALSE)
-
-#define EFSYS_PREEMPT_ENABLE(_state) \
- do { \
- (_state) = (_state); \
- critical_exit(_state); \
- _NOTE(CONSTANTCONDITION) \
- } while (B_FALSE)
-
/* STAT */
typedef uint64_t efsys_stat_t;
diff --git a/sys/dev/sfxge/common/efx.h b/sys/dev/sfxge/common/efx.h
index 14374cf..32a3dfa 100644
--- a/sys/dev/sfxge/common/efx.h
+++ b/sys/dev/sfxge/common/efx.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2006-2015 Solarflare Communications Inc.
+ * Copyright (c) 2006-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,6 +34,7 @@
#define _SYS_EFX_H
#include "efsys.h"
+#include "efx_check.h"
#include "efx_phy_ids.h"
#ifdef __cplusplus
@@ -58,7 +59,7 @@ typedef __success(return == 0) int efx_rc_t;
typedef enum efx_family_e {
EFX_FAMILY_INVALID,
- EFX_FAMILY_FALCON,
+ EFX_FAMILY_FALCON, /* Obsolete and not supported */
EFX_FAMILY_SIENA,
EFX_FAMILY_HUNTINGTON,
EFX_FAMILY_MEDFORD,
@@ -71,10 +72,6 @@ efx_family(
__in uint16_t devid,
__out efx_family_t *efp);
-extern __checkReturn efx_rc_t
-efx_infer_family(
- __in efsys_bar_t *esbp,
- __out efx_family_t *efp);
#define EFX_PCI_VENID_SFC 0x1924
@@ -148,19 +145,6 @@ extern __checkReturn efx_rc_t
efx_nic_probe(
__in efx_nic_t *enp);
-#if EFSYS_OPT_PCIE_TUNE
-
-extern __checkReturn efx_rc_t
-efx_nic_pcie_tune(
- __in efx_nic_t *enp,
- unsigned int nlanes);
-
-extern __checkReturn efx_rc_t
-efx_nic_pcie_extended_sync(
- __in efx_nic_t *enp);
-
-#endif /* EFSYS_OPT_PCIE_TUNE */
-
extern __checkReturn efx_rc_t
efx_nic_init(
__in efx_nic_t *enp);
@@ -189,6 +173,30 @@ extern void
efx_nic_destroy(
__in efx_nic_t *enp);
+#define EFX_PCIE_LINK_SPEED_GEN1 1
+#define EFX_PCIE_LINK_SPEED_GEN2 2
+#define EFX_PCIE_LINK_SPEED_GEN3 3
+
+typedef enum efx_pcie_link_performance_e {
+ EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY,
+ EFX_PCIE_LINK_PERFORMANCE_OPTIMAL
+} efx_pcie_link_performance_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp);
+
+extern __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp);
+
#if EFSYS_OPT_MCDI
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
@@ -262,7 +270,6 @@ efx_mcdi_fini(
/* INTR */
-#define EFX_NINTR_FALCON 64
#define EFX_NINTR_SIENA 1024
typedef enum efx_intr_type_e {
@@ -435,18 +442,30 @@ typedef enum efx_link_mode_e {
#define EFX_MAC_SDU_MAX 9202
-#define EFX_MAC_PDU(_sdu) \
- P2ROUNDUP(((_sdu) \
- + /* EtherII */ 14 \
- + /* VLAN */ 4 \
- + /* CRC */ 4 \
- + /* bug16011 */ 16), \
- (1 << 3))
+#define EFX_MAC_PDU_ADJUSTMENT \
+ (/* EtherII */ 14 \
+ + /* VLAN */ 4 \
+ + /* CRC */ 4 \
+ + /* bug16011 */ 16) \
+
+#define EFX_MAC_PDU(_sdu) \
+ P2ROUNDUP((_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8)
+
+/*
+ * Due to the P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give
+ * the SDU rounded up slightly.
+ */
+#define EFX_MAC_SDU_FROM_PDU(_pdu) ((_pdu) - EFX_MAC_PDU_ADJUSTMENT)
#define EFX_MAC_PDU_MIN 60
#define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX)
extern __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
efx_mac_pdu_set(
__in efx_nic_t *enp,
__in size_t pdu);
@@ -557,9 +576,6 @@ efx_mac_stats_update(
typedef enum efx_mon_type_e {
EFX_MON_INVALID = 0,
- EFX_MON_NULL,
- EFX_MON_LM87,
- EFX_MON_MAX6647,
EFX_MON_SFC90X0,
EFX_MON_SFC91X0,
EFX_MON_SFC92X0,
@@ -583,7 +599,7 @@ efx_mon_init(
#define EFX_MON_STATS_PAGE_SIZE 0x100
#define EFX_MON_MASK_ELEMENT_SIZE 32
-/* START MKCONFIG GENERATED MonitorHeaderStatsBlock c09b13f732431f23 */
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 5d4ee5185e419abe */
typedef enum efx_mon_stat_e {
EFX_MON_STAT_2_5V,
EFX_MON_STAT_VCCP1,
@@ -660,6 +676,8 @@ typedef enum efx_mon_stat_e {
EFX_MON_STAT_PHY0_VCC,
EFX_MON_STAT_PHY1_VCC,
EFX_MON_STAT_CONTROLLER_TDIODE_TEMP,
+ EFX_MON_STAT_BOARD_FRONT_TEMP,
+ EFX_MON_STAT_BOARD_BACK_TEMP,
EFX_MON_NSTATS
} efx_mon_stat_t;
@@ -701,15 +719,6 @@ efx_mon_fini(
/* PHY */
-#define PMA_PMD_MMD 1
-#define PCS_MMD 3
-#define PHY_XS_MMD 4
-#define DTE_XS_MMD 5
-#define AN_MMD 7
-#define CL22EXT_MMD 29
-
-#define MAXMMD ((1 << 5) - 1)
-
extern __checkReturn efx_rc_t
efx_phy_verify(
__in efx_nic_t *enp);
@@ -961,33 +970,6 @@ efx_phy_stats_update(
#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-
-#if EFSYS_OPT_NAMES
-
-extern const char *
-efx_phy_prop_name(
- __in efx_nic_t *enp,
- __in unsigned int id);
-
-#endif /* EFSYS_OPT_NAMES */
-
-#define EFX_PHY_PROP_DEFAULT 0x00000001
-
-extern __checkReturn efx_rc_t
-efx_phy_prop_get(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t flags,
- __out uint32_t *valp);
-
-extern __checkReturn efx_rc_t
-efx_phy_prop_set(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t val);
-
-#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_BIST
@@ -1124,9 +1106,6 @@ typedef struct efx_nic_cfg_s {
#if EFSYS_OPT_PHY_STATS
uint64_t enc_phy_stat_mask;
#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
- unsigned int enc_phy_nprops;
-#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_SIENA
uint8_t enc_mcdi_mdio_channel;
#if EFSYS_OPT_PHY_STATS
@@ -1173,6 +1152,9 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_mcdi_max_payload_length;
/* VPD may be per-PF or global */
boolean_t enc_vpd_is_global;
+ /* Minimum unidirectional bandwidth in Mb/s to max out all ports */
+ uint32_t enc_required_pcie_bandwidth_mbps;
+ uint32_t enc_max_pcie_link_gen;
} efx_nic_cfg_t;
#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
@@ -1382,11 +1364,10 @@ efx_nvram_set_version(
__in efx_nvram_type_t type,
__in_ecount(4) uint16_t version[4]);
-/* Validate contents of TLV formatted partition */
extern __checkReturn efx_rc_t
-efx_nvram_tlv_validate(
+efx_nvram_validate(
__in efx_nic_t *enp,
- __in uint32_t partn,
+ __in efx_nvram_type_t type,
__in_bcount(partn_size) caddr_t partn_data,
__in size_t partn_size);
@@ -2345,6 +2326,10 @@ extern void
efx_lic_fini(
__in efx_nic_t *enp);
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp);
+
extern __checkReturn efx_rc_t
efx_lic_update_licenses(
__in efx_nic_t *enp);
@@ -2369,6 +2354,97 @@ efx_lic_get_id(
__out_opt uint8_t *bufferp);
+extern __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
#endif /* EFSYS_OPT_LICENSING */
diff --git a/sys/dev/sfxge/common/efx_bootcfg.c b/sys/dev/sfxge/common/efx_bootcfg.c
index 16ee337..90ee1e4 100644
--- a/sys/dev/sfxge/common/efx_bootcfg.c
+++ b/sys/dev/sfxge/common/efx_bootcfg.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/efx_check.h b/sys/dev/sfxge/common/efx_check.h
index d6b7cf4..ee473c4 100644
--- a/sys/dev/sfxge/common/efx_check.h
+++ b/sys/dev/sfxge/common/efx_check.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,6 +43,10 @@
* from client code (and do not reappear in merges from other branches).
*/
+#ifdef EFSYS_OPT_FALCON
+# error "FALCON is obsolete and is not supported."
+#endif
+
/* Support NVRAM based boot config */
#if EFSYS_OPT_BOOTCFG
# if !EFSYS_OPT_NVRAM
@@ -52,47 +56,40 @@
/* Verify chip implements accessed registers */
#if EFSYS_OPT_CHECK_REG
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "CHECK_REG requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_CHECK_REG */
/* Decode fatal errors */
#if EFSYS_OPT_DECODE_INTR_FATAL
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA)
-# error "INTR_FATAL requires FALCON or SIENA"
+# if !EFSYS_OPT_SIENA
+# error "INTR_FATAL requires SIENA"
# endif
#endif /* EFSYS_OPT_DECODE_INTR_FATAL */
/* Support diagnostic hardware tests */
#if EFSYS_OPT_DIAG
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "DIAG requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "DIAG requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_DIAG */
/* Support optimized EVQ data access */
#if EFSYS_OPT_EV_PREFETCH
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "EV_PREFETCH requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_EV_PREFETCH */
-/* Support overriding the NVRAM and VPD configuration */
-#if EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
-# if !EFSYS_OPT_FALCON
-# error "FALCON_NIC_CFG_OVERRIDE requires FALCON"
-# endif
-#endif /* EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE */
+#ifdef EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
+# error "FALCON_NIC_CFG_OVERRIDE is obsolete and is not supported."
+#endif
/* Support hardware packet filters */
#if EFSYS_OPT_FILTER
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "FILTER requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "FILTER requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_FILTER */
@@ -104,31 +101,23 @@
/* Support hardware loopback modes */
#if EFSYS_OPT_LOOPBACK
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "LOOPBACK requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_LOOPBACK */
-/* Support Falcon GMAC */
-#if EFSYS_OPT_MAC_FALCON_GMAC
-# if !EFSYS_OPT_FALCON
-# error "MAC_FALCON_GMAC requires FALCON"
-# endif
-#endif /* EFSYS_OPT_MAC_FALCON_GMAC */
+#ifdef EFSYS_OPT_MAC_FALCON_GMAC
+# error "MAC_FALCON_GMAC is obsolete and is not supported."
+#endif
-/* Support Falcon XMAC */
-#if EFSYS_OPT_MAC_FALCON_XMAC
-# if !EFSYS_OPT_FALCON
-# error "MAC_FALCON_XMAC requires FALCON"
-# endif
-#endif /* EFSYS_OPT_MAC_FALCON_XMAC */
+#ifdef EFSYS_OPT_MAC_FALCON_XMAC
+# error "MAC_FALCON_XMAC is obsolete and is not supported."
+#endif
/* Support MAC statistics */
#if EFSYS_OPT_MAC_STATS
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MAC_STATS requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_MAC_STATS */
@@ -159,42 +148,30 @@
# endif
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
-/* Support LM87 monitor */
-#if EFSYS_OPT_MON_LM87
-# if !EFSYS_OPT_FALCON
-# error "MON_LM87 requires FALCON"
-# endif
-#endif /* EFSYS_OPT_MON_LM87 */
+#ifdef EFSYS_OPT_MON_LM87
+# error "MON_LM87 is obsolete and is not supported."
+#endif
-/* Support MAX6647 monitor */
-#if EFSYS_OPT_MON_MAX6647
-# if !EFSYS_OPT_FALCON
-# error "MON_MAX6647 requires FALCON"
-# endif
-#endif /* EFSYS_OPT_MON_MAX6647 */
+#ifdef EFSYS_OPT_MON_MAX6647
+# error "MON_MAX6647 is obsolete and is not supported."
+#endif
-/* Support null monitor */
-#if EFSYS_OPT_MON_NULL
-# if !EFSYS_OPT_FALCON
-# error "MON_NULL requires FALCON"
-# endif
-#endif /* EFSYS_OPT_MON_NULL */
+#ifdef EFSYS_OPT_MON_NULL
+# error "MON_NULL is obsolete and is not supported."
+#endif
-/* Obsolete option */
#ifdef EFSYS_OPT_MON_SIENA
# error "MON_SIENA is obsolete (replaced by MON_MCDI)."
-#endif /* EFSYS_OPT_MON_SIENA*/
+#endif
-/* Obsolete option */
#ifdef EFSYS_OPT_MON_HUNTINGTON
# error "MON_HUNTINGTON is obsolete (replaced by MON_MCDI)."
-#endif /* EFSYS_OPT_MON_HUNTINGTON*/
+#endif
/* Support monitor statistics (voltage/temperature) */
#if EFSYS_OPT_MON_STATS
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MON_STATS requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_MON_STATS */
@@ -208,176 +185,124 @@
/* Support printable names for statistics */
#if EFSYS_OPT_NAMES
# if !(EFSYS_OPT_LOOPBACK || EFSYS_OPT_MAC_STATS || EFSYS_OPT_MCDI || \
- EFSYS_MON_STATS || EFSYS_OPT_PHY_PROPS || EFSYS_OPT_PHY_STATS || \
- EFSYS_OPT_QSTATS)
-# error "NAMES requires LOOPBACK or xxxSTATS or MCDI or PHY_PROPS"
+ EFSYS_MON_STATS || EFSYS_OPT_PHY_STATS || EFSYS_OPT_QSTATS)
+# error "NAMES requires LOOPBACK or xxxSTATS or MCDI"
# endif
#endif /* EFSYS_OPT_NAMES */
/* Support non volatile configuration */
#if EFSYS_OPT_NVRAM
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "NVRAM requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_NVRAM */
-/* Support Falcon bootrom */
-#if EFSYS_OPT_NVRAM_FALCON_BOOTROM
-# if !EFSYS_OPT_NVRAM
-# error "NVRAM_FALCON_BOOTROM requires NVRAM"
-# endif
-# if !EFSYS_OPT_FALCON
-# error "NVRAM_FALCON_BOOTROM requires FALCON"
-# endif
-#endif /* EFSYS_OPT_NVRAM_FALCON_BOOTROM */
+#ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM
+# error "NVRAM_FALCON_BOOTROM is obsolete and is not supported."
+#endif
-/* Support NVRAM config for SFT9001 */
-#if EFSYS_OPT_NVRAM_SFT9001
-# if !EFSYS_OPT_NVRAM
-# error "NVRAM_SFT9001 requires NVRAM"
-# endif
-# if !EFSYS_OPT_FALCON
-# error "NVRAM_SFT9001 requires FALCON"
-# endif
-#endif /* EFSYS_OPT_NVRAM_SFT9001 */
+#ifdef EFSYS_OPT_NVRAM_SFT9001
+# error "NVRAM_SFT9001 is obsolete and is not supported."
+#endif
-/* Support NVRAM config for SFX7101 */
-#if EFSYS_OPT_NVRAM_SFX7101
-# if !EFSYS_OPT_NVRAM
-# error "NVRAM_SFX7101 requires NVRAM"
-# endif
-# if !EFSYS_OPT_FALCON
-# error "NVRAM_SFX7101 requires FALCON"
-# endif
-#endif /* EFSYS_OPT_NVRAM_SFX7101 */
+#ifdef EFSYS_OPT_NVRAM_SFX7101
+# error "NVRAM_SFX7101 is obsolete and is not supported."
+#endif
-/* Support PCIe interface tuning */
-#if EFSYS_OPT_PCIE_TUNE
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA)
-# error "PCIE_TUNE requires FALCON or SIENA"
-# endif
-#endif /* EFSYS_OPT_PCIE_TUNE */
+#ifdef EFSYS_OPT_PCIE_TUNE
+# error "PCIE_TUNE is obsolete and is not supported."
+#endif
-/* Obsolete option */
-#if EFSYS_OPT_PHY_BIST
-# error "PHY_BIST is obsolete (replaced by BIST)."
-#endif /* EFSYS_OPT_PHY_BIST */
+#ifdef EFSYS_OPT_PHY_BIST
+# error "PHY_BIST is obsolete (replaced by BIST)."
+#endif
/* Support PHY flags */
#if EFSYS_OPT_PHY_FLAGS
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA)
-# error "PHY_FLAGS requires FALCON or SIENA"
+# if !EFSYS_OPT_SIENA
+# error "PHY_FLAGS requires SIENA"
# endif
#endif /* EFSYS_OPT_PHY_FLAGS */
/* Support for PHY LED control */
#if EFSYS_OPT_PHY_LED_CONTROL
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA)
-# error "PHY_LED_CONTROL requires FALCON or SIENA"
+# if !EFSYS_OPT_SIENA
+# error "PHY_LED_CONTROL requires SIENA"
# endif
#endif /* EFSYS_OPT_PHY_LED_CONTROL */
-/* Support NULL PHY */
-#if EFSYS_OPT_PHY_NULL
-# if !EFSYS_OPT_FALCON
-# error "PHY_NULL requires FALCON"
-# endif
-#endif /* EFSYS_OPT_PHY_NULL */
+#ifdef EFSYS_OPT_PHY_NULL
+# error "PHY_NULL is obsolete and is not supported."
+#endif
-/* Obsolete option */
#ifdef EFSYS_OPT_PHY_PM8358
-# error "EFSYS_OPT_PHY_PM8358 is obsolete and is not supported."
+# error "PHY_PM8358 is obsolete and is not supported."
#endif
-/* Support PHY properties */
-#if EFSYS_OPT_PHY_PROPS
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA)
-# error "PHY_PROPS requires FALCON or SIENA"
-# endif
-#endif /* EFSYS_OPT_PHY_PROPS */
+#ifdef EFSYS_OPT_PHY_PROPS
+# error "PHY_PROPS is obsolete and is not supported."
+#endif
-/* Support QT2022C2 PHY */
-#if EFSYS_OPT_PHY_QT2022C2
-# if !EFSYS_OPT_FALCON
-# error "PHY_QT2022C2 requires FALCON"
-# endif
-#endif /* EFSYS_OPT_PHY_QT2022C2 */
+#ifdef EFSYS_OPT_PHY_QT2022C2
+# error "PHY_QT2022C2 is obsolete and is not supported."
+#endif
-/* Support QT2025C PHY (Wakefield NIC) */
-#if EFSYS_OPT_PHY_QT2025C
-# if !EFSYS_OPT_FALCON
-# error "PHY_QT2025C requires FALCON"
-# endif
-#endif /* EFSYS_OPT_PHY_QT2025C */
+#ifdef EFSYS_OPT_PHY_QT2025C
+# error "PHY_QT2025C is obsolete and is not supported."
+#endif
-/* Support SFT9001 PHY (Starbolt NIC) */
-#if EFSYS_OPT_PHY_SFT9001
-# if !EFSYS_OPT_FALCON
-# error "PHY_SFT9001 requires FALCON"
-# endif
-#endif /* EFSYS_OPT_PHY_SFT9001 */
+#ifdef EFSYS_OPT_PHY_SFT9001
+# error "PHY_SFT9001 is obsolete and is not supported."
+#endif
-/* Support SFX7101 PHY (SFE4001 NIC) */
-#if EFSYS_OPT_PHY_SFX7101
-# if !EFSYS_OPT_FALCON
-# error "PHY_SFX7101 requires FALCON"
-# endif
-#endif /* EFSYS_OPT_PHY_SFX7101 */
+#ifdef EFSYS_OPT_PHY_SFX7101
+# error "PHY_SFX7101 is obsolete and is not supported."
+#endif
/* Support PHY statistics */
#if EFSYS_OPT_PHY_STATS
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA)
-# error "PHY_STATS requires FALCON or SIENA"
+# if !EFSYS_OPT_SIENA
+# error "PHY_STATS requires SIENA"
# endif
#endif /* EFSYS_OPT_PHY_STATS */
-/* Support TXC43128 PHY (SFE4003 NIC) */
-#if EFSYS_OPT_PHY_TXC43128
-# if !EFSYS_OPT_FALCON
-# error "PHY_TXC43128 requires FALCON"
-# endif
-#endif /* EFSYS_OPT_PHY_TXC43128 */
+#ifdef EFSYS_OPT_PHY_TXC43128
+# error "PHY_TXC43128 is obsolete and is not supported."
+#endif
/* Support EVQ/RXQ/TXQ statistics */
#if EFSYS_OPT_QSTATS
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "QSTATS requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_QSTATS */
-/* Obsolete option */
#ifdef EFSYS_OPT_RX_HDR_SPLIT
# error "RX_HDR_SPLIT is obsolete and is not supported"
-#endif /* EFSYS_OPT_RX_HDR_SPLIT */
+#endif
/* Support receive scaling (RSS) */
#if EFSYS_OPT_RX_SCALE
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "RX_SCALE requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_RX_SCALE */
/* Support receive scatter DMA */
#if EFSYS_OPT_RX_SCATTER
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "RX_SCATTER requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_RX_SCATTER */
-/* Obsolete option */
#ifdef EFSYS_OPT_STAT_NAME
# error "STAT_NAME is obsolete (replaced by NAMES)."
#endif
/* Support PCI Vital Product Data (VPD) */
#if EFSYS_OPT_VPD
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "VPD requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "VPD requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_VPD */
@@ -388,16 +313,14 @@
# endif
#endif /* EFSYS_OPT_WOL */
-/* Obsolete option */
#ifdef EFSYS_OPT_MCAST_FILTER_LIST
# error "MCAST_FILTER_LIST is obsolete and is not supported"
-#endif /* EFSYS_OPT_MCAST_FILTER_LIST */
+#endif
/* Support BIST */
#if EFSYS_OPT_BIST
-# if !(EFSYS_OPT_FALCON || EFSYS_OPT_SIENA || \
- EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "BIST requires FALCON or SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "BIST requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_BIST */
@@ -411,5 +334,12 @@
# endif
#endif /* EFSYS_OPT_LICENSING */
+/* Support adapters with missing static config (for factory use only) */
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+# if !EFSYS_OPT_MEDFORD
+# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD"
+# endif
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+
#endif /* _SYS_EFX_CHECK_H */
diff --git a/sys/dev/sfxge/common/efx_crc32.c b/sys/dev/sfxge/common/efx_crc32.c
index 23d77ae..779fa63 100644
--- a/sys/dev/sfxge/common/efx_crc32.c
+++ b/sys/dev/sfxge/common/efx_crc32.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2013-2015 Solarflare Communications Inc.
+ * Copyright (c) 2013-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/efx_ev.c b/sys/dev/sfxge/common/efx_ev.c
index 037dbfa..5e3bc40 100644
--- a/sys/dev/sfxge/common/efx_ev.c
+++ b/sys/dev/sfxge/common/efx_ev.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,18 +53,18 @@ __FBSDID("$FreeBSD$");
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_ev_init(
+siena_ev_init(
__in efx_nic_t *enp);
static void
-falconsiena_ev_fini(
+siena_ev_fini(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_ev_qcreate(
+siena_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
@@ -73,73 +73,58 @@ falconsiena_ev_qcreate(
__in efx_evq_t *eep);
static void
-falconsiena_ev_qdestroy(
+siena_ev_qdestroy(
__in efx_evq_t *eep);
static __checkReturn efx_rc_t
-falconsiena_ev_qprime(
+siena_ev_qprime(
__in efx_evq_t *eep,
__in unsigned int count);
static void
-falconsiena_ev_qpoll(
+siena_ev_qpoll(
__in efx_evq_t *eep,
__inout unsigned int *countp,
__in const efx_ev_callbacks_t *eecp,
__in_opt void *arg);
static void
-falconsiena_ev_qpost(
+siena_ev_qpost(
__in efx_evq_t *eep,
__in uint16_t data);
static __checkReturn efx_rc_t
-falconsiena_ev_qmoderate(
+siena_ev_qmoderate(
__in efx_evq_t *eep,
__in unsigned int us);
#if EFSYS_OPT_QSTATS
static void
-falconsiena_ev_qstats_update(
+siena_ev_qstats_update(
__in efx_evq_t *eep,
__inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
#endif
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
-
-#if EFSYS_OPT_FALCON
-static efx_ev_ops_t __efx_ev_falcon_ops = {
- falconsiena_ev_init, /* eevo_init */
- falconsiena_ev_fini, /* eevo_fini */
- falconsiena_ev_qcreate, /* eevo_qcreate */
- falconsiena_ev_qdestroy, /* eevo_qdestroy */
- falconsiena_ev_qprime, /* eevo_qprime */
- falconsiena_ev_qpost, /* eevo_qpost */
- falconsiena_ev_qmoderate, /* eevo_qmoderate */
-#if EFSYS_OPT_QSTATS
- falconsiena_ev_qstats_update, /* eevo_qstats_update */
-#endif
-};
-#endif /* EFSYS_OPT_FALCON */
+#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_SIENA
-static efx_ev_ops_t __efx_ev_siena_ops = {
- falconsiena_ev_init, /* eevo_init */
- falconsiena_ev_fini, /* eevo_fini */
- falconsiena_ev_qcreate, /* eevo_qcreate */
- falconsiena_ev_qdestroy, /* eevo_qdestroy */
- falconsiena_ev_qprime, /* eevo_qprime */
- falconsiena_ev_qpost, /* eevo_qpost */
- falconsiena_ev_qmoderate, /* eevo_qmoderate */
+static const efx_ev_ops_t __efx_ev_siena_ops = {
+ siena_ev_init, /* eevo_init */
+ siena_ev_fini, /* eevo_fini */
+ siena_ev_qcreate, /* eevo_qcreate */
+ siena_ev_qdestroy, /* eevo_qdestroy */
+ siena_ev_qprime, /* eevo_qprime */
+ siena_ev_qpost, /* eevo_qpost */
+ siena_ev_qmoderate, /* eevo_qmoderate */
#if EFSYS_OPT_QSTATS
- falconsiena_ev_qstats_update, /* eevo_qstats_update */
+ siena_ev_qstats_update, /* eevo_qstats_update */
#endif
};
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static efx_ev_ops_t __efx_ev_ef10_ops = {
+static const efx_ev_ops_t __efx_ev_ef10_ops = {
ef10_ev_init, /* eevo_init */
ef10_ev_fini, /* eevo_fini */
ef10_ev_qcreate, /* eevo_qcreate */
@@ -158,7 +143,7 @@ static efx_ev_ops_t __efx_ev_ef10_ops = {
efx_ev_init(
__in efx_nic_t *enp)
{
- efx_ev_ops_t *eevop;
+ const efx_ev_ops_t *eevop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -170,27 +155,21 @@ efx_ev_init(
}
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- eevop = (efx_ev_ops_t *)&__efx_ev_falcon_ops;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- eevop = (efx_ev_ops_t *)&__efx_ev_siena_ops;
+ eevop = &__efx_ev_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- eevop = (efx_ev_ops_t *)&__efx_ev_ef10_ops;
+ eevop = &__efx_ev_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- eevop = (efx_ev_ops_t *)&__efx_ev_ef10_ops;
+ eevop = &__efx_ev_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -224,7 +203,7 @@ fail1:
efx_ev_fini(
__in efx_nic_t *enp)
{
- efx_ev_ops_t *eevop = enp->en_eevop;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
@@ -249,7 +228,7 @@ efx_ev_qcreate(
__in uint32_t id,
__deref_out efx_evq_t **eepp)
{
- efx_ev_ops_t *eevop = enp->en_eevop;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_evq_t *eep;
efx_rc_t rc;
@@ -272,16 +251,27 @@ efx_ev_qcreate(
eep->ee_mask = n - 1;
eep->ee_esmp = esmp;
- if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, eep)) != 0)
- goto fail2;
-
+ /*
+ * Set outputs before the queue is created because interrupts may be
+ * raised for events immediately after the queue is created, before the
+ * function call below returns. See bug58606.
+ *
+ * The eepp pointer passed in by the client must therefore point to data
+ * shared with the client's event processing context.
+ */
enp->en_ev_qcount++;
*eepp = eep;
+ if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, eep)) != 0)
+ goto fail2;
+
return (0);
fail2:
EFSYS_PROBE(fail2);
+
+ *eepp = NULL;
+ enp->en_ev_qcount--;
EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -293,7 +283,7 @@ efx_ev_qdestroy(
__in efx_evq_t *eep)
{
efx_nic_t *enp = eep->ee_enp;
- efx_ev_ops_t *eevop = enp->en_eevop;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
@@ -312,7 +302,7 @@ efx_ev_qprime(
__in unsigned int count)
{
efx_nic_t *enp = eep->ee_enp;
- efx_ev_ops_t *eevop = enp->en_eevop;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
efx_rc_t rc;
EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
@@ -395,7 +385,7 @@ efx_ev_qpoll(
EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
#endif
- falconsiena_ev_qpoll(eep, countp, eecp, arg);
+ siena_ev_qpoll(eep, countp, eecp, arg);
}
void
@@ -404,7 +394,7 @@ efx_ev_qpost(
__in uint16_t data)
{
efx_nic_t *enp = eep->ee_enp;
- efx_ev_ops_t *eevop = enp->en_eevop;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
@@ -420,7 +410,7 @@ efx_ev_qmoderate(
__in unsigned int us)
{
efx_nic_t *enp = eep->ee_enp;
- efx_ev_ops_t *eevop = enp->en_eevop;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
efx_rc_t rc;
EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
@@ -442,7 +432,7 @@ efx_ev_qstats_update(
__inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
{ efx_nic_t *enp = eep->ee_enp;
- efx_ev_ops_t *eevop = enp->en_eevop;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
@@ -451,10 +441,10 @@ efx_ev_qstats_update(
#endif /* EFSYS_OPT_QSTATS */
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_ev_init(
+siena_ev_init(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -472,7 +462,7 @@ falconsiena_ev_init(
}
static __checkReturn boolean_t
-falconsiena_ev_rx_not_ok(
+siena_ev_rx_not_ok(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in uint32_t label,
@@ -562,13 +552,12 @@ falconsiena_ev_rx_not_ok(
}
static __checkReturn boolean_t
-falconsiena_ev_rx(
+siena_ev_rx(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
__in_opt void *arg)
{
- efx_nic_t *enp = eep->ee_enp;
uint32_t id;
uint32_t size;
uint32_t label;
@@ -598,8 +587,7 @@ falconsiena_ev_rx(
hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
- is_v6 = (enp->en_family != EFX_FAMILY_FALCON &&
- EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
+ is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
/*
* If packet is marked as OK and packet type is TCP/IP or
@@ -659,7 +647,7 @@ falconsiena_ev_rx(
/* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
if (!ok) {
- ignore = falconsiena_ev_rx_not_ok(eep, eqp, label, id, &flags);
+ ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
if (ignore) {
EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
uint32_t, size, uint16_t, flags);
@@ -692,7 +680,7 @@ falconsiena_ev_rx(
* (which clears PKT_OK). If this is set, then don't trust
* the PKT_TYPE field.
*/
- if (enp->en_family != EFX_FAMILY_FALCON && !ok) {
+ if (!ok) {
uint32_t parse_err;
parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
@@ -718,7 +706,7 @@ falconsiena_ev_rx(
}
static __checkReturn boolean_t
-falconsiena_ev_tx(
+siena_ev_tx(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -765,33 +753,21 @@ falconsiena_ev_tx(
}
static __checkReturn boolean_t
-falconsiena_ev_global(
+siena_ev_global(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
__in_opt void *arg)
{
- efx_nic_t *enp = eep->ee_enp;
- efx_port_t *epp = &(enp->en_port);
- boolean_t should_abort;
+ _NOTE(ARGUNUSED(eqp, eecp, arg))
EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
- should_abort = B_FALSE;
-
- /* Check for a link management event */
- if (EFX_QWORD_FIELD(*eqp, FSF_BZ_GLB_EV_XG_MNT_INTR) != 0) {
- EFX_EV_QSTAT_INCR(eep, EV_GLOBAL_MNT);
- EFSYS_PROBE(xg_mgt);
-
- epp->ep_mac_poll_needed = B_TRUE;
- }
-
- return (should_abort);
+ return (B_FALSE);
}
static __checkReturn boolean_t
-falconsiena_ev_driver(
+siena_ev_driver(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -920,7 +896,7 @@ falconsiena_ev_driver(
}
static __checkReturn boolean_t
-falconsiena_ev_drv_gen(
+siena_ev_drv_gen(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -948,7 +924,7 @@ falconsiena_ev_drv_gen(
#if EFSYS_OPT_MCDI
static __checkReturn boolean_t
-falconsiena_ev_mcdi(
+siena_ev_mcdi(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -1053,7 +1029,7 @@ out:
#endif /* EFSYS_OPT_MCDI */
static __checkReturn efx_rc_t
-falconsiena_ev_qprime(
+siena_ev_qprime(
__in efx_evq_t *eep,
__in unsigned int count)
{
@@ -1074,7 +1050,7 @@ falconsiena_ev_qprime(
#define EFX_EV_BATCH 8
static void
-falconsiena_ev_qpoll(
+siena_ev_qpoll(
__in efx_evq_t *eep,
__inout unsigned int *countp,
__in const efx_ev_callbacks_t *eecp,
@@ -1207,7 +1183,7 @@ falconsiena_ev_qpoll(
}
static void
-falconsiena_ev_qpost(
+siena_ev_qpost(
__in efx_evq_t *eep,
__in uint16_t data)
{
@@ -1226,7 +1202,7 @@ falconsiena_ev_qpost(
}
static __checkReturn efx_rc_t
-falconsiena_ev_qmoderate(
+siena_ev_qmoderate(
__in efx_evq_t *eep,
__in unsigned int us)
{
@@ -1243,14 +1219,9 @@ falconsiena_ev_qmoderate(
/* If the value is zero then disable the timer */
if (us == 0) {
- if (enp->en_family == EFX_FAMILY_FALCON)
- EFX_POPULATE_DWORD_2(dword,
- FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_DIS,
- FRF_AB_TC_TIMER_VAL, 0);
- else
- EFX_POPULATE_DWORD_2(dword,
- FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
- FRF_CZ_TC_TIMER_VAL, 0);
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
} else {
uint32_t timer_val;
@@ -1261,14 +1232,9 @@ falconsiena_ev_qmoderate(
if (timer_val > 0)
timer_val--;
- if (enp->en_family == EFX_FAMILY_FALCON)
- EFX_POPULATE_DWORD_2(dword,
- FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_INT_HLDOFF,
- FRF_AB_TIMER_VAL, timer_val);
- else
- EFX_POPULATE_DWORD_2(dword,
- FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
- FRF_CZ_TC_TIMER_VAL, timer_val);
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL, timer_val);
}
locked = (eep->ee_index == 0) ? 1 : 0;
@@ -1285,7 +1251,7 @@ fail1:
}
static __checkReturn efx_rc_t
-falconsiena_ev_qcreate(
+siena_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
@@ -1298,6 +1264,8 @@ falconsiena_ev_qcreate(
efx_oword_t oword;
efx_rc_t rc;
+ _NOTE(ARGUNUSED(esmp))
+
EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
@@ -1326,20 +1294,18 @@ falconsiena_ev_qcreate(
}
/* Set up the handler table */
- eep->ee_rx = falconsiena_ev_rx;
- eep->ee_tx = falconsiena_ev_tx;
- eep->ee_driver = falconsiena_ev_driver;
- eep->ee_global = falconsiena_ev_global;
- eep->ee_drv_gen = falconsiena_ev_drv_gen;
+ eep->ee_rx = siena_ev_rx;
+ eep->ee_tx = siena_ev_tx;
+ eep->ee_driver = siena_ev_driver;
+ eep->ee_global = siena_ev_global;
+ eep->ee_drv_gen = siena_ev_drv_gen;
#if EFSYS_OPT_MCDI
- eep->ee_mcdi = falconsiena_ev_mcdi;
+ eep->ee_mcdi = siena_ev_mcdi;
#endif /* EFSYS_OPT_MCDI */
/* Set up the new event queue */
- if (enp->en_family != EFX_FAMILY_FALCON) {
- EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1);
- EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
- }
+ EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
FRF_AZ_EVQ_BUF_BASE_ID, id);
@@ -1362,7 +1328,7 @@ fail1:
return (rc);
}
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
+#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_QSTATS
#if EFSYS_OPT_NAMES
@@ -1421,11 +1387,11 @@ efx_ev_qstat_name(
#endif /* EFSYS_OPT_NAMES */
#endif /* EFSYS_OPT_QSTATS */
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
#if EFSYS_OPT_QSTATS
static void
-falconsiena_ev_qstats_update(
+siena_ev_qstats_update(
__in efx_evq_t *eep,
__inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
{
@@ -1441,7 +1407,7 @@ falconsiena_ev_qstats_update(
#endif /* EFSYS_OPT_QSTATS */
static void
-falconsiena_ev_qdestroy(
+siena_ev_qdestroy(
__in efx_evq_t *eep)
{
efx_nic_t *enp = eep->ee_enp;
@@ -1453,18 +1419,15 @@ falconsiena_ev_qdestroy(
EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
eep->ee_index, &oword, B_TRUE);
- if (enp->en_family != EFX_FAMILY_FALCON) {
- EFX_ZERO_OWORD(oword);
- EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL,
- eep->ee_index, &oword, B_TRUE);
- }
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
}
static void
-falconsiena_ev_fini(
+siena_ev_fini(
__in efx_nic_t *enp)
{
_NOTE(ARGUNUSED(enp))
}
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/efx_filter.c b/sys/dev/sfxge/common/efx_filter.c
index a4b59cc..242a772 100644
--- a/sys/dev/sfxge/common/efx_filter.c
+++ b/sys/dev/sfxge/common/efx_filter.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,65 +37,53 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_FILTER
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_filter_init(
+siena_filter_init(
__in efx_nic_t *enp);
static void
-falconsiena_filter_fini(
+siena_filter_fini(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_filter_restore(
+siena_filter_restore(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_filter_add(
+siena_filter_add(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec,
__in boolean_t may_replace);
static __checkReturn efx_rc_t
-falconsiena_filter_delete(
+siena_filter_delete(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec);
static __checkReturn efx_rc_t
-falconsiena_filter_supported_filters(
+siena_filter_supported_filters(
__in efx_nic_t *enp,
__out uint32_t *list,
__out size_t *length);
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
-
-#if EFSYS_OPT_FALCON
-static efx_filter_ops_t __efx_filter_falcon_ops = {
- falconsiena_filter_init, /* efo_init */
- falconsiena_filter_fini, /* efo_fini */
- falconsiena_filter_restore, /* efo_restore */
- falconsiena_filter_add, /* efo_add */
- falconsiena_filter_delete, /* efo_delete */
- falconsiena_filter_supported_filters, /* efo_supported_filters */
- NULL, /* efo_reconfigure */
-};
-#endif /* EFSYS_OPT_FALCON */
+#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_SIENA
-static efx_filter_ops_t __efx_filter_siena_ops = {
- falconsiena_filter_init, /* efo_init */
- falconsiena_filter_fini, /* efo_fini */
- falconsiena_filter_restore, /* efo_restore */
- falconsiena_filter_add, /* efo_add */
- falconsiena_filter_delete, /* efo_delete */
- falconsiena_filter_supported_filters, /* efo_supported_filters */
- NULL, /* efo_reconfigure */
+static const efx_filter_ops_t __efx_filter_siena_ops = {
+ siena_filter_init, /* efo_init */
+ siena_filter_fini, /* efo_fini */
+ siena_filter_restore, /* efo_restore */
+ siena_filter_add, /* efo_add */
+ siena_filter_delete, /* efo_delete */
+ siena_filter_supported_filters, /* efo_supported_filters */
+ NULL, /* efo_reconfigure */
};
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static efx_filter_ops_t __efx_filter_ef10_ops = {
+static const efx_filter_ops_t __efx_filter_ef10_ops = {
ef10_filter_init, /* efo_init */
ef10_filter_fini, /* efo_fini */
ef10_filter_restore, /* efo_restore */
@@ -111,7 +99,7 @@ efx_filter_insert(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec)
{
- efx_filter_ops_t *efop = enp->en_efop;
+ const efx_filter_ops_t *efop = enp->en_efop;
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
EFSYS_ASSERT3P(spec, !=, NULL);
@@ -125,7 +113,7 @@ efx_filter_remove(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec)
{
- efx_filter_ops_t *efop = enp->en_efop;
+ const efx_filter_ops_t *efop = enp->en_efop;
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
EFSYS_ASSERT3P(spec, !=, NULL);
@@ -161,7 +149,7 @@ fail1:
efx_filter_init(
__in efx_nic_t *enp)
{
- efx_filter_ops_t *efop;
+ const efx_filter_ops_t *efop;
efx_rc_t rc;
/* Check that efx_filter_spec_t is 64 bytes. */
@@ -172,27 +160,21 @@ efx_filter_init(
EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER));
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- efop = (efx_filter_ops_t *)&__efx_filter_falcon_ops;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- efop = (efx_filter_ops_t *)&__efx_filter_siena_ops;
+ efop = &__efx_filter_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- efop = (efx_filter_ops_t *)&__efx_filter_ef10_ops;
+ efop = &__efx_filter_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- efop = (efx_filter_ops_t *)&__efx_filter_ef10_ops;
+ efop = &__efx_filter_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -266,7 +248,7 @@ efx_filter_reconfigure(
__in boolean_t all_mulcst,
__in boolean_t brdcst,
__in_ecount(6*count) uint8_t const *addrs,
- __in int count)
+ __in uint32_t count)
{
efx_rc_t rc;
@@ -428,7 +410,7 @@ efx_filter_spec_set_mc_def(
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
/*
* "Fudge factors" - difference between programmed value and actual depth.
@@ -446,8 +428,8 @@ efx_filter_spec_set_mc_def(
#define FILTER_CTL_SRCH_MAX 200
static __checkReturn efx_rc_t
-falconsiena_filter_spec_from_gen_spec(
- __out falconsiena_filter_spec_t *fs_spec,
+siena_filter_spec_from_gen_spec(
+ __out siena_filter_spec_t *sf_spec,
__in efx_filter_spec_t *gen_spec)
{
efx_rc_t rc;
@@ -465,8 +447,8 @@ falconsiena_filter_spec_from_gen_spec(
goto fail1;
}
- fs_spec->fsfs_flags = gen_spec->efs_flags;
- fs_spec->fsfs_dmaq_id = gen_spec->efs_dmaq_id;
+ sf_spec->sfs_flags = gen_spec->efs_flags;
+ sf_spec->sfs_dmaq_id = gen_spec->efs_dmaq_id;
switch (gen_spec->efs_match_flags) {
case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
@@ -491,24 +473,24 @@ falconsiena_filter_spec_from_gen_spec(
switch (gen_spec->efs_ip_proto) {
case EFX_IPPROTO_TCP:
if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
- fs_spec->fsfs_type = (is_full ?
- EFX_FS_FILTER_TX_TCP_FULL :
- EFX_FS_FILTER_TX_TCP_WILD);
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_TCP_FULL :
+ EFX_SIENA_FILTER_TX_TCP_WILD);
} else {
- fs_spec->fsfs_type = (is_full ?
- EFX_FS_FILTER_RX_TCP_FULL :
- EFX_FS_FILTER_RX_TCP_WILD);
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_TCP_FULL :
+ EFX_SIENA_FILTER_RX_TCP_WILD);
}
break;
case EFX_IPPROTO_UDP:
if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
- fs_spec->fsfs_type = (is_full ?
- EFX_FS_FILTER_TX_UDP_FULL :
- EFX_FS_FILTER_TX_UDP_WILD);
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_UDP_FULL :
+ EFX_SIENA_FILTER_TX_UDP_WILD);
} else {
- fs_spec->fsfs_type = (is_full ?
- EFX_FS_FILTER_RX_UDP_FULL :
- EFX_FS_FILTER_RX_UDP_WILD);
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_UDP_FULL :
+ EFX_SIENA_FILTER_RX_UDP_WILD);
}
break;
default:
@@ -531,7 +513,8 @@ falconsiena_filter_spec_from_gen_spec(
host2 = gen_spec->efs_loc_host.eo_u32[0];
}
if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
- if (fs_spec->fsfs_type == EFX_FS_FILTER_TX_UDP_WILD) {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_TX_UDP_WILD) {
port1 = rport;
port2 = gen_spec->efs_loc_port;
} else {
@@ -539,7 +522,8 @@ falconsiena_filter_spec_from_gen_spec(
port2 = rport;
}
} else {
- if (fs_spec->fsfs_type == EFX_FS_FILTER_RX_UDP_WILD) {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_RX_UDP_WILD) {
port1 = gen_spec->efs_loc_port;
port2 = rport;
} else {
@@ -547,9 +531,9 @@ falconsiena_filter_spec_from_gen_spec(
port2 = gen_spec->efs_loc_port;
}
}
- fs_spec->fsfs_dword[0] = (host1 << 16) | port1;
- fs_spec->fsfs_dword[1] = (port2 << 16) | (host1 >> 16);
- fs_spec->fsfs_dword[2] = host2;
+ sf_spec->sfs_dword[0] = (host1 << 16) | port1;
+ sf_spec->sfs_dword[1] = (port2 << 16) | (host1 >> 16);
+ sf_spec->sfs_dword[2] = host2;
break;
}
@@ -558,21 +542,21 @@ falconsiena_filter_spec_from_gen_spec(
/* Fall through */
case EFX_FILTER_MATCH_LOC_MAC:
if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
- fs_spec->fsfs_type = (is_full ?
- EFX_FS_FILTER_TX_MAC_FULL :
- EFX_FS_FILTER_TX_MAC_WILD);
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_MAC_FULL :
+ EFX_SIENA_FILTER_TX_MAC_WILD);
} else {
- fs_spec->fsfs_type = (is_full ?
- EFX_FS_FILTER_RX_MAC_FULL :
- EFX_FS_FILTER_RX_MAC_WILD);
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_MAC_FULL :
+ EFX_SIENA_FILTER_RX_MAC_WILD);
}
- fs_spec->fsfs_dword[0] = is_full ? gen_spec->efs_outer_vid : 0;
- fs_spec->fsfs_dword[1] =
+ sf_spec->sfs_dword[0] = is_full ? gen_spec->efs_outer_vid : 0;
+ sf_spec->sfs_dword[1] =
gen_spec->efs_loc_mac[2] << 24 |
gen_spec->efs_loc_mac[3] << 16 |
gen_spec->efs_loc_mac[4] << 8 |
gen_spec->efs_loc_mac[5];
- fs_spec->fsfs_dword[2] =
+ sf_spec->sfs_dword[2] =
gen_spec->efs_loc_mac[0] << 8 |
gen_spec->efs_loc_mac[1];
break;
@@ -604,7 +588,7 @@ fail1:
* key derived from the n-tuple.
*/
static uint16_t
-falconsiena_filter_tbl_hash(
+siena_filter_tbl_hash(
__in uint32_t key)
{
uint16_t tmp;
@@ -627,116 +611,112 @@ falconsiena_filter_tbl_hash(
* increments from the first possible entry selected by the hash.
*/
static uint16_t
-falconsiena_filter_tbl_increment(
+siena_filter_tbl_increment(
__in uint32_t key)
{
return ((uint16_t)(key * 2 - 1));
}
static __checkReturn boolean_t
-falconsiena_filter_test_used(
- __in falconsiena_filter_tbl_t *fsftp,
+siena_filter_test_used(
+ __in siena_filter_tbl_t *sftp,
__in unsigned int index)
{
- EFSYS_ASSERT3P(fsftp->fsft_bitmap, !=, NULL);
- return ((fsftp->fsft_bitmap[index / 32] & (1 << (index % 32))) != 0);
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ return ((sftp->sft_bitmap[index / 32] & (1 << (index % 32))) != 0);
}
static void
-falconsiena_filter_set_used(
- __in falconsiena_filter_tbl_t *fsftp,
+siena_filter_set_used(
+ __in siena_filter_tbl_t *sftp,
__in unsigned int index)
{
- EFSYS_ASSERT3P(fsftp->fsft_bitmap, !=, NULL);
- fsftp->fsft_bitmap[index / 32] |= (1 << (index % 32));
- ++fsftp->fsft_used;
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] |= (1 << (index % 32));
+ ++sftp->sft_used;
}
static void
-falconsiena_filter_clear_used(
- __in falconsiena_filter_tbl_t *fsftp,
+siena_filter_clear_used(
+ __in siena_filter_tbl_t *sftp,
__in unsigned int index)
{
- EFSYS_ASSERT3P(fsftp->fsft_bitmap, !=, NULL);
- fsftp->fsft_bitmap[index / 32] &= ~(1 << (index % 32));
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] &= ~(1 << (index % 32));
- --fsftp->fsft_used;
- EFSYS_ASSERT3U(fsftp->fsft_used, >=, 0);
+ --sftp->sft_used;
+ EFSYS_ASSERT3U(sftp->sft_used, >=, 0);
}
-static falconsiena_filter_tbl_id_t
-falconsiena_filter_tbl_id(
- __in falconsiena_filter_type_t type)
+static siena_filter_tbl_id_t
+siena_filter_tbl_id(
+ __in siena_filter_type_t type)
{
- falconsiena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_id_t tbl_id;
switch (type) {
- case EFX_FS_FILTER_RX_TCP_FULL:
- case EFX_FS_FILTER_RX_TCP_WILD:
- case EFX_FS_FILTER_RX_UDP_FULL:
- case EFX_FS_FILTER_RX_UDP_WILD:
- tbl_id = EFX_FS_FILTER_TBL_RX_IP;
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_IP;
break;
-#if EFSYS_OPT_SIENA
- case EFX_FS_FILTER_RX_MAC_FULL:
- case EFX_FS_FILTER_RX_MAC_WILD:
- tbl_id = EFX_FS_FILTER_TBL_RX_MAC;
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_MAC;
break;
- case EFX_FS_FILTER_TX_TCP_FULL:
- case EFX_FS_FILTER_TX_TCP_WILD:
- case EFX_FS_FILTER_TX_UDP_FULL:
- case EFX_FS_FILTER_TX_UDP_WILD:
- tbl_id = EFX_FS_FILTER_TBL_TX_IP;
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_IP;
break;
- case EFX_FS_FILTER_TX_MAC_FULL:
- case EFX_FS_FILTER_TX_MAC_WILD:
- tbl_id = EFX_FS_FILTER_TBL_TX_MAC;
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_MAC;
break;
-#endif /* EFSYS_OPT_SIENA */
default:
EFSYS_ASSERT(B_FALSE);
- tbl_id = EFX_FS_FILTER_NTBLS;
+ tbl_id = EFX_SIENA_FILTER_NTBLS;
break;
}
return (tbl_id);
}
static void
-falconsiena_filter_reset_search_depth(
- __inout falconsiena_filter_t *fsfp,
- __in falconsiena_filter_tbl_id_t tbl_id)
+siena_filter_reset_search_depth(
+ __inout siena_filter_t *sfp,
+ __in siena_filter_tbl_id_t tbl_id)
{
switch (tbl_id) {
- case EFX_FS_FILTER_TBL_RX_IP:
- fsfp->fsf_depth[EFX_FS_FILTER_RX_TCP_FULL] = 0;
- fsfp->fsf_depth[EFX_FS_FILTER_RX_TCP_WILD] = 0;
- fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_FULL] = 0;
- fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_WILD] = 0;
+ case EFX_SIENA_FILTER_TBL_RX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] = 0;
break;
-#if EFSYS_OPT_SIENA
- case EFX_FS_FILTER_TBL_RX_MAC:
- fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_FULL] = 0;
- fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_WILD] = 0;
+ case EFX_SIENA_FILTER_TBL_RX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] = 0;
break;
- case EFX_FS_FILTER_TBL_TX_IP:
- fsfp->fsf_depth[EFX_FS_FILTER_TX_TCP_FULL] = 0;
- fsfp->fsf_depth[EFX_FS_FILTER_TX_TCP_WILD] = 0;
- fsfp->fsf_depth[EFX_FS_FILTER_TX_UDP_FULL] = 0;
- fsfp->fsf_depth[EFX_FS_FILTER_TX_UDP_WILD] = 0;
+ case EFX_SIENA_FILTER_TBL_TX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] = 0;
break;
- case EFX_FS_FILTER_TBL_TX_MAC:
- fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_FULL] = 0;
- fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_WILD] = 0;
+ case EFX_SIENA_FILTER_TBL_TX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] = 0;
break;
-#endif /* EFSYS_OPT_SIENA */
default:
EFSYS_ASSERT(B_FALSE);
@@ -745,79 +725,77 @@ falconsiena_filter_reset_search_depth(
}
static void
-falconsiena_filter_push_rx_limits(
+siena_filter_push_rx_limits(
__in efx_nic_t *enp)
{
- falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
efx_oword_t oword;
EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT,
- fsfp->fsf_depth[EFX_FS_FILTER_RX_TCP_FULL] +
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT,
- fsfp->fsf_depth[EFX_FS_FILTER_RX_TCP_WILD] +
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT,
- fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_FULL] +
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT,
- fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_WILD] +
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
-#if EFSYS_OPT_SIENA
- if (fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_MAC].fsft_size) {
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC].sft_size) {
EFX_SET_OWORD_FIELD(oword,
FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
- fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_FULL] +
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(oword,
FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
- fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_WILD] +
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
}
-#endif /* EFSYS_OPT_SIENA */
EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
}
static void
-falconsiena_filter_push_tx_limits(
+siena_filter_push_tx_limits(
__in efx_nic_t *enp)
{
- falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
efx_oword_t oword;
EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
- if (fsfp->fsf_tbl[EFX_FS_FILTER_TBL_TX_IP].fsft_size != 0) {
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP].sft_size != 0) {
EFX_SET_OWORD_FIELD(oword,
FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE,
- fsfp->fsf_depth[EFX_FS_FILTER_TX_TCP_FULL] +
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(oword,
FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE,
- fsfp->fsf_depth[EFX_FS_FILTER_TX_TCP_WILD] +
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
EFX_SET_OWORD_FIELD(oword,
FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE,
- fsfp->fsf_depth[EFX_FS_FILTER_TX_UDP_FULL] +
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(oword,
FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE,
- fsfp->fsf_depth[EFX_FS_FILTER_TX_UDP_WILD] +
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
}
- if (fsfp->fsf_tbl[EFX_FS_FILTER_TBL_TX_MAC].fsft_size != 0) {
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC].sft_size != 0) {
EFX_SET_OWORD_FIELD(
oword, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
- fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_FULL] +
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(
oword, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
- fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_WILD] +
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
}
@@ -826,77 +804,73 @@ falconsiena_filter_push_tx_limits(
/* Build a filter entry and return its n-tuple key. */
static __checkReturn uint32_t
-falconsiena_filter_build(
+siena_filter_build(
__out efx_oword_t *filter,
- __in falconsiena_filter_spec_t *spec)
+ __in siena_filter_spec_t *spec)
{
uint32_t dword3;
uint32_t key;
- uint8_t type = spec->fsfs_type;
- uint32_t flags = spec->fsfs_flags;
+ uint8_t type = spec->sfs_type;
+ uint32_t flags = spec->sfs_flags;
- switch (falconsiena_filter_tbl_id(type)) {
- case EFX_FS_FILTER_TBL_RX_IP: {
- boolean_t is_udp = (type == EFX_FS_FILTER_RX_UDP_FULL ||
- type == EFX_FS_FILTER_RX_UDP_WILD);
+ switch (siena_filter_tbl_id(type)) {
+ case EFX_SIENA_FILTER_TBL_RX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_RX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_RX_UDP_WILD);
EFX_POPULATE_OWORD_7(*filter,
FRF_BZ_RSS_EN,
(flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
FRF_BZ_SCATTER_EN,
(flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
FRF_AZ_TCP_UDP, is_udp,
- FRF_AZ_RXQ_ID, spec->fsfs_dmaq_id,
- EFX_DWORD_2, spec->fsfs_dword[2],
- EFX_DWORD_1, spec->fsfs_dword[1],
- EFX_DWORD_0, spec->fsfs_dword[0]);
+ FRF_AZ_RXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
dword3 = is_udp;
break;
}
-#if EFSYS_OPT_SIENA
- case EFX_FS_FILTER_TBL_RX_MAC: {
- boolean_t is_wild = (type == EFX_FS_FILTER_RX_MAC_WILD);
+ case EFX_SIENA_FILTER_TBL_RX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_RX_MAC_WILD);
EFX_POPULATE_OWORD_7(*filter,
FRF_CZ_RMFT_RSS_EN,
(flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
FRF_CZ_RMFT_SCATTER_EN,
(flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
- FRF_CZ_RMFT_RXQ_ID, spec->fsfs_dmaq_id,
+ FRF_CZ_RMFT_RXQ_ID, spec->sfs_dmaq_id,
FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_RMFT_DEST_MAC_DW1, spec->fsfs_dword[2],
- FRF_CZ_RMFT_DEST_MAC_DW0, spec->fsfs_dword[1],
- FRF_CZ_RMFT_VLAN_ID, spec->fsfs_dword[0]);
+ FRF_CZ_RMFT_DEST_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_RMFT_DEST_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->sfs_dword[0]);
dword3 = is_wild;
break;
}
-#endif /* EFSYS_OPT_SIENA */
- case EFX_FS_FILTER_TBL_TX_IP: {
- boolean_t is_udp = (type == EFX_FS_FILTER_TX_UDP_FULL ||
- type == EFX_FS_FILTER_TX_UDP_WILD);
+ case EFX_SIENA_FILTER_TBL_TX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_TX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_TX_UDP_WILD);
EFX_POPULATE_OWORD_5(*filter,
FRF_CZ_TIFT_TCP_UDP, is_udp,
- FRF_CZ_TIFT_TXQ_ID, spec->fsfs_dmaq_id,
- EFX_DWORD_2, spec->fsfs_dword[2],
- EFX_DWORD_1, spec->fsfs_dword[1],
- EFX_DWORD_0, spec->fsfs_dword[0]);
- dword3 = is_udp | spec->fsfs_dmaq_id << 1;
+ FRF_CZ_TIFT_TXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp | spec->sfs_dmaq_id << 1;
break;
}
-#if EFSYS_OPT_SIENA
- case EFX_FS_FILTER_TBL_TX_MAC: {
- boolean_t is_wild = (type == EFX_FS_FILTER_TX_MAC_WILD);
+ case EFX_SIENA_FILTER_TBL_TX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_TX_MAC_WILD);
EFX_POPULATE_OWORD_5(*filter,
- FRF_CZ_TMFT_TXQ_ID, spec->fsfs_dmaq_id,
+ FRF_CZ_TMFT_TXQ_ID, spec->sfs_dmaq_id,
FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_TMFT_SRC_MAC_DW1, spec->fsfs_dword[2],
- FRF_CZ_TMFT_SRC_MAC_DW0, spec->fsfs_dword[1],
- FRF_CZ_TMFT_VLAN_ID, spec->fsfs_dword[0]);
- dword3 = is_wild | spec->fsfs_dmaq_id << 1;
+ FRF_CZ_TMFT_SRC_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_TMFT_SRC_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild | spec->sfs_dmaq_id << 1;
break;
}
-#endif /* EFSYS_OPT_SIENA */
default:
EFSYS_ASSERT(B_FALSE);
@@ -904,53 +878,51 @@ falconsiena_filter_build(
}
key =
- spec->fsfs_dword[0] ^
- spec->fsfs_dword[1] ^
- spec->fsfs_dword[2] ^
+ spec->sfs_dword[0] ^
+ spec->sfs_dword[1] ^
+ spec->sfs_dword[2] ^
dword3;
return (key);
}
static __checkReturn efx_rc_t
-falconsiena_filter_push_entry(
+siena_filter_push_entry(
__inout efx_nic_t *enp,
- __in falconsiena_filter_type_t type,
+ __in siena_filter_type_t type,
__in int index,
__in efx_oword_t *eop)
{
efx_rc_t rc;
switch (type) {
- case EFX_FS_FILTER_RX_TCP_FULL:
- case EFX_FS_FILTER_RX_TCP_WILD:
- case EFX_FS_FILTER_RX_UDP_FULL:
- case EFX_FS_FILTER_RX_UDP_WILD:
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index,
eop, B_TRUE);
break;
-#if EFSYS_OPT_SIENA
- case EFX_FS_FILTER_RX_MAC_FULL:
- case EFX_FS_FILTER_RX_MAC_WILD:
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index,
eop, B_TRUE);
break;
- case EFX_FS_FILTER_TX_TCP_FULL:
- case EFX_FS_FILTER_TX_TCP_WILD:
- case EFX_FS_FILTER_TX_UDP_FULL:
- case EFX_FS_FILTER_TX_UDP_WILD:
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index,
eop, B_TRUE);
break;
- case EFX_FS_FILTER_TX_MAC_FULL:
- case EFX_FS_FILTER_TX_MAC_WILD:
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index,
eop, B_TRUE);
break;
-#endif /* EFSYS_OPT_SIENA */
default:
EFSYS_ASSERT(B_FALSE);
@@ -965,34 +937,34 @@ fail1:
static __checkReturn boolean_t
-falconsiena_filter_equal(
- __in const falconsiena_filter_spec_t *left,
- __in const falconsiena_filter_spec_t *right)
+siena_filter_equal(
+ __in const siena_filter_spec_t *left,
+ __in const siena_filter_spec_t *right)
{
- falconsiena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_id_t tbl_id;
- tbl_id = falconsiena_filter_tbl_id(left->fsfs_type);
+ tbl_id = siena_filter_tbl_id(left->sfs_type);
- if (left->fsfs_type != right->fsfs_type)
+ if (left->sfs_type != right->sfs_type)
return (B_FALSE);
- if (memcmp(left->fsfs_dword, right->fsfs_dword,
- sizeof (left->fsfs_dword)))
+ if (memcmp(left->sfs_dword, right->sfs_dword,
+ sizeof (left->sfs_dword)))
return (B_FALSE);
- if ((tbl_id == EFX_FS_FILTER_TBL_TX_IP ||
- tbl_id == EFX_FS_FILTER_TBL_TX_MAC) &&
- left->fsfs_dmaq_id != right->fsfs_dmaq_id)
+ if ((tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) &&
+ left->sfs_dmaq_id != right->sfs_dmaq_id)
return (B_FALSE);
return (B_TRUE);
}
static __checkReturn efx_rc_t
-falconsiena_filter_search(
- __in falconsiena_filter_tbl_t *fsftp,
- __in falconsiena_filter_spec_t *spec,
+siena_filter_search(
+ __in siena_filter_tbl_t *sftp,
+ __in siena_filter_spec_t *spec,
__in uint32_t key,
__in boolean_t for_insert,
__out int *filter_index,
@@ -1000,10 +972,10 @@ falconsiena_filter_search(
{
unsigned hash, incr, filter_idx, depth;
- hash = falconsiena_filter_tbl_hash(key);
- incr = falconsiena_filter_tbl_increment(key);
+ hash = siena_filter_tbl_hash(key);
+ incr = siena_filter_tbl_increment(key);
- filter_idx = hash & (fsftp->fsft_size - 1);
+ filter_idx = hash & (sftp->sft_size - 1);
depth = 1;
for (;;) {
@@ -1011,9 +983,9 @@ falconsiena_filter_search(
* Return success if entry is used and matches this spec
* or entry is unused and we are trying to insert.
*/
- if (falconsiena_filter_test_used(fsftp, filter_idx) ?
- falconsiena_filter_equal(spec,
- &fsftp->fsft_spec[filter_idx]) :
+ if (siena_filter_test_used(sftp, filter_idx) ?
+ siena_filter_equal(spec,
+ &sftp->sft_spec[filter_idx]) :
for_insert) {
*filter_index = filter_idx;
*depth_required = depth;
@@ -1024,128 +996,119 @@ falconsiena_filter_search(
if (depth == FILTER_CTL_SRCH_MAX)
return (for_insert ? EBUSY : ENOENT);
- filter_idx = (filter_idx + incr) & (fsftp->fsft_size - 1);
+ filter_idx = (filter_idx + incr) & (sftp->sft_size - 1);
++depth;
}
}
static void
-falconsiena_filter_clear_entry(
+siena_filter_clear_entry(
__in efx_nic_t *enp,
- __in falconsiena_filter_tbl_t *fsftp,
+ __in siena_filter_tbl_t *sftp,
__in int index)
{
efx_oword_t filter;
- if (falconsiena_filter_test_used(fsftp, index)) {
- falconsiena_filter_clear_used(fsftp, index);
+ if (siena_filter_test_used(sftp, index)) {
+ siena_filter_clear_used(sftp, index);
EFX_ZERO_OWORD(filter);
- falconsiena_filter_push_entry(enp,
- fsftp->fsft_spec[index].fsfs_type,
+ siena_filter_push_entry(enp,
+ sftp->sft_spec[index].sfs_type,
index, &filter);
- memset(&fsftp->fsft_spec[index],
- 0, sizeof (fsftp->fsft_spec[0]));
+ memset(&sftp->sft_spec[index],
+ 0, sizeof (sftp->sft_spec[0]));
}
}
void
-falconsiena_filter_tbl_clear(
+siena_filter_tbl_clear(
__in efx_nic_t *enp,
- __in falconsiena_filter_tbl_id_t tbl_id)
+ __in siena_filter_tbl_id_t tbl_id)
{
- falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
- falconsiena_filter_tbl_t *fsftp = &fsfp->fsf_tbl[tbl_id];
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
int index;
int state;
EFSYS_LOCK(enp->en_eslp, state);
- for (index = 0; index < fsftp->fsft_size; ++index) {
- falconsiena_filter_clear_entry(enp, fsftp, index);
+ for (index = 0; index < sftp->sft_size; ++index) {
+ siena_filter_clear_entry(enp, sftp, index);
}
- if (fsftp->fsft_used == 0)
- falconsiena_filter_reset_search_depth(fsfp, tbl_id);
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
EFSYS_UNLOCK(enp->en_eslp, state);
}
static __checkReturn efx_rc_t
-falconsiena_filter_init(
+siena_filter_init(
__in efx_nic_t *enp)
{
- falconsiena_filter_t *fsfp;
- falconsiena_filter_tbl_t *fsftp;
+ siena_filter_t *sfp;
+ siena_filter_tbl_t *sftp;
int tbl_id;
efx_rc_t rc;
- EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (falconsiena_filter_t), fsfp);
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (siena_filter_t), sfp);
- if (!fsfp) {
+ if (!sfp) {
rc = ENOMEM;
goto fail1;
}
- enp->en_filter.ef_falconsiena_filter = fsfp;
+ enp->en_filter.ef_siena_filter = sfp;
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_IP];
- fsftp->fsft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
-#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_IP];
- fsftp->fsft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_IP];
+ sftp->sft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
- fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_MAC];
- fsftp->fsft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC];
+ sftp->sft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
- fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_TX_IP];
- fsftp->fsft_size = FR_CZ_TX_FILTER_TBL0_ROWS;
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP];
+ sftp->sft_size = FR_CZ_TX_FILTER_TBL0_ROWS;
- fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_TX_MAC];
- fsftp->fsft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC];
+ sftp->sft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
break;
-#endif /* EFSYS_OPT_SIENA */
default:
rc = ENOTSUP;
goto fail2;
}
- for (tbl_id = 0; tbl_id < EFX_FS_FILTER_NTBLS; tbl_id++) {
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
unsigned int bitmap_size;
- fsftp = &fsfp->fsf_tbl[tbl_id];
- if (fsftp->fsft_size == 0)
+ sftp = &sfp->sf_tbl[tbl_id];
+ if (sftp->sft_size == 0)
continue;
- EFX_STATIC_ASSERT(sizeof (fsftp->fsft_bitmap[0]) ==
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
sizeof (uint32_t));
bitmap_size =
- (fsftp->fsft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
- EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, fsftp->fsft_bitmap);
- if (!fsftp->fsft_bitmap) {
+ EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, sftp->sft_bitmap);
+ if (!sftp->sft_bitmap) {
rc = ENOMEM;
goto fail3;
}
EFSYS_KMEM_ALLOC(enp->en_esip,
- fsftp->fsft_size * sizeof (*fsftp->fsft_spec),
- fsftp->fsft_spec);
- if (!fsftp->fsft_spec) {
+ sftp->sft_size * sizeof (*sftp->sft_spec),
+ sftp->sft_spec);
+ if (!sftp->sft_spec) {
rc = ENOMEM;
goto fail4;
}
- memset(fsftp->fsft_spec, 0,
- fsftp->fsft_size * sizeof (*fsftp->fsft_spec));
+ memset(sftp->sft_spec, 0,
+ sftp->sft_size * sizeof (*sftp->sft_spec));
}
return (0);
@@ -1158,7 +1121,7 @@ fail3:
fail2:
EFSYS_PROBE(fail2);
- falconsiena_filter_fini(enp);
+ siena_filter_fini(enp);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -1166,53 +1129,53 @@ fail1:
}
static void
-falconsiena_filter_fini(
+siena_filter_fini(
__in efx_nic_t *enp)
{
- falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
- falconsiena_filter_tbl_id_t tbl_id;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
- if (fsfp == NULL)
+ if (sfp == NULL)
return;
- for (tbl_id = 0; tbl_id < EFX_FS_FILTER_NTBLS; tbl_id++) {
- falconsiena_filter_tbl_t *fsftp = &fsfp->fsf_tbl[tbl_id];
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
unsigned int bitmap_size;
- EFX_STATIC_ASSERT(sizeof (fsftp->fsft_bitmap[0]) ==
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
sizeof (uint32_t));
bitmap_size =
- (fsftp->fsft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
- if (fsftp->fsft_bitmap != NULL) {
+ if (sftp->sft_bitmap != NULL) {
EFSYS_KMEM_FREE(enp->en_esip, bitmap_size,
- fsftp->fsft_bitmap);
- fsftp->fsft_bitmap = NULL;
+ sftp->sft_bitmap);
+ sftp->sft_bitmap = NULL;
}
- if (fsftp->fsft_spec != NULL) {
- EFSYS_KMEM_FREE(enp->en_esip, fsftp->fsft_size *
- sizeof (*fsftp->fsft_spec), fsftp->fsft_spec);
- fsftp->fsft_spec = NULL;
+ if (sftp->sft_spec != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sftp->sft_size *
+ sizeof (*sftp->sft_spec), sftp->sft_spec);
+ sftp->sft_spec = NULL;
}
}
- EFSYS_KMEM_FREE(enp->en_esip, sizeof (falconsiena_filter_t),
- enp->en_filter.ef_falconsiena_filter);
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (siena_filter_t),
+ enp->en_filter.ef_siena_filter);
}
/* Restore filter state after a reset */
static __checkReturn efx_rc_t
-falconsiena_filter_restore(
+siena_filter_restore(
__in efx_nic_t *enp)
{
- falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
- falconsiena_filter_tbl_id_t tbl_id;
- falconsiena_filter_tbl_t *fsftp;
- falconsiena_filter_spec_t *spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *spec;
efx_oword_t filter;
int filter_idx;
int state;
@@ -1220,25 +1183,25 @@ falconsiena_filter_restore(
EFSYS_LOCK(enp->en_eslp, state);
- for (tbl_id = 0; tbl_id < EFX_FS_FILTER_NTBLS; tbl_id++) {
- fsftp = &fsfp->fsf_tbl[tbl_id];
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ sftp = &sfp->sf_tbl[tbl_id];
for (filter_idx = 0;
- filter_idx < fsftp->fsft_size;
+ filter_idx < sftp->sft_size;
filter_idx++) {
- if (!falconsiena_filter_test_used(fsftp, filter_idx))
+ if (!siena_filter_test_used(sftp, filter_idx))
continue;
- spec = &fsftp->fsft_spec[filter_idx];
- if ((rc = falconsiena_filter_build(&filter, spec)) != 0)
+ spec = &sftp->sft_spec[filter_idx];
+ if ((rc = siena_filter_build(&filter, spec)) != 0)
goto fail1;
- if ((rc = falconsiena_filter_push_entry(enp,
- spec->fsfs_type, filter_idx, &filter)) != 0)
+ if ((rc = siena_filter_push_entry(enp,
+ spec->sfs_type, filter_idx, &filter)) != 0)
goto fail2;
}
}
- falconsiena_filter_push_rx_limits(enp);
- falconsiena_filter_push_tx_limits(enp);
+ siena_filter_push_rx_limits(enp);
+ siena_filter_push_tx_limits(enp);
EFSYS_UNLOCK(enp->en_eslp, state);
@@ -1256,17 +1219,17 @@ fail1:
}
static __checkReturn efx_rc_t
-falconsiena_filter_add(
+siena_filter_add(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec,
__in boolean_t may_replace)
{
efx_rc_t rc;
- falconsiena_filter_spec_t fs_spec;
- falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
- falconsiena_filter_tbl_id_t tbl_id;
- falconsiena_filter_tbl_t *fsftp;
- falconsiena_filter_spec_t *saved_fs_spec;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *saved_sf_spec;
efx_oword_t filter;
int filter_idx;
unsigned int depth;
@@ -1276,48 +1239,48 @@ falconsiena_filter_add(
EFSYS_ASSERT3P(spec, !=, NULL);
- if ((rc = falconsiena_filter_spec_from_gen_spec(&fs_spec, spec)) != 0)
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
goto fail1;
- tbl_id = falconsiena_filter_tbl_id(fs_spec.fsfs_type);
- fsftp = &fsfp->fsf_tbl[tbl_id];
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
- if (fsftp->fsft_size == 0) {
+ if (sftp->sft_size == 0) {
rc = EINVAL;
goto fail2;
}
- key = falconsiena_filter_build(&filter, &fs_spec);
+ key = siena_filter_build(&filter, &sf_spec);
EFSYS_LOCK(enp->en_eslp, state);
- rc = falconsiena_filter_search(fsftp, &fs_spec, key, B_TRUE,
+ rc = siena_filter_search(sftp, &sf_spec, key, B_TRUE,
&filter_idx, &depth);
if (rc != 0)
goto fail3;
- EFSYS_ASSERT3U(filter_idx, <, fsftp->fsft_size);
- saved_fs_spec = &fsftp->fsft_spec[filter_idx];
+ EFSYS_ASSERT3U(filter_idx, <, sftp->sft_size);
+ saved_sf_spec = &sftp->sft_spec[filter_idx];
- if (falconsiena_filter_test_used(fsftp, filter_idx)) {
+ if (siena_filter_test_used(sftp, filter_idx)) {
if (may_replace == B_FALSE) {
rc = EEXIST;
goto fail4;
}
}
- falconsiena_filter_set_used(fsftp, filter_idx);
- *saved_fs_spec = fs_spec;
-
- if (fsfp->fsf_depth[fs_spec.fsfs_type] < depth) {
- fsfp->fsf_depth[fs_spec.fsfs_type] = depth;
- if (tbl_id == EFX_FS_FILTER_TBL_TX_IP ||
- tbl_id == EFX_FS_FILTER_TBL_TX_MAC)
- falconsiena_filter_push_tx_limits(enp);
+ siena_filter_set_used(sftp, filter_idx);
+ *saved_sf_spec = sf_spec;
+
+ if (sfp->sf_depth[sf_spec.sfs_type] < depth) {
+ sfp->sf_depth[sf_spec.sfs_type] = depth;
+ if (tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC)
+ siena_filter_push_tx_limits(enp);
else
- falconsiena_filter_push_rx_limits(enp);
+ siena_filter_push_rx_limits(enp);
}
- falconsiena_filter_push_entry(enp, fs_spec.fsfs_type,
+ siena_filter_push_entry(enp, sf_spec.sfs_type,
filter_idx, &filter);
EFSYS_UNLOCK(enp->en_eslp, state);
@@ -1339,15 +1302,15 @@ fail1:
}
static __checkReturn efx_rc_t
-falconsiena_filter_delete(
+siena_filter_delete(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec)
{
efx_rc_t rc;
- falconsiena_filter_spec_t fs_spec;
- falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
- falconsiena_filter_tbl_id_t tbl_id;
- falconsiena_filter_tbl_t *fsftp;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
efx_oword_t filter;
int filter_idx;
unsigned int depth;
@@ -1356,24 +1319,24 @@ falconsiena_filter_delete(
EFSYS_ASSERT3P(spec, !=, NULL);
- if ((rc = falconsiena_filter_spec_from_gen_spec(&fs_spec, spec)) != 0)
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
goto fail1;
- tbl_id = falconsiena_filter_tbl_id(fs_spec.fsfs_type);
- fsftp = &fsfp->fsf_tbl[tbl_id];
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
- key = falconsiena_filter_build(&filter, &fs_spec);
+ key = siena_filter_build(&filter, &sf_spec);
EFSYS_LOCK(enp->en_eslp, state);
- rc = falconsiena_filter_search(fsftp, &fs_spec, key, B_FALSE,
+ rc = siena_filter_search(sftp, &sf_spec, key, B_FALSE,
&filter_idx, &depth);
if (rc != 0)
goto fail2;
- falconsiena_filter_clear_entry(enp, fsftp, filter_idx);
- if (fsftp->fsft_used == 0)
- falconsiena_filter_reset_search_depth(fsfp, tbl_id);
+ siena_filter_clear_entry(enp, sftp, filter_idx);
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
EFSYS_UNLOCK(enp->en_eslp, state);
return (0);
@@ -1390,7 +1353,7 @@ fail1:
#define MAX_SUPPORTED 4
static __checkReturn efx_rc_t
-falconsiena_filter_supported_filters(
+siena_filter_supported_filters(
__in efx_nic_t *enp,
__out uint32_t *list,
__out size_t *length)
@@ -1434,6 +1397,6 @@ fail1:
#undef MAX_SUPPORTED
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
+#endif /* EFSYS_OPT_SIENA */
#endif /* EFSYS_OPT_FILTER */
diff --git a/sys/dev/sfxge/common/efx_hash.c b/sys/dev/sfxge/common/efx_hash.c
index db8b12c..808351f 100644
--- a/sys/dev/sfxge/common/efx_hash.c
+++ b/sys/dev/sfxge/common/efx_hash.c
@@ -10,7 +10,7 @@
* ...You can use this free for any purpose. It's in the public domain.
* It has no warranty."
*
- * Copyright (c) 2014-2015 Solarflare Communications Inc.
+ * Copyright (c) 2014-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/efx_impl.h b/sys/dev/sfxge/common/efx_impl.h
index 5495b15..43e9492 100644
--- a/sys/dev/sfxge/common/efx_impl.h
+++ b/sys/dev/sfxge/common/efx_impl.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,6 @@
#ifndef _SYS_EFX_IMPL_H
#define _SYS_EFX_IMPL_H
-#include "efsys.h"
#include "efx.h"
#include "efx_regs.h"
#include "efx_regs_ef10.h"
@@ -43,12 +42,6 @@
#define ESE_DZ_EV_CODE_DRV_GEN_EV FSE_AZ_EV_CODE_DRV_GEN_EV
#endif
-#include "efx_check.h"
-
-
-#if EFSYS_OPT_FALCON
-#include "falcon_impl.h"
-#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
#include "siena_impl.h"
@@ -83,18 +76,14 @@ extern "C" {
#define EFX_MOD_MON 0x00000400
#define EFX_MOD_WOL 0x00000800
#define EFX_MOD_FILTER 0x00001000
-#define EFX_MOD_PKTFILTER 0x00002000
-#define EFX_MOD_LIC 0x00004000
+#define EFX_MOD_LIC 0x00002000
-#define EFX_RESET_MAC 0x00000001
-#define EFX_RESET_PHY 0x00000002
-#define EFX_RESET_RXQ_ERR 0x00000004
-#define EFX_RESET_TXQ_ERR 0x00000008
+#define EFX_RESET_PHY 0x00000001
+#define EFX_RESET_RXQ_ERR 0x00000002
+#define EFX_RESET_TXQ_ERR 0x00000004
typedef enum efx_mac_type_e {
EFX_MAC_INVALID = 0,
- EFX_MAC_FALCON_GMAC,
- EFX_MAC_FALCON_XMAC,
EFX_MAC_SIENA,
EFX_MAC_HUNTINGTON,
EFX_MAC_MEDFORD,
@@ -190,11 +179,11 @@ typedef struct efx_rx_ops_s {
} efx_rx_ops_t;
typedef struct efx_mac_ops_s {
- efx_rc_t (*emo_reset)(efx_nic_t *); /* optional */
efx_rc_t (*emo_poll)(efx_nic_t *, efx_link_mode_t *);
efx_rc_t (*emo_up)(efx_nic_t *, boolean_t *);
efx_rc_t (*emo_addr_set)(efx_nic_t *);
efx_rc_t (*emo_pdu_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_get)(efx_nic_t *, size_t *);
efx_rc_t (*emo_reconfigure)(efx_nic_t *);
efx_rc_t (*emo_multicast_list_set)(efx_nic_t *);
efx_rc_t (*emo_filter_default_rxq_set)(efx_nic_t *,
@@ -218,23 +207,11 @@ typedef struct efx_phy_ops_s {
efx_rc_t (*epo_reset)(efx_nic_t *);
efx_rc_t (*epo_reconfigure)(efx_nic_t *);
efx_rc_t (*epo_verify)(efx_nic_t *);
- efx_rc_t (*epo_uplink_check)(efx_nic_t *,
- boolean_t *); /* optional */
- efx_rc_t (*epo_downlink_check)(efx_nic_t *, efx_link_mode_t *,
- unsigned int *, uint32_t *);
efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *);
#if EFSYS_OPT_PHY_STATS
efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *,
uint32_t *);
#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-#if EFSYS_OPT_NAMES
- const char *(*epo_prop_name)(efx_nic_t *, unsigned int);
-#endif /* EFSYS_OPT_PHY_PROPS */
- efx_rc_t (*epo_prop_get)(efx_nic_t *, unsigned int, uint32_t,
- uint32_t *);
- efx_rc_t (*epo_prop_set)(efx_nic_t *, unsigned int, uint32_t);
-#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_BIST
efx_rc_t (*epo_bist_enable_offline)(efx_nic_t *);
efx_rc_t (*epo_bist_start)(efx_nic_t *, efx_bist_type_t);
@@ -256,7 +233,7 @@ typedef struct efx_filter_ops_s {
efx_rc_t (*efo_supported_filters)(efx_nic_t *, uint32_t *, size_t *);
efx_rc_t (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t,
boolean_t, boolean_t, boolean_t,
- uint8_t const *, int);
+ uint8_t const *, uint32_t);
} efx_filter_ops_t;
extern __checkReturn efx_rc_t
@@ -268,7 +245,7 @@ efx_filter_reconfigure(
__in boolean_t all_mulcst,
__in boolean_t brdcst,
__in_ecount(6*count) uint8_t const *addrs,
- __in int count);
+ __in uint32_t count);
#endif /* EFSYS_OPT_FILTER */
@@ -306,31 +283,16 @@ typedef struct efx_port_s {
uint32_t ep_lp_cap_mask;
uint32_t ep_default_adv_cap_mask;
uint32_t ep_phy_cap_mask;
-#if EFSYS_OPT_PHY_TXC43128 || EFSYS_OPT_PHY_QT2025C
- union {
- struct {
- unsigned int bug10934_count;
- } ep_txc43128;
- struct {
- unsigned int bug17190_count;
- } ep_qt2025c;
- };
-#endif
- boolean_t ep_mac_poll_needed; /* falcon only */
- boolean_t ep_mac_up; /* falcon only */
- uint32_t ep_fwver; /* falcon only */
boolean_t ep_mac_drain;
boolean_t ep_mac_stats_pending;
#if EFSYS_OPT_BIST
efx_bist_type_t ep_current_bist;
#endif
- efx_mac_ops_t *ep_emop;
- efx_phy_ops_t *ep_epop;
+ const efx_mac_ops_t *ep_emop;
+ const efx_phy_ops_t *ep_epop;
} efx_port_t;
typedef struct efx_mon_ops_s {
- efx_rc_t (*emo_reset)(efx_nic_t *);
- efx_rc_t (*emo_reconfigure)(efx_nic_t *);
#if EFSYS_OPT_MON_STATS
efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
efx_mon_stat_value_t *);
@@ -338,8 +300,8 @@ typedef struct efx_mon_ops_s {
} efx_mon_ops_t;
typedef struct efx_mon_s {
- efx_mon_type_t em_type;
- efx_mon_ops_t *em_emop;
+ efx_mon_type_t em_type;
+ const efx_mon_ops_t *em_emop;
} efx_mon_t;
typedef struct efx_intr_ops_s {
@@ -356,10 +318,10 @@ typedef struct efx_intr_ops_s {
} efx_intr_ops_t;
typedef struct efx_intr_s {
- efx_intr_ops_t *ei_eiop;
- efsys_mem_t *ei_esmp;
- efx_intr_type_t ei_type;
- unsigned int ei_level;
+ const efx_intr_ops_t *ei_eiop;
+ efsys_mem_t *ei_esmp;
+ efx_intr_type_t ei_type;
+ unsigned int ei_level;
} efx_intr_t;
typedef struct efx_nic_ops_s {
@@ -372,7 +334,6 @@ typedef struct efx_nic_ops_s {
efx_rc_t (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t,
uint32_t *, size_t *);
#if EFSYS_OPT_DIAG
- efx_rc_t (*eno_sram_test)(efx_nic_t *, efx_sram_pattern_fn_t);
efx_rc_t (*eno_register_test)(efx_nic_t *);
#endif /* EFSYS_OPT_DIAG */
void (*eno_fini)(efx_nic_t *);
@@ -394,68 +355,64 @@ typedef struct efx_nic_ops_s {
#if EFSYS_OPT_FILTER
-typedef struct falconsiena_filter_spec_s {
- uint8_t fsfs_type;
- uint32_t fsfs_flags;
- uint32_t fsfs_dmaq_id;
- uint32_t fsfs_dword[3];
-} falconsiena_filter_spec_t;
-
-typedef enum falconsiena_filter_type_e {
- EFX_FS_FILTER_RX_TCP_FULL, /* TCP/IPv4 4-tuple {dIP,dTCP,sIP,sTCP} */
- EFX_FS_FILTER_RX_TCP_WILD, /* TCP/IPv4 dest {dIP,dTCP, -, -} */
- EFX_FS_FILTER_RX_UDP_FULL, /* UDP/IPv4 4-tuple {dIP,dUDP,sIP,sUDP} */
- EFX_FS_FILTER_RX_UDP_WILD, /* UDP/IPv4 dest {dIP,dUDP, -, -} */
+typedef struct siena_filter_spec_s {
+ uint8_t sfs_type;
+ uint32_t sfs_flags;
+ uint32_t sfs_dmaq_id;
+ uint32_t sfs_dword[3];
+} siena_filter_spec_t;
+
+typedef enum siena_filter_type_e {
+ EFX_SIENA_FILTER_RX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_RX_TCP_WILD, /* TCP/IPv4 {dIP,dTCP, -, -} */
+ EFX_SIENA_FILTER_RX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_SIENA_FILTER_RX_UDP_WILD, /* UDP/IPv4 {dIP,dUDP, -, -} */
+ EFX_SIENA_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */
+ EFX_SIENA_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */
+
+ EFX_SIENA_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_SIENA_FILTER_TX_UDP_WILD, /* UDP/IPv4 { -, -,sIP,sUDP} */
+ EFX_SIENA_FILTER_TX_MAC_FULL, /* Ethernet {sMAC,VLAN} */
+ EFX_SIENA_FILTER_TX_MAC_WILD, /* Ethernet {sMAC, -} */
+
+ EFX_SIENA_FILTER_NTYPES
+} siena_filter_type_t;
+
+typedef enum siena_filter_tbl_id_e {
+ EFX_SIENA_FILTER_TBL_RX_IP = 0,
+ EFX_SIENA_FILTER_TBL_RX_MAC,
+ EFX_SIENA_FILTER_TBL_TX_IP,
+ EFX_SIENA_FILTER_TBL_TX_MAC,
+ EFX_SIENA_FILTER_NTBLS
+} siena_filter_tbl_id_t;
+
+typedef struct siena_filter_tbl_s {
+ int sft_size; /* number of entries */
+ int sft_used; /* active count */
+ uint32_t *sft_bitmap; /* active bitmap */
+ siena_filter_spec_t *sft_spec; /* array of saved specs */
+} siena_filter_tbl_t;
+
+typedef struct siena_filter_s {
+ siena_filter_tbl_t sf_tbl[EFX_SIENA_FILTER_NTBLS];
+ unsigned int sf_depth[EFX_SIENA_FILTER_NTYPES];
+} siena_filter_t;
+typedef struct efx_filter_s {
#if EFSYS_OPT_SIENA
- EFX_FS_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */
- EFX_FS_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */
-
- EFX_FS_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
- EFX_FS_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */
- EFX_FS_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dTCP,sIP,sTCP} */
- EFX_FS_FILTER_TX_UDP_WILD, /* UDP/IPv4 source (host, port) */
-
- EFX_FS_FILTER_TX_MAC_FULL, /* Ethernet source (MAC address, VLAN ID) */
- EFX_FS_FILTER_TX_MAC_WILD, /* Ethernet source (MAC address) */
+ siena_filter_t *ef_siena_filter;
#endif /* EFSYS_OPT_SIENA */
-
- EFX_FS_FILTER_NTYPES
-} falconsiena_filter_type_t;
-
-typedef enum falconsiena_filter_tbl_id_e {
- EFX_FS_FILTER_TBL_RX_IP = 0,
- EFX_FS_FILTER_TBL_RX_MAC,
- EFX_FS_FILTER_TBL_TX_IP,
- EFX_FS_FILTER_TBL_TX_MAC,
- EFX_FS_FILTER_NTBLS
-} falconsiena_filter_tbl_id_t;
-
-typedef struct falconsiena_filter_tbl_s {
- int fsft_size; /* number of entries */
- int fsft_used; /* active count */
- uint32_t *fsft_bitmap; /* active bitmap */
- falconsiena_filter_spec_t *fsft_spec; /* array of saved specs */
-} falconsiena_filter_tbl_t;
-
-typedef struct falconsiena_filter_s {
- falconsiena_filter_tbl_t fsf_tbl[EFX_FS_FILTER_NTBLS];
- unsigned int fsf_depth[EFX_FS_FILTER_NTYPES];
-} falconsiena_filter_t;
-
-typedef struct efx_filter_s {
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
- falconsiena_filter_t *ef_falconsiena_filter;
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
ef10_filter_table_t *ef_ef10_filter_table;
#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
} efx_filter_t;
extern void
-falconsiena_filter_tbl_clear(
+siena_filter_tbl_clear(
__in efx_nic_t *enp,
- __in falconsiena_filter_tbl_id_t tbl);
+ __in siena_filter_tbl_id_t tbl);
#endif /* EFSYS_OPT_FILTER */
@@ -473,7 +430,7 @@ typedef struct efx_mcdi_ops_s {
} efx_mcdi_ops_t;
typedef struct efx_mcdi_s {
- efx_mcdi_ops_t *em_emcop;
+ const efx_mcdi_ops_t *em_emcop;
const efx_mcdi_transport_t *em_emtp;
efx_mcdi_iface_t em_emip;
} efx_mcdi_t;
@@ -500,6 +457,8 @@ typedef struct efx_nvram_ops_s {
uint32_t *, uint16_t *);
efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t,
uint16_t *);
+ efx_rc_t (*envo_buffer_validate)(efx_nic_t *, uint32_t,
+ caddr_t, size_t);
} efx_nvram_ops_t;
#endif /* EFSYS_OPT_NVRAM */
@@ -602,6 +561,27 @@ typedef struct efx_lic_ops_s {
efx_rc_t (*elo_app_state)(efx_nic_t *, uint64_t, boolean_t *);
efx_rc_t (*elo_get_id)(efx_nic_t *, size_t, uint32_t *,
size_t *, uint8_t *);
+ efx_rc_t (*elo_find_start)
+ (efx_nic_t *, caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_find_end)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *);
+ boolean_t (*elo_find_key)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *, uint32_t *);
+ boolean_t (*elo_validate_key)(efx_nic_t *,
+ caddr_t, uint32_t);
+ efx_rc_t (*elo_read_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t, uint32_t,
+ caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_write_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ caddr_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_delete_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ uint32_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_create_partition)(efx_nic_t *,
+ caddr_t, size_t);
+ efx_rc_t (*elo_finish_partition)(efx_nic_t *,
+ caddr_t, size_t);
} efx_lic_ops_t;
#endif
@@ -631,23 +611,23 @@ struct efx_nic_s {
uint32_t en_ev_qcount;
uint32_t en_rx_qcount;
uint32_t en_tx_qcount;
- efx_nic_ops_t *en_enop;
- efx_ev_ops_t *en_eevop;
- efx_tx_ops_t *en_etxop;
- efx_rx_ops_t *en_erxop;
+ const efx_nic_ops_t *en_enop;
+ const efx_ev_ops_t *en_eevop;
+ const efx_tx_ops_t *en_etxop;
+ const efx_rx_ops_t *en_erxop;
#if EFSYS_OPT_FILTER
efx_filter_t en_filter;
- efx_filter_ops_t *en_efop;
+ const efx_filter_ops_t *en_efop;
#endif /* EFSYS_OPT_FILTER */
#if EFSYS_OPT_MCDI
efx_mcdi_t en_mcdi;
#endif /* EFSYS_OPT_MCDI */
#if EFSYS_OPT_NVRAM
efx_nvram_type_t en_nvram_locked;
- efx_nvram_ops_t *en_envop;
+ const efx_nvram_ops_t *en_envop;
#endif /* EFSYS_OPT_NVRAM */
#if EFSYS_OPT_VPD
- efx_vpd_ops_t *en_evpdop;
+ const efx_vpd_ops_t *en_evpdop;
#endif /* EFSYS_OPT_VPD */
#if EFSYS_OPT_RX_SCALE
efx_rx_hash_support_t en_hash_support;
@@ -656,27 +636,10 @@ struct efx_nic_s {
#endif /* EFSYS_OPT_RX_SCALE */
uint32_t en_vport_id;
#if EFSYS_OPT_LICENSING
- efx_lic_ops_t *en_elop;
+ const efx_lic_ops_t *en_elop;
+ boolean_t en_licensing_supported;
#endif
union {
-#if EFSYS_OPT_FALCON
- struct {
- falcon_spi_dev_t enu_fsd[FALCON_SPI_NTYPES];
- falcon_i2c_t enu_fip;
- boolean_t enu_i2c_locked;
-#if EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
- const uint8_t *enu_forced_cfg;
-#endif /* EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE */
- uint8_t enu_mon_devid;
-#if EFSYS_OPT_PCIE_TUNE
- unsigned int enu_nlanes;
-#endif /* EFSYS_OPT_PCIE_TUNE */
- uint16_t enu_board_rev;
- boolean_t enu_internal_sram;
- uint8_t enu_sram_num_bank;
- uint8_t enu_sram_bank_size;
- } falcon;
-#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
struct {
#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
@@ -750,7 +713,6 @@ struct efx_evq_s {
#define EFX_EVQ_MAGIC 0x08081997
-#define EFX_EVQ_FALCON_TIMER_QUANTUM_NS 4968 /* 621 cycles */
#define EFX_EVQ_SIENA_TIMER_QUANTUM_NS 6144 /* 768 cycles */
struct efx_rxq_s {
@@ -814,10 +776,6 @@ struct efx_txq_s {
char rev; \
\
switch ((_enp)->en_family) { \
- case EFX_FAMILY_FALCON: \
- rev = 'B'; \
- break; \
- \
case EFX_FAMILY_SIENA: \
rev = 'C'; \
break; \
diff --git a/sys/dev/sfxge/common/efx_intr.c b/sys/dev/sfxge/common/efx_intr.c
index eb570fc..ee9301d 100644
--- a/sys/dev/sfxge/common/efx_intr.c
+++ b/sys/dev/sfxge/common/efx_intr.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,89 +35,75 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_intr_init(
+siena_intr_init(
__in efx_nic_t *enp,
__in efx_intr_type_t type,
__in efsys_mem_t *esmp);
static void
-falconsiena_intr_enable(
+siena_intr_enable(
__in efx_nic_t *enp);
static void
-falconsiena_intr_disable(
+siena_intr_disable(
__in efx_nic_t *enp);
static void
-falconsiena_intr_disable_unlocked(
+siena_intr_disable_unlocked(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_intr_trigger(
+siena_intr_trigger(
__in efx_nic_t *enp,
__in unsigned int level);
static void
-falconsiena_intr_fini(
+siena_intr_fini(
__in efx_nic_t *enp);
static void
-falconsiena_intr_status_line(
+siena_intr_status_line(
__in efx_nic_t *enp,
__out boolean_t *fatalp,
__out uint32_t *qmaskp);
static void
-falconsiena_intr_status_message(
+siena_intr_status_message(
__in efx_nic_t *enp,
__in unsigned int message,
__out boolean_t *fatalp);
static void
-falconsiena_intr_fatal(
+siena_intr_fatal(
__in efx_nic_t *enp);
static __checkReturn boolean_t
-falconsiena_intr_check_fatal(
+siena_intr_check_fatal(
__in efx_nic_t *enp);
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
-
+#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_FALCON
-static efx_intr_ops_t __efx_intr_falcon_ops = {
- falconsiena_intr_init, /* eio_init */
- falconsiena_intr_enable, /* eio_enable */
- falconsiena_intr_disable, /* eio_disable */
- falconsiena_intr_disable_unlocked, /* eio_disable_unlocked */
- falconsiena_intr_trigger, /* eio_trigger */
- falconsiena_intr_status_line, /* eio_status_line */
- falconsiena_intr_status_message, /* eio_status_message */
- falconsiena_intr_fatal, /* eio_fatal */
- falconsiena_intr_fini, /* eio_fini */
-};
-#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
-static efx_intr_ops_t __efx_intr_siena_ops = {
- falconsiena_intr_init, /* eio_init */
- falconsiena_intr_enable, /* eio_enable */
- falconsiena_intr_disable, /* eio_disable */
- falconsiena_intr_disable_unlocked, /* eio_disable_unlocked */
- falconsiena_intr_trigger, /* eio_trigger */
- falconsiena_intr_status_line, /* eio_status_line */
- falconsiena_intr_status_message, /* eio_status_message */
- falconsiena_intr_fatal, /* eio_fatal */
- falconsiena_intr_fini, /* eio_fini */
+static const efx_intr_ops_t __efx_intr_siena_ops = {
+ siena_intr_init, /* eio_init */
+ siena_intr_enable, /* eio_enable */
+ siena_intr_disable, /* eio_disable */
+ siena_intr_disable_unlocked, /* eio_disable_unlocked */
+ siena_intr_trigger, /* eio_trigger */
+ siena_intr_status_line, /* eio_status_line */
+ siena_intr_status_message, /* eio_status_message */
+ siena_intr_fatal, /* eio_fatal */
+ siena_intr_fini, /* eio_fini */
};
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static efx_intr_ops_t __efx_intr_ef10_ops = {
+static const efx_intr_ops_t __efx_intr_ef10_ops = {
ef10_intr_init, /* eio_init */
ef10_intr_enable, /* eio_enable */
ef10_intr_disable, /* eio_disable */
@@ -137,7 +123,7 @@ efx_intr_init(
__in efsys_mem_t *esmp)
{
efx_intr_t *eip = &(enp->en_intr);
- efx_intr_ops_t *eiop;
+ const efx_intr_ops_t *eiop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -155,27 +141,21 @@ efx_intr_init(
enp->en_mod_flags |= EFX_MOD_INTR;
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- eiop = (efx_intr_ops_t *)&__efx_intr_falcon_ops;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- eiop = (efx_intr_ops_t *)&__efx_intr_siena_ops;
+ eiop = &__efx_intr_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- eiop = (efx_intr_ops_t *)&__efx_intr_ef10_ops;
+ eiop = &__efx_intr_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- eiop = (efx_intr_ops_t *)&__efx_intr_ef10_ops;
+ eiop = &__efx_intr_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -207,7 +187,7 @@ efx_intr_fini(
__in efx_nic_t *enp)
{
efx_intr_t *eip = &(enp->en_intr);
- efx_intr_ops_t *eiop = eip->ei_eiop;
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
@@ -223,7 +203,7 @@ efx_intr_enable(
__in efx_nic_t *enp)
{
efx_intr_t *eip = &(enp->en_intr);
- efx_intr_ops_t *eiop = eip->ei_eiop;
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
@@ -236,7 +216,7 @@ efx_intr_disable(
__in efx_nic_t *enp)
{
efx_intr_t *eip = &(enp->en_intr);
- efx_intr_ops_t *eiop = eip->ei_eiop;
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
@@ -249,7 +229,7 @@ efx_intr_disable_unlocked(
__in efx_nic_t *enp)
{
efx_intr_t *eip = &(enp->en_intr);
- efx_intr_ops_t *eiop = eip->ei_eiop;
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
@@ -264,7 +244,7 @@ efx_intr_trigger(
__in unsigned int level)
{
efx_intr_t *eip = &(enp->en_intr);
- efx_intr_ops_t *eiop = eip->ei_eiop;
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
@@ -279,7 +259,7 @@ efx_intr_status_line(
__out uint32_t *qmaskp)
{
efx_intr_t *eip = &(enp->en_intr);
- efx_intr_ops_t *eiop = eip->ei_eiop;
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
@@ -294,7 +274,7 @@ efx_intr_status_message(
__out boolean_t *fatalp)
{
efx_intr_t *eip = &(enp->en_intr);
- efx_intr_ops_t *eiop = eip->ei_eiop;
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
@@ -307,7 +287,7 @@ efx_intr_fatal(
__in efx_nic_t *enp)
{
efx_intr_t *eip = &(enp->en_intr);
- efx_intr_ops_t *eiop = eip->ei_eiop;
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
@@ -320,10 +300,10 @@ efx_intr_fatal(
/* ************************************************************************* */
/* ************************************************************************* */
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_intr_init(
+siena_intr_init(
__in efx_nic_t *enp,
__in efx_intr_type_t type,
__in efsys_mem_t *esmp)
@@ -364,7 +344,7 @@ falconsiena_intr_init(
}
static void
-falconsiena_intr_enable(
+siena_intr_enable(
__in efx_nic_t *enp)
{
efx_intr_t *eip = &(enp->en_intr);
@@ -378,7 +358,7 @@ falconsiena_intr_enable(
}
static void
-falconsiena_intr_disable(
+siena_intr_disable(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -391,7 +371,7 @@ falconsiena_intr_disable(
}
static void
-falconsiena_intr_disable_unlocked(
+siena_intr_disable_unlocked(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -404,7 +384,7 @@ falconsiena_intr_disable_unlocked(
}
static __checkReturn efx_rc_t
-falconsiena_intr_trigger(
+siena_intr_trigger(
__in efx_nic_t *enp,
__in unsigned int level)
{
@@ -417,24 +397,9 @@ falconsiena_intr_trigger(
/* bug16757: No event queues can be initialized */
EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
- switch (enp->en_family) {
- case EFX_FAMILY_FALCON:
- if (level >= EFX_NINTR_FALCON) {
- rc = EINVAL;
- goto fail1;
- }
- break;
-
- case EFX_FAMILY_SIENA:
- if (level >= EFX_NINTR_SIENA) {
- rc = EINVAL;
- goto fail1;
- }
- break;
-
- default:
- EFSYS_ASSERT(B_FALSE);
- break;
+ if (level >= EFX_NINTR_SIENA) {
+ rc = EINVAL;
+ goto fail1;
}
if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL))
@@ -472,7 +437,7 @@ fail1:
}
static __checkReturn boolean_t
-falconsiena_intr_check_fatal(
+siena_intr_check_fatal(
__in efx_nic_t *enp)
{
efx_intr_t *eip = &(enp->en_intr);
@@ -496,7 +461,7 @@ falconsiena_intr_check_fatal(
}
static void
-falconsiena_intr_status_line(
+siena_intr_status_line(
__in efx_nic_t *enp,
__out boolean_t *fatalp,
__out uint32_t *qmaskp)
@@ -517,13 +482,13 @@ falconsiena_intr_status_line(
EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
if (*qmaskp & (1U << eip->ei_level))
- *fatalp = falconsiena_intr_check_fatal(enp);
+ *fatalp = siena_intr_check_fatal(enp);
else
*fatalp = B_FALSE;
}
static void
-falconsiena_intr_status_message(
+siena_intr_status_message(
__in efx_nic_t *enp,
__in unsigned int message,
__out boolean_t *fatalp)
@@ -534,14 +499,14 @@ falconsiena_intr_status_message(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
if (message == eip->ei_level)
- *fatalp = falconsiena_intr_check_fatal(enp);
+ *fatalp = siena_intr_check_fatal(enp);
else
*fatalp = B_FALSE;
}
static void
-falconsiena_intr_fatal(
+siena_intr_fatal(
__in efx_nic_t *enp)
{
#if EFSYS_OPT_DECODE_INTR_FATAL
@@ -597,7 +562,7 @@ falconsiena_intr_fatal(
}
static void
-falconsiena_intr_fini(
+siena_intr_fini(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -607,4 +572,4 @@ falconsiena_intr_fini(
EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
}
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/efx_lic.c b/sys/dev/sfxge/common/efx_lic.c
index 33c8aba..127ead95 100644
--- a/sys/dev/sfxge/common/efx_lic.c
+++ b/sys/dev/sfxge/common/efx_lic.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,6 +36,104 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_LICENSING
+#include "ef10_tlv_layout.h"
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON */
+
+
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
@@ -47,11 +145,20 @@ efx_mcdi_fc_license_get_key_stats(
__in efx_nic_t *enp,
__out efx_key_stats_t *eksp);
-static efx_lic_ops_t __efx_lic_v1_ops = {
+static const efx_lic_ops_t __efx_lic_v1_ops = {
efx_mcdi_fc_license_update_license, /* elo_update_licenses */
efx_mcdi_fc_license_get_key_stats, /* elo_get_key_stats */
NULL, /* elo_app_state */
NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
};
#endif /* EFSYS_OPT_SIENA */
@@ -73,11 +180,20 @@ efx_mcdi_licensed_app_state(
__in uint64_t app_id,
__out boolean_t *licensedp);
-static efx_lic_ops_t __efx_lic_v2_ops = {
+static const efx_lic_ops_t __efx_lic_v2_ops = {
efx_mcdi_licensing_update_licenses, /* elo_update_licenses */
efx_mcdi_licensing_get_key_stats, /* elo_get_key_stats */
efx_mcdi_licensed_app_state, /* elo_app_state */
NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
};
#endif /* EFSYS_OPT_HUNTINGTON */
@@ -108,11 +224,111 @@ efx_mcdi_licensing_v3_get_id(
__out_bcount_part_opt(buffer_size, *lengthp)
uint8_t *bufferp);
-static efx_lic_ops_t __efx_lic_v3_ops = {
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+static const efx_lic_ops_t __efx_lic_v3_ops = {
efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */
efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */
efx_mcdi_licensing_v3_app_state, /* elo_app_state */
efx_mcdi_licensing_v3_get_id, /* elo_get_id */
+ efx_lic_v3_find_start, /* elo_find_start*/
+ efx_lic_v3_find_end, /* elo_find_end */
+ efx_lic_v3_find_key, /* elo_find_key */
+ efx_lic_v3_validate_key, /* elo_validate_key */
+ efx_lic_v3_read_key, /* elo_read_key */
+ efx_lic_v3_write_key, /* elo_write_key */
+ efx_lic_v3_delete_key, /* elo_delete_key */
+ efx_lic_v3_create_partition, /* elo_create_partition */
+ efx_lic_v3_finish_partition, /* elo_finish_partition */
};
#endif /* EFSYS_OPT_MEDFORD */
@@ -133,12 +349,15 @@ efx_mcdi_fc_license_update_license(
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
(void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_FC_OP_LICENSE;
+ req.emr_cmd = MC_CMD_FC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
req.emr_out_buf = payload;
req.emr_out_length = 0;
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE);
@@ -177,16 +396,19 @@ efx_mcdi_fc_license_get_key_stats(
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
(void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_FC_OP_LICENSE;
+ req.emr_cmd = MC_CMD_FC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN;
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
MC_CMD_FC_IN_LICENSE_GET_KEY_STATS);
- efx_mcdi_execute(enp, &req);
+ efx_mcdi_execute_quiet(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
@@ -223,6 +445,267 @@ fail1:
#endif /* EFSYS_OPT_SIENA */
+/* V1 and V2 Partition format - based on a 16-bit TLV format */
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+/*
+ * V1/V2 format - defined in SF-108542-TC section 4.2:
+ * Type (T): 16bit - revision/HMAC algorithm
+ * Length (L): 16bit - value length in bytes
+ * Value (V): L bytes - payload
+ */
+#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256)
+#define EFX_LICENSE_V1V2_HEADER_LENGTH (2*sizeof(uint16_t))
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *startp = 0;
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ return (0);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ boolean_t found;
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH)
+ goto fail1;
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t*)&bufferp[offset])[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t*)&bufferp[offset])[1]);
+ if ((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) ||
+ (tlv_type == 0 && tlv_length == 0)) {
+ found = B_FALSE;
+ } else {
+ *startp = offset;
+ *lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ found = B_TRUE;
+ }
+ return (found);
+
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) {
+ goto fail1;
+ }
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t*)keyp)[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t*)keyp)[1]);
+
+ if(tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) {
+ goto fail2;
+ }
+ if (tlv_type == 0) {
+ goto fail3;
+ }
+ if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) {
+ goto fail4;
+ }
+
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ if (key_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ memcpy(keyp, &bufferp[offset], length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ // Ensure space for terminator remains
+ if ((offset + length) >
+ (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH) ) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ memcpy(bufferp + offset, keyp, length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ efx_rc_t rc;
+ uint32_t move_start = offset + length;
+ uint32_t move_length = end - move_start;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(end <= buffer_size);
+
+ // Shift everything after the key down
+ memmove(bufferp + offset, bufferp + move_start, move_length);
+
+ *deltap = length;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size);
+
+ // Write terminator
+ memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH);
+ return (0);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
/* V2 Licensing - used by Huntington family only. See SF-113611-TC */
#if EFSYS_OPT_HUNTINGTON
@@ -452,7 +935,7 @@ efx_mcdi_licensing_v3_report_license(
MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
- efx_mcdi_execute(enp, &req);
+ efx_mcdi_execute_quiet(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
@@ -575,11 +1058,11 @@ efx_mcdi_licensing_v3_get_id(
req.emr_in_buf = bufferp;
req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
req.emr_out_buf = bufferp;
- req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN);
+ req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
(void) memset(bufferp, 0, req.emr_out_length);
}
- efx_mcdi_execute(enp, &req);
+ efx_mcdi_execute_quiet(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
@@ -617,6 +1100,228 @@ fail1:
return (rc);
}
+/* V3 format uses Huntington TLV format partition. See SF-108797-SW */
+#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64)
+#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160)
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item_start(bufferp, buffer_size, startp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item(bufferp, buffer_size,
+ offset, startp, lengthp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ // Check key is a valid V3 key
+ efx_rc_t rc;
+ uint8_t key_type;
+ uint8_t key_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) {
+ goto fail1;
+ }
+
+ if (length > EFX_LICENSE_V3_KEY_LENGTH_MAX) {
+ goto fail2;
+ }
+
+ key_type = ((uint8_t*)keyp)[0];
+ key_length = ((uint8_t*)keyp)[1];
+
+ if (key_type < 3) {
+ goto fail3;
+ }
+ if (key_length > length) {
+ goto fail4;
+ }
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_get_item(bufferp, buffer_size,
+ offset, length, keyp, key_max_size, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX);
+
+ return ef10_nvram_buffer_insert_item(bufferp, buffer_size,
+ offset, keyp, length, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = ef10_nvram_buffer_delete_item(bufferp,
+ buffer_size, offset, length, end)) != 0) {
+ goto fail1;
+ }
+
+ *deltap = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ // Construct empty partition
+ if ((rc = ef10_nvram_buffer_create(enp,
+ NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_buffer_finish(bufferp,
+ buffer_size)) != 0) {
+ goto fail1;
+ }
+
+ // Validate completed partition
+ if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
#endif /* EFSYS_OPT_MEDFORD */
@@ -624,7 +1329,8 @@ fail1:
efx_lic_init(
__in efx_nic_t *enp)
{
- efx_lic_ops_t *elop;
+ const efx_lic_ops_t *elop;
+ efx_key_stats_t eks;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -635,19 +1341,19 @@ efx_lic_init(
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- elop = (efx_lic_ops_t *)&__efx_lic_v1_ops;
+ elop = &__efx_lic_v1_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- elop = (efx_lic_ops_t *)&__efx_lic_v2_ops;
+ elop = &__efx_lic_v2_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- elop = (efx_lic_ops_t *)&__efx_lic_v3_ops;
+ elop = &__efx_lic_v3_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -660,6 +1366,13 @@ efx_lic_init(
enp->en_elop = elop;
enp->en_mod_flags |= EFX_MOD_LIC;
+ /* Probe for support */
+ if (efx_lic_get_key_stats(enp, &eks) == 0) {
+ enp->en_licensing_supported = B_TRUE;
+ } else {
+ enp->en_licensing_supported = B_FALSE;
+ }
+
return (0);
fail1:
@@ -668,11 +1381,22 @@ fail1:
return (rc);
}
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ return enp->en_licensing_supported;
+}
+
void
efx_lic_fini(
__in efx_nic_t *enp)
{
- efx_lic_ops_t *elop = enp->en_elop;
+ const efx_lic_ops_t *elop = enp->en_elop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
@@ -687,7 +1411,7 @@ efx_lic_fini(
efx_lic_update_licenses(
__in efx_nic_t *enp)
{
- efx_lic_ops_t *elop = enp->en_elop;
+ const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -709,7 +1433,7 @@ efx_lic_get_key_stats(
__in efx_nic_t *enp,
__out efx_key_stats_t *eksp)
{
- efx_lic_ops_t *elop = enp->en_elop;
+ const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -732,23 +1456,20 @@ efx_lic_app_state(
__in uint64_t app_id,
__out boolean_t *licensedp)
{
- efx_lic_ops_t *elop = enp->en_elop;
+ const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
- if (elop->elo_app_state == NULL) {
- rc = ENOTSUP;
- goto fail1;
- }
+ if (elop->elo_app_state == NULL)
+ return (ENOTSUP);
+
if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0)
- goto fail2;
+ goto fail1;
return (0);
-fail2:
- EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -764,25 +1485,274 @@ efx_lic_get_id(
__out_opt uint8_t *bufferp
)
{
- efx_lic_ops_t *elop = enp->en_elop;
+ const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
- if (elop->elo_get_id == NULL) {
- rc = ENOTSUP;
- goto fail1;
- }
+ if (elop->elo_get_id == NULL)
+ return (ENOTSUP);
if ((rc = elop->elo_get_id(enp, buffer_size, typep,
lengthp, bufferp)) != 0)
- goto fail2;
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Buffer management API - abstracts varying TLV format used for License partition */
+
+ __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ boolean_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ EFSYS_ASSERT(bufferp);
+ EFSYS_ASSERT(startp);
+ EFSYS_ASSERT(lengthp);
+
+ return (elop->elo_find_key(enp, bufferp, buffer_size, offset,
+ startp, lengthp));
+}
+
+
+/* Validate that the buffer contains a single key in a recognised format.
+** An empty or terminator buffer is not accepted as a valid key.
+*/
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ boolean_t rc;
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE)
+ goto fail1;
+
+ return (B_TRUE);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset,
+ length, keyp, key_max_size, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset,
+ keyp, length, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset,
+ length, end, deltap)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
return (0);
-fail2:
- EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
diff --git a/sys/dev/sfxge/common/efx_mac.c b/sys/dev/sfxge/common/efx_mac.c
index 4868c4b..0b17351 100644
--- a/sys/dev/sfxge/common/efx_mac.c
+++ b/sys/dev/sfxge/common/efx_mac.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,75 +34,23 @@ __FBSDID("$FreeBSD$");
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_MAC_FALCON_GMAC
-#include "falcon_gmac.h"
-#endif
-
-#if EFSYS_OPT_MAC_FALCON_XMAC
-#include "falcon_xmac.h"
-#endif
-
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_mac_multicast_list_set(
+siena_mac_multicast_list_set(
__in efx_nic_t *enp);
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
-
-#if EFSYS_OPT_MAC_FALCON_GMAC
-static efx_mac_ops_t __efx_falcon_gmac_ops = {
- falcon_gmac_reset, /* emo_reset */
- falcon_mac_poll, /* emo_poll */
- falcon_mac_up, /* emo_up */
- falcon_gmac_reconfigure, /* emo_addr_set */
- falcon_gmac_reconfigure, /* emo_pdu_set */
- falcon_gmac_reconfigure, /* emo_reconfigure */
- falconsiena_mac_multicast_list_set, /* emo_multicast_list_set */
- NULL, /* emo_filter_set_default_rxq */
- NULL, /* emo_filter_default_rxq_clear */
-#if EFSYS_OPT_LOOPBACK
- falcon_mac_loopback_set, /* emo_loopback_set */
-#endif /* EFSYS_OPT_LOOPBACK */
-#if EFSYS_OPT_MAC_STATS
- falcon_mac_stats_upload, /* emo_stats_upload */
- NULL, /* emo_stats_periodic */
- falcon_gmac_stats_update /* emo_stats_update */
-#endif /* EFSYS_OPT_MAC_STATS */
-};
-#endif /* EFSYS_OPT_MAC_FALCON_GMAC */
-
-#if EFSYS_OPT_MAC_FALCON_XMAC
-static efx_mac_ops_t __efx_falcon_xmac_ops = {
- falcon_xmac_reset, /* emo_reset */
- falcon_mac_poll, /* emo_poll */
- falcon_mac_up, /* emo_up */
- falcon_xmac_reconfigure, /* emo_addr_set */
- falcon_xmac_reconfigure, /* emo_pdu_set */
- falcon_xmac_reconfigure, /* emo_reconfigure */
- falconsiena_mac_multicast_list_set, /* emo_multicast_list_set */
- NULL, /* emo_filter_set_default_rxq */
- NULL, /* emo_filter_default_rxq_clear */
-#if EFSYS_OPT_LOOPBACK
- falcon_mac_loopback_set, /* emo_loopback_set */
-#endif /* EFSYS_OPT_LOOPBACK */
-#if EFSYS_OPT_MAC_STATS
- falcon_mac_stats_upload, /* emo_stats_upload */
- NULL, /* emo_stats_periodic */
- falcon_xmac_stats_update /* emo_stats_update */
-#endif /* EFSYS_OPT_MAC_STATS */
-};
-#endif /* EFSYS_OPT_MAC_FALCON_XMAC */
+#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_SIENA
-static efx_mac_ops_t __efx_siena_mac_ops = {
- NULL, /* emo_reset */
+static const efx_mac_ops_t __efx_siena_mac_ops = {
siena_mac_poll, /* emo_poll */
siena_mac_up, /* emo_up */
siena_mac_reconfigure, /* emo_addr_set */
siena_mac_reconfigure, /* emo_pdu_set */
+ siena_mac_pdu_get, /* emo_pdu_get */
siena_mac_reconfigure, /* emo_reconfigure */
- falconsiena_mac_multicast_list_set, /* emo_multicast_list_set */
+ siena_mac_multicast_list_set, /* emo_multicast_list_set */
NULL, /* emo_filter_set_default_rxq */
NULL, /* emo_filter_default_rxq_clear */
#if EFSYS_OPT_LOOPBACK
@@ -117,12 +65,12 @@ static efx_mac_ops_t __efx_siena_mac_ops = {
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static efx_mac_ops_t __efx_ef10_mac_ops = {
- NULL, /* emo_reset */
+static const efx_mac_ops_t __efx_ef10_mac_ops = {
ef10_mac_poll, /* emo_poll */
ef10_mac_up, /* emo_up */
ef10_mac_addr_set, /* emo_addr_set */
ef10_mac_pdu_set, /* emo_pdu_set */
+ ef10_mac_pdu_get, /* emo_pdu_get */
ef10_mac_reconfigure, /* emo_reconfigure */
ef10_mac_multicast_list_set, /* emo_multicast_list_set */
ef10_mac_filter_default_rxq_set, /* emo_filter_default_rxq_set */
@@ -139,48 +87,13 @@ static efx_mac_ops_t __efx_ef10_mac_ops = {
};
#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
-static efx_mac_ops_t *__efx_mac_ops[] = {
- /* [EFX_MAC_INVALID] */
- NULL,
- /* [EFX_MAC_FALCON_GMAC] */
-#if EFSYS_OPT_MAC_FALCON_GMAC
- &__efx_falcon_gmac_ops,
-#else
- NULL,
-#endif
- /* [EFX_MAC_FALCON_XMAC] */
-#if EFSYS_OPT_MAC_FALCON_XMAC
- &__efx_falcon_xmac_ops,
-#else
- NULL,
-#endif
- /* [EFX_MAC_SIENA] */
-#if EFSYS_OPT_SIENA
- &__efx_siena_mac_ops,
-#else
- NULL,
-#endif
- /* [EFX_MAC_HUNTINGTON] */
-#if EFSYS_OPT_HUNTINGTON
- &__efx_ef10_mac_ops,
-#else
- NULL,
-#endif
- /* [EFX_MAC_MEDFORD] */
-#if EFSYS_OPT_MEDFORD
- &__efx_ef10_mac_ops,
-#else
- NULL,
-#endif
-};
-
__checkReturn efx_rc_t
efx_mac_pdu_set(
__in efx_nic_t *enp,
__in size_t pdu)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
uint32_t old_pdu;
efx_rc_t rc;
@@ -218,13 +131,33 @@ fail1:
return (rc);
}
+ __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ if ((rc = emop->emo_pdu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
__checkReturn efx_rc_t
efx_mac_addr_set(
__in efx_nic_t *enp,
__in uint8_t *addr)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
uint8_t old_addr[6];
uint32_t oui;
efx_rc_t rc;
@@ -272,7 +205,7 @@ efx_mac_filter_set(
__in boolean_t brdcst)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
boolean_t old_all_unicst;
boolean_t old_mulcst;
boolean_t old_all_mulcst;
@@ -314,7 +247,7 @@ efx_mac_drain(
__in boolean_t enabled)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -326,21 +259,11 @@ efx_mac_drain(
epp->ep_mac_drain = enabled;
- if (enabled && emop->emo_reset != NULL) {
- if ((rc = emop->emo_reset(enp)) != 0)
- goto fail1;
-
- EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_MAC);
- enp->en_reset_flags &= ~EFX_RESET_PHY;
- }
-
if ((rc = emop->emo_reconfigure(enp)) != 0)
- goto fail2;
+ goto fail1;
return (0);
-fail2:
- EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -353,7 +276,7 @@ efx_mac_up(
__out boolean_t *mac_upp)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -377,8 +300,8 @@ efx_mac_fcntl_set(
__in boolean_t autoneg)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
unsigned int old_fcntl;
boolean_t old_autoneg;
unsigned int old_adv_cap;
@@ -477,7 +400,7 @@ efx_mac_multicast_list_set(
__in int count)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
uint8_t *old_mulcst_addr_list = NULL;
uint32_t old_mulcst_addr_count;
efx_rc_t rc;
@@ -553,7 +476,7 @@ efx_mac_filter_default_rxq_set(
__in boolean_t using_rss)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -578,7 +501,7 @@ efx_mac_filter_default_rxq_clear(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
@@ -698,7 +621,7 @@ efx_mac_stats_upload(
__in efsys_mem_t *esmp)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -731,7 +654,7 @@ efx_mac_stats_periodic(
__in boolean_t events)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -766,7 +689,7 @@ efx_mac_stats_update(
__inout_opt uint32_t *generationp)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -788,85 +711,43 @@ efx_mac_select(
{
efx_port_t *epp = &(enp->en_port);
efx_mac_type_t type = EFX_MAC_INVALID;
- efx_mac_ops_t *emop;
+ const efx_mac_ops_t *emop;
int rc = EINVAL;
+ switch (enp->en_family) {
#if EFSYS_OPT_SIENA
- if (enp->en_family == EFX_FAMILY_SIENA) {
+ case EFX_FAMILY_SIENA:
+ emop = &__efx_siena_mac_ops;
type = EFX_MAC_SIENA;
- goto chosen;
- }
-#endif
+ break;
+#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
- if (enp->en_family == EFX_FAMILY_HUNTINGTON) {
+ case EFX_FAMILY_HUNTINGTON:
+ emop = &__efx_ef10_mac_ops;
type = EFX_MAC_HUNTINGTON;
- goto chosen;
- }
-#endif
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
- if (enp->en_family == EFX_FAMILY_MEDFORD) {
+ case EFX_FAMILY_MEDFORD:
+ emop = &__efx_ef10_mac_ops;
type = EFX_MAC_MEDFORD;
- goto chosen;
- }
-#endif
-
-#if EFSYS_OPT_FALCON
- switch (epp->ep_link_mode) {
-#if EFSYS_OPT_MAC_FALCON_GMAC
- case EFX_LINK_100HDX:
- case EFX_LINK_100FDX:
- case EFX_LINK_1000HDX:
- case EFX_LINK_1000FDX:
- type = EFX_MAC_FALCON_GMAC;
- goto chosen;
-#endif /* EFSYS_OPT_FALCON_GMAC */
-
-#if EFSYS_OPT_MAC_FALCON_XMAC
- case EFX_LINK_10000FDX:
- type = EFX_MAC_FALCON_XMAC;
- goto chosen;
-#endif /* EFSYS_OPT_FALCON_XMAC */
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
default:
-#if EFSYS_OPT_MAC_FALCON_GMAC && EFSYS_OPT_MAC_FALCON_XMAC
- /* Only initialise a MAC supported by the PHY */
- if (epp->ep_phy_cap_mask &
- ((1 << EFX_PHY_CAP_1000FDX) |
- (1 << EFX_PHY_CAP_1000HDX) |
- (1 << EFX_PHY_CAP_100FDX) |
- (1 << EFX_PHY_CAP_100HDX) |
- (1 << EFX_PHY_CAP_10FDX) |
- (1 << EFX_PHY_CAP_10FDX)))
- type = EFX_MAC_FALCON_GMAC;
- else
- type = EFX_MAC_FALCON_XMAC;
-#elif EFSYS_OPT_MAC_FALCON_GMAC
- type = EFX_MAC_FALCON_GMAC;
-#else
- type = EFX_MAC_FALCON_XMAC;
-#endif
- goto chosen;
+ rc = EINVAL;
+ goto fail1;
}
-#endif /* EFSYS_OPT_FALCON */
-chosen:
EFSYS_ASSERT(type != EFX_MAC_INVALID);
EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES);
- emop = epp->ep_emop = (efx_mac_ops_t *)__efx_mac_ops[type];
EFSYS_ASSERT(emop != NULL);
+ epp->ep_emop = emop;
epp->ep_mac_type = type;
- if (emop->emo_reset != NULL) {
- if ((rc = emop->emo_reset(enp)) != 0)
- goto fail1;
-
- EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_MAC);
- enp->en_reset_flags &= ~EFX_RESET_MAC;
- }
-
return (0);
fail1:
@@ -876,13 +757,13 @@ fail1:
}
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
#define EFX_MAC_HASH_BITS (1 << 8)
/* Compute the multicast hash as used on Falcon and Siena. */
static void
-falconsiena_mac_multicast_hash_compute(
+siena_mac_multicast_hash_compute(
__in_ecount(6*count) uint8_t const *addrs,
__in int count,
__out efx_oword_t *hash_low,
@@ -912,11 +793,11 @@ falconsiena_mac_multicast_hash_compute(
}
static __checkReturn efx_rc_t
-falconsiena_mac_multicast_list_set(
+siena_mac_multicast_list_set(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_oword_t old_hash[2];
efx_rc_t rc;
@@ -925,10 +806,11 @@ falconsiena_mac_multicast_list_set(
memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash));
- falconsiena_mac_multicast_hash_compute(epp->ep_mulcst_addr_list,
- epp->ep_mulcst_addr_count,
- &epp->ep_multicst_hash[0],
- &epp->ep_multicst_hash[1]);
+ siena_mac_multicast_hash_compute(
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count,
+ &epp->ep_multicst_hash[0],
+ &epp->ep_multicst_hash[1]);
if ((rc = emop->emo_reconfigure(enp)) != 0)
goto fail1;
@@ -943,4 +825,4 @@ fail1:
return (rc);
}
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/efx_mcdi.c b/sys/dev/sfxge/common/efx_mcdi.c
index bb9d9cf..e7e5f94 100644
--- a/sys/dev/sfxge/common/efx_mcdi.c
+++ b/sys/dev/sfxge/common/efx_mcdi.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2008-2015 Solarflare Communications Inc.
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,7 +59,7 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_SIENA
-static efx_mcdi_ops_t __efx_mcdi_siena_ops = {
+static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
siena_mcdi_init, /* emco_init */
siena_mcdi_send_request, /* emco_send_request */
siena_mcdi_poll_reboot, /* emco_poll_reboot */
@@ -73,7 +73,7 @@ static efx_mcdi_ops_t __efx_mcdi_siena_ops = {
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
+static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
ef10_mcdi_init, /* emco_init */
ef10_mcdi_send_request, /* emco_send_request */
ef10_mcdi_poll_reboot, /* emco_poll_reboot */
@@ -92,35 +92,28 @@ efx_mcdi_init(
__in efx_nic_t *enp,
__in const efx_mcdi_transport_t *emtp)
{
- efx_mcdi_ops_t *emcop;
+ const efx_mcdi_ops_t *emcop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- emcop = NULL;
- emtp = NULL;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- emcop = (efx_mcdi_ops_t *)&__efx_mcdi_siena_ops;
+ emcop = &__efx_mcdi_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- emcop = (efx_mcdi_ops_t *)&__efx_mcdi_ef10_ops;
+ emcop = &__efx_mcdi_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- emcop = (efx_mcdi_ops_t *)&__efx_mcdi_ef10_ops;
+ emcop = &__efx_mcdi_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -168,7 +161,7 @@ efx_mcdi_fini(
__in efx_nic_t *enp)
{
efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
- efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
@@ -204,7 +197,7 @@ efx_mcdi_send_request(
__in void *sdup,
__in size_t sdu_len)
{
- efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
}
@@ -213,7 +206,7 @@ static efx_rc_t
efx_mcdi_poll_reboot(
__in efx_nic_t *enp)
{
- efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
efx_rc_t rc;
rc = emcop->emco_poll_reboot(enp);
@@ -224,7 +217,7 @@ static boolean_t
efx_mcdi_poll_response(
__in efx_nic_t *enp)
{
- efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
boolean_t available;
available = emcop->emco_poll_response(enp);
@@ -238,7 +231,7 @@ efx_mcdi_read_response(
__in size_t offset,
__in size_t length)
{
- efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
emcop->emco_read_response(enp, bufferp, offset, length);
}
@@ -526,6 +519,11 @@ efx_mcdi_request_poll(
if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
emip->emi_pending_req = NULL;
EFSYS_UNLOCK(enp->en_eslp, state);
+
+ /* Reboot/Assertion */
+ if (rc == EIO || rc == EINTR)
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
goto fail1;
}
}
@@ -542,6 +540,9 @@ efx_mcdi_request_poll(
/* Request complete */
emip->emi_pending_req = NULL;
+ /* Ensure stale MCDI requests fail after an MC reboot. */
+ emip->emi_new_epoch = B_FALSE;
+
EFSYS_UNLOCK(enp->en_eslp, state);
if ((rc = emrp->emr_rc) != 0)
@@ -557,10 +558,6 @@ fail1:
if (!emrp->emr_quiet)
EFSYS_PROBE1(fail1, efx_rc_t, rc);
- /* Reboot/Assertion */
- if (rc == EIO || rc == EINTR)
- efx_mcdi_raise_exception(enp, emrp, rc);
-
return (B_TRUE);
}
@@ -639,6 +636,8 @@ efx_mcdi_request_errcode(
return (EALREADY);
/* MCDI v2 */
+ case MC_CMD_ERR_EEXIST:
+ return (EEXIST);
#ifdef MC_CMD_ERR_EAGAIN
case MC_CMD_ERR_EAGAIN:
return (EAGAIN);
@@ -1435,10 +1434,6 @@ efx_mcdi_get_phy_cfg(
(1 << EFX_PHY_LED_ON));
#endif /* EFSYS_OPT_PHY_LED_CONTROL */
-#if EFSYS_OPT_PHY_PROPS
- encp->enc_phy_nprops = 0;
-#endif /* EFSYS_OPT_PHY_PROPS */
-
/* Get the media type of the fixed port, if recognised. */
EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
@@ -1497,7 +1492,7 @@ efx_mcdi_firmware_update_supported(
__in efx_nic_t *enp,
__out boolean_t *supportedp)
{
- efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
efx_rc_t rc;
if (emcop != NULL) {
@@ -1522,7 +1517,7 @@ efx_mcdi_macaddr_change_supported(
__in efx_nic_t *enp,
__out boolean_t *supportedp)
{
- efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
efx_rc_t rc;
if (emcop != NULL) {
@@ -1547,7 +1542,7 @@ efx_mcdi_link_control_supported(
__in efx_nic_t *enp,
__out boolean_t *supportedp)
{
- efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
efx_rc_t rc;
if (emcop != NULL) {
@@ -1572,7 +1567,7 @@ efx_mcdi_mac_spoofing_supported(
__in efx_nic_t *enp,
__out boolean_t *supportedp)
{
- efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
efx_rc_t rc;
if (emcop != NULL) {
diff --git a/sys/dev/sfxge/common/efx_mcdi.h b/sys/dev/sfxge/common/efx_mcdi.h
index a96bd77..ffa50f1 100644
--- a/sys/dev/sfxge/common/efx_mcdi.h
+++ b/sys/dev/sfxge/common/efx_mcdi.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/efx_mon.c b/sys/dev/sfxge/common/efx_mon.c
index 9803b43..13aedac 100644
--- a/sys/dev/sfxge/common/efx_mon.c
+++ b/sys/dev/sfxge/common/efx_mon.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,18 +34,6 @@ __FBSDID("$FreeBSD$");
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_MON_NULL
-#include "nullmon.h"
-#endif
-
-#if EFSYS_OPT_MON_LM87
-#include "lm87.h"
-#endif
-
-#if EFSYS_OPT_MON_MAX6647
-#include "max6647.h"
-#endif
-
#if EFSYS_OPT_MON_MCDI
#include "mcdi_mon.h"
#endif
@@ -54,11 +42,8 @@ __FBSDID("$FreeBSD$");
static const char *__efx_mon_name[] = {
"",
- "nullmon",
- "lm87",
- "max6647",
"sfx90x0",
- "sfx91x0"
+ "sfx91x0",
"sfx92x0"
};
@@ -77,40 +62,8 @@ efx_mon_name(
#endif /* EFSYS_OPT_NAMES */
-#if EFSYS_OPT_MON_NULL
-static efx_mon_ops_t __efx_mon_null_ops = {
- nullmon_reset, /* emo_reset */
- nullmon_reconfigure, /* emo_reconfigure */
-#if EFSYS_OPT_MON_STATS
- nullmon_stats_update /* emo_stats_update */
-#endif /* EFSYS_OPT_MON_STATS */
-};
-#endif
-
-#if EFSYS_OPT_MON_LM87
-static efx_mon_ops_t __efx_mon_lm87_ops = {
- lm87_reset, /* emo_reset */
- lm87_reconfigure, /* emo_reconfigure */
-#if EFSYS_OPT_MON_STATS
- lm87_stats_update /* emo_stats_update */
-#endif /* EFSYS_OPT_MON_STATS */
-};
-#endif
-
-#if EFSYS_OPT_MON_MAX6647
-static efx_mon_ops_t __efx_mon_max6647_ops = {
- max6647_reset, /* emo_reset */
- max6647_reconfigure, /* emo_reconfigure */
-#if EFSYS_OPT_MON_STATS
- max6647_stats_update /* emo_stats_update */
-#endif /* EFSYS_OPT_MON_STATS */
-};
-#endif
-
#if EFSYS_OPT_MON_MCDI
-static efx_mon_ops_t __efx_mon_mcdi_ops = {
- NULL, /* emo_reset */
- NULL, /* emo_reconfigure */
+static const efx_mon_ops_t __efx_mon_mcdi_ops = {
#if EFSYS_OPT_MON_STATS
mcdi_mon_stats_update /* emo_stats_update */
#endif /* EFSYS_OPT_MON_STATS */
@@ -124,7 +77,7 @@ efx_mon_init(
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mon_t *emp = &(enp->en_mon);
- efx_mon_ops_t *emop;
+ const efx_mon_ops_t *emop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -141,21 +94,6 @@ efx_mon_init(
EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
switch (emp->em_type) {
-#if EFSYS_OPT_MON_NULL
- case EFX_MON_NULL:
- emop = &__efx_mon_null_ops;
- break;
-#endif
-#if EFSYS_OPT_MON_LM87
- case EFX_MON_LM87:
- emop = &__efx_mon_lm87_ops;
- break;
-#endif
-#if EFSYS_OPT_MON_MAX6647
- case EFX_MON_MAX6647:
- emop = &__efx_mon_max6647_ops;
- break;
-#endif
#if EFSYS_OPT_MON_MCDI
case EFX_MON_SFC90X0:
case EFX_MON_SFC91X0:
@@ -168,29 +106,11 @@ efx_mon_init(
goto fail2;
}
- if (emop->emo_reset != NULL) {
- if ((rc = emop->emo_reset(enp)) != 0)
- goto fail3;
- }
-
- if (emop->emo_reconfigure != NULL) {
- if ((rc = emop->emo_reconfigure(enp)) != 0)
- goto fail4;
- }
-
emp->em_emop = emop;
return (0);
-fail4:
- EFSYS_PROBE(fail5);
-
- if (emop->emo_reset != NULL)
- (void) emop->emo_reset(enp);
-
-fail3:
- EFSYS_PROBE(fail4);
fail2:
- EFSYS_PROBE(fail3);
+ EFSYS_PROBE(fail2);
emp->em_type = EFX_MON_INVALID;
@@ -206,7 +126,7 @@ fail1:
#if EFSYS_OPT_NAMES
-/* START MKCONFIG GENERATED MonitorStatNamesBlock 01ee3ea01f23a0c4 */
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 31f437eafb0b0437 */
static const char *__mon_stat_name[] = {
"value_2_5v",
"value_vccp1",
@@ -283,6 +203,8 @@ static const char *__mon_stat_name[] = {
"phy0_vcc",
"phy1_vcc",
"controller_tdiode_temp",
+ "board_front_temp",
+ "board_back_temp",
};
/* END MKCONFIG GENERATED MonitorStatNamesBlock */
@@ -308,7 +230,7 @@ efx_mon_stats_update(
__inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
{
efx_mon_t *emp = &(enp->en_mon);
- efx_mon_ops_t *emop = emp->em_emop;
+ const efx_mon_ops_t *emop = emp->em_emop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
@@ -323,8 +245,6 @@ efx_mon_fini(
__in efx_nic_t *enp)
{
efx_mon_t *emp = &(enp->en_mon);
- efx_mon_ops_t *emop = emp->em_emop;
- efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
@@ -332,12 +252,6 @@ efx_mon_fini(
emp->em_emop = NULL;
- if (emop->emo_reset != NULL) {
- rc = emop->emo_reset(enp);
- if (rc != 0)
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
- }
-
emp->em_type = EFX_MON_INVALID;
enp->en_mod_flags &= ~EFX_MOD_MON;
diff --git a/sys/dev/sfxge/common/efx_nic.c b/sys/dev/sfxge/common/efx_nic.c
index dd28ece..18ce3f1 100644
--- a/sys/dev/sfxge/common/efx_nic.c
+++ b/sys/dev/sfxge/common/efx_nic.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,6 @@ efx_family(
{
if (venid == EFX_PCI_VENID_SFC) {
switch (devid) {
-#if EFSYS_OPT_FALCON
- case EFX_PCI_DEVID_FALCON:
- *efp = EFX_FAMILY_FALCON;
- return (0);
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_PCI_DEVID_SIENA_F1_UNINIT:
/*
@@ -101,6 +95,7 @@ efx_family(
return (0);
#endif /* EFSYS_OPT_MEDFORD */
+ case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */
default:
break;
}
@@ -110,64 +105,6 @@ efx_family(
return (ENOTSUP);
}
-/*
- * To support clients which aren't provided with any PCI context infer
- * the hardware family by inspecting the hardware. Obviously the caller
- * must be damn sure they're really talking to a supported device.
- */
- __checkReturn efx_rc_t
-efx_infer_family(
- __in efsys_bar_t *esbp,
- __out efx_family_t *efp)
-{
- efx_family_t family;
- efx_oword_t oword;
- unsigned int portnum;
- efx_rc_t rc;
-
- EFSYS_BAR_READO(esbp, FR_AZ_CS_DEBUG_REG_OFST, &oword, B_TRUE);
- portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);
- if ((portnum == 1) || (portnum == 2)) {
-#if EFSYS_OPT_SIENA
- family = EFX_FAMILY_SIENA;
- goto out;
-#endif
- } else if (portnum == 0) {
- efx_dword_t dword;
- uint32_t hw_rev;
-
- EFSYS_BAR_READD(esbp, ER_DZ_BIU_HW_REV_ID_REG_OFST, &dword,
- B_TRUE);
- hw_rev = EFX_DWORD_FIELD(dword, ERF_DZ_HW_REV_ID);
- if (hw_rev == ER_DZ_BIU_HW_REV_ID_REG_RESET) {
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
- /*
- * BIU_HW_REV_ID is the same for Huntington and Medford.
- * Assume Huntington, as Medford is very similar.
- */
- family = EFX_FAMILY_HUNTINGTON;
- goto out;
-#endif
- } else {
-#if EFSYS_OPT_FALCON
- family = EFX_FAMILY_FALCON;
- goto out;
-#endif
- }
- }
- rc = ENOTSUP;
- goto fail1;
-
-out:
- if (efp != NULL)
- *efp = family;
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
#define EFX_BIU_MAGIC0 0x01234567
#define EFX_BIU_MAGIC1 0xfedcba98
@@ -240,29 +177,9 @@ fail1:
return (rc);
}
-#if EFSYS_OPT_FALCON
-
-static efx_nic_ops_t __efx_nic_falcon_ops = {
- falcon_nic_probe, /* eno_probe */
- NULL, /* eno_board_cfg */
- NULL, /* eno_set_drv_limits */
- falcon_nic_reset, /* eno_reset */
- falcon_nic_init, /* eno_init */
- NULL, /* eno_get_vi_pool */
- NULL, /* eno_get_bar_region */
-#if EFSYS_OPT_DIAG
- falcon_sram_test, /* eno_sram_test */
- falcon_nic_register_test, /* eno_register_test */
-#endif /* EFSYS_OPT_DIAG */
- falcon_nic_fini, /* eno_fini */
- falcon_nic_unprobe, /* eno_unprobe */
-};
-
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
-static efx_nic_ops_t __efx_nic_siena_ops = {
+static const efx_nic_ops_t __efx_nic_siena_ops = {
siena_nic_probe, /* eno_probe */
NULL, /* eno_board_cfg */
NULL, /* eno_set_drv_limits */
@@ -271,7 +188,6 @@ static efx_nic_ops_t __efx_nic_siena_ops = {
NULL, /* eno_get_vi_pool */
NULL, /* eno_get_bar_region */
#if EFSYS_OPT_DIAG
- siena_sram_test, /* eno_sram_test */
siena_nic_register_test, /* eno_register_test */
#endif /* EFSYS_OPT_DIAG */
siena_nic_fini, /* eno_fini */
@@ -282,7 +198,7 @@ static efx_nic_ops_t __efx_nic_siena_ops = {
#if EFSYS_OPT_HUNTINGTON
-static efx_nic_ops_t __efx_nic_hunt_ops = {
+static const efx_nic_ops_t __efx_nic_hunt_ops = {
ef10_nic_probe, /* eno_probe */
hunt_board_cfg, /* eno_board_cfg */
ef10_nic_set_drv_limits, /* eno_set_drv_limits */
@@ -291,7 +207,6 @@ static efx_nic_ops_t __efx_nic_hunt_ops = {
ef10_nic_get_vi_pool, /* eno_get_vi_pool */
ef10_nic_get_bar_region, /* eno_get_bar_region */
#if EFSYS_OPT_DIAG
- ef10_sram_test, /* eno_sram_test */
ef10_nic_register_test, /* eno_register_test */
#endif /* EFSYS_OPT_DIAG */
ef10_nic_fini, /* eno_fini */
@@ -302,7 +217,7 @@ static efx_nic_ops_t __efx_nic_hunt_ops = {
#if EFSYS_OPT_MEDFORD
-static efx_nic_ops_t __efx_nic_medford_ops = {
+static const efx_nic_ops_t __efx_nic_medford_ops = {
ef10_nic_probe, /* eno_probe */
medford_board_cfg, /* eno_board_cfg */
ef10_nic_set_drv_limits, /* eno_set_drv_limits */
@@ -311,7 +226,6 @@ static efx_nic_ops_t __efx_nic_medford_ops = {
ef10_nic_get_vi_pool, /* eno_get_vi_pool */
ef10_nic_get_bar_region, /* eno_get_bar_region */
#if EFSYS_OPT_DIAG
- ef10_sram_test, /* eno_sram_test */
ef10_nic_register_test, /* eno_register_test */
#endif /* EFSYS_OPT_DIAG */
ef10_nic_fini, /* eno_fini */
@@ -346,16 +260,9 @@ efx_nic_create(
enp->en_magic = EFX_NIC_MAGIC;
switch (family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- enp->en_enop = (efx_nic_ops_t *)&__efx_nic_falcon_ops;
- enp->en_features = 0;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- enp->en_enop = (efx_nic_ops_t *)&__efx_nic_siena_ops;
+ enp->en_enop = &__efx_nic_siena_ops;
enp->en_features =
EFX_FEATURE_IPV6 |
EFX_FEATURE_LFSR_HASH_INSERT |
@@ -371,7 +278,7 @@ efx_nic_create(
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- enp->en_enop = (efx_nic_ops_t *)&__efx_nic_hunt_ops;
+ enp->en_enop = &__efx_nic_hunt_ops;
/* FIXME: Add WOL support */
enp->en_features =
EFX_FEATURE_IPV6 |
@@ -388,7 +295,7 @@ efx_nic_create(
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- enp->en_enop = (efx_nic_ops_t *)&__efx_nic_medford_ops;
+ enp->en_enop = &__efx_nic_medford_ops;
/*
* FW_ASSISTED_TSO ommitted as Medford only supports firmware
* assisted TSO version 2, not the v1 scheme used on Huntington.
@@ -400,7 +307,8 @@ efx_nic_create(
EFX_FEATURE_MCDI |
EFX_FEATURE_MAC_HEADER_FILTERS |
EFX_FEATURE_MCDI_DMA |
- EFX_FEATURE_PIO_BUFFERS;
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -436,7 +344,7 @@ fail1:
efx_nic_probe(
__in efx_nic_t *enp)
{
- efx_nic_ops_t *enop;
+ const efx_nic_ops_t *enop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -467,48 +375,12 @@ fail1:
return (rc);
}
-#if EFSYS_OPT_PCIE_TUNE
-
- __checkReturn efx_rc_t
-efx_nic_pcie_tune(
- __in efx_nic_t *enp,
- unsigned int nlanes)
-{
- EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
- EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
- EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
-
-#if EFSYS_OPT_FALCON
- if (enp->en_family == EFX_FAMILY_FALCON)
- return (falcon_nic_pcie_tune(enp, nlanes));
-#endif
- return (ENOTSUP);
-}
-
- __checkReturn efx_rc_t
-efx_nic_pcie_extended_sync(
- __in efx_nic_t *enp)
-{
- EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
- EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
- EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
-
-#if EFSYS_OPT_SIENA
- if (enp->en_family == EFX_FAMILY_SIENA)
- return (siena_nic_pcie_extended_sync(enp));
-#endif
-
- return (ENOTSUP);
-}
-
-#endif /* EFSYS_OPT_PCIE_TUNE */
-
__checkReturn efx_rc_t
efx_nic_set_drv_limits(
__inout efx_nic_t *enp,
__in efx_drv_limits_t *edlp)
{
- efx_nic_ops_t *enop = enp->en_enop;
+ const efx_nic_ops_t *enop = enp->en_enop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -534,7 +406,7 @@ efx_nic_get_bar_region(
__out uint32_t *offsetp,
__out size_t *sizep)
{
- efx_nic_ops_t *enop = enp->en_enop;
+ const efx_nic_ops_t *enop = enp->en_enop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -569,7 +441,7 @@ efx_nic_get_vi_pool(
__out uint32_t *rxq_countp,
__out uint32_t *txq_countp)
{
- efx_nic_ops_t *enop = enp->en_enop;
+ const efx_nic_ops_t *enop = enp->en_enop;
efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_rc_t rc;
@@ -606,7 +478,7 @@ fail1:
efx_nic_init(
__in efx_nic_t *enp)
{
- efx_nic_ops_t *enop = enp->en_enop;
+ const efx_nic_ops_t *enop = enp->en_enop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -636,7 +508,7 @@ fail1:
efx_nic_fini(
__in efx_nic_t *enp)
{
- efx_nic_ops_t *enop = enp->en_enop;
+ const efx_nic_ops_t *enop = enp->en_enop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
@@ -655,7 +527,7 @@ efx_nic_fini(
efx_nic_unprobe(
__in efx_nic_t *enp)
{
- efx_nic_ops_t *enop = enp->en_enop;
+ const efx_nic_ops_t *enop = enp->en_enop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
#if EFSYS_OPT_MCDI
@@ -701,14 +573,14 @@ efx_nic_destroy(
efx_nic_reset(
__in efx_nic_t *enp)
{
- efx_nic_ops_t *enop = enp->en_enop;
+ const efx_nic_ops_t *enop = enp->en_enop;
unsigned int mod_flags;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
/*
- * All modules except the MCDI, PROBE, NVRAM, VPD, MON, LIC
+ * All modules except the MCDI, PROBE, NVRAM, VPD, MON
* (which we do not reset here) must have been shut down or never
* initialized.
*
@@ -718,7 +590,7 @@ efx_nic_reset(
*/
mod_flags = enp->en_mod_flags;
mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
- EFX_MOD_VPD | EFX_MOD_MON | EFX_MOD_LIC);
+ EFX_MOD_VPD | EFX_MOD_MON);
EFSYS_ASSERT3U(mod_flags, ==, 0);
if (mod_flags != 0) {
rc = EINVAL;
@@ -728,8 +600,6 @@ efx_nic_reset(
if ((rc = enop->eno_reset(enp)) != 0)
goto fail2;
- enp->en_reset_flags |= EFX_RESET_MAC;
-
return (0);
fail2:
@@ -755,7 +625,7 @@ efx_nic_cfg_get(
efx_nic_register_test(
__in efx_nic_t *enp)
{
- efx_nic_ops_t *enop = enp->en_enop;
+ const efx_nic_ops_t *enop = enp->en_enop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -1095,3 +965,101 @@ fail1:
}
#endif /* EFSYS_OPT_LOOPBACK */
+
+ __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t lane_bandwidth;
+ uint32_t total_bandwidth;
+ efx_rc_t rc;
+
+ if ((pcie_link_width == 0) || (pcie_link_width > 16) ||
+ !ISP2(pcie_link_width)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (pcie_link_gen) {
+ case EFX_PCIE_LINK_SPEED_GEN1:
+ /* 2.5 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 2000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN2:
+ /* 5.0 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 4000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN3:
+ /* 8.0 Gb/s raw bandwidth with 128b/130b encoding */
+ lane_bandwidth = 7877;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ total_bandwidth = lane_bandwidth * pcie_link_width;
+ *bandwidth_mbpsp = total_bandwidth;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t bandwidth;
+ efx_pcie_link_performance_t result;
+ efx_rc_t rc;
+
+ if ((encp->enc_required_pcie_bandwidth_mbps == 0) ||
+ (pcie_link_width == 0) || (pcie_link_width == 32) ||
+ (pcie_link_gen == 0)) {
+ /*
+ * No usable info on what is required and/or in use. In virtual
+ * machines, sometimes the PCIe link width is reported as 0 or
+ * 32, or the speed as 0.
+ */
+ result = EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH;
+ goto out;
+ }
+
+ /* Calculate the available bandwidth in megabits per second */
+ rc = efx_nic_calculate_pcie_link_bandwidth(pcie_link_width,
+ pcie_link_gen, &bandwidth);
+ if (rc != 0)
+ goto fail1;
+
+ if (bandwidth < encp->enc_required_pcie_bandwidth_mbps) {
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH;
+ } else if (pcie_link_gen < encp->enc_max_pcie_link_gen) {
+ /* The link provides enough bandwidth but not optimal latency */
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY;
+ } else {
+ result = EFX_PCIE_LINK_PERFORMANCE_OPTIMAL;
+ }
+
+out:
+ *resultp = result;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
diff --git a/sys/dev/sfxge/common/efx_nvram.c b/sys/dev/sfxge/common/efx_nvram.c
index 272e6c7..a17cd78 100644
--- a/sys/dev/sfxge/common/efx_nvram.c
+++ b/sys/dev/sfxge/common/efx_nvram.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,28 +36,9 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_NVRAM
-#if EFSYS_OPT_FALCON
-
-static efx_nvram_ops_t __efx_nvram_falcon_ops = {
-#if EFSYS_OPT_DIAG
- falcon_nvram_test, /* envo_test */
-#endif /* EFSYS_OPT_DIAG */
- falcon_nvram_type_to_partn, /* envo_type_to_partn */
- falcon_nvram_partn_size, /* envo_partn_size */
- falcon_nvram_partn_rw_start, /* envo_partn_rw_start */
- falcon_nvram_partn_read, /* envo_partn_read */
- falcon_nvram_partn_erase, /* envo_partn_erase */
- falcon_nvram_partn_write, /* envo_partn_write */
- falcon_nvram_partn_rw_finish, /* envo_partn_rw_finish */
- falcon_nvram_partn_get_version, /* envo_partn_get_version */
- falcon_nvram_partn_set_version, /* envo_partn_set_version */
-};
-
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
-static efx_nvram_ops_t __efx_nvram_siena_ops = {
+static const efx_nvram_ops_t __efx_nvram_siena_ops = {
#if EFSYS_OPT_DIAG
siena_nvram_test, /* envo_test */
#endif /* EFSYS_OPT_DIAG */
@@ -70,13 +51,14 @@ static efx_nvram_ops_t __efx_nvram_siena_ops = {
siena_nvram_partn_rw_finish, /* envo_partn_rw_finish */
siena_nvram_partn_get_version, /* envo_partn_get_version */
siena_nvram_partn_set_version, /* envo_partn_set_version */
+ NULL, /* envo_partn_validate */
};
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static efx_nvram_ops_t __efx_nvram_ef10_ops = {
+static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
#if EFSYS_OPT_DIAG
ef10_nvram_test, /* envo_test */
#endif /* EFSYS_OPT_DIAG */
@@ -89,6 +71,7 @@ static efx_nvram_ops_t __efx_nvram_ef10_ops = {
ef10_nvram_partn_rw_finish, /* envo_partn_rw_finish */
ef10_nvram_partn_get_version, /* envo_partn_get_version */
ef10_nvram_partn_set_version, /* envo_partn_set_version */
+ ef10_nvram_buffer_validate, /* envo_buffer_validate */
};
#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
@@ -97,7 +80,7 @@ static efx_nvram_ops_t __efx_nvram_ef10_ops = {
efx_nvram_init(
__in efx_nic_t *enp)
{
- efx_nvram_ops_t *envop;
+ const efx_nvram_ops_t *envop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -105,27 +88,21 @@ efx_nvram_init(
EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM));
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- envop = (efx_nvram_ops_t *)&__efx_nvram_falcon_ops;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- envop = (efx_nvram_ops_t *)&__efx_nvram_siena_ops;
+ envop = &__efx_nvram_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- envop = (efx_nvram_ops_t *)&__efx_nvram_ef10_ops;
+ envop = &__efx_nvram_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- envop = (efx_nvram_ops_t *)&__efx_nvram_ef10_ops;
+ envop = &__efx_nvram_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -152,7 +129,7 @@ fail1:
efx_nvram_test(
__in efx_nic_t *enp)
{
- efx_nvram_ops_t *envop = enp->en_envop;
+ const efx_nvram_ops_t *envop = enp->en_envop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -177,7 +154,7 @@ efx_nvram_size(
__in efx_nvram_type_t type,
__out size_t *sizep)
{
- efx_nvram_ops_t *envop = enp->en_envop;
+ const efx_nvram_ops_t *envop = enp->en_envop;
uint32_t partn;
efx_rc_t rc;
@@ -210,7 +187,7 @@ efx_nvram_get_version(
__out uint32_t *subtypep,
__out_ecount(4) uint16_t version[4])
{
- efx_nvram_ops_t *envop = enp->en_envop;
+ const efx_nvram_ops_t *envop = enp->en_envop;
uint32_t partn;
efx_rc_t rc;
@@ -243,7 +220,7 @@ efx_nvram_rw_start(
__in efx_nvram_type_t type,
__out_opt size_t *chunk_sizep)
{
- efx_nvram_ops_t *envop = enp->en_envop;
+ const efx_nvram_ops_t *envop = enp->en_envop;
uint32_t partn;
efx_rc_t rc;
@@ -281,7 +258,7 @@ efx_nvram_read_chunk(
__out_bcount(size) caddr_t data,
__in size_t size)
{
- efx_nvram_ops_t *envop = enp->en_envop;
+ const efx_nvram_ops_t *envop = enp->en_envop;
uint32_t partn;
efx_rc_t rc;
@@ -314,7 +291,7 @@ efx_nvram_erase(
__in efx_nic_t *enp,
__in efx_nvram_type_t type)
{
- efx_nvram_ops_t *envop = enp->en_envop;
+ const efx_nvram_ops_t *envop = enp->en_envop;
unsigned int offset = 0;
size_t size = 0;
uint32_t partn;
@@ -357,7 +334,7 @@ efx_nvram_write_chunk(
__in_bcount(size) caddr_t data,
__in size_t size)
{
- efx_nvram_ops_t *envop = enp->en_envop;
+ const efx_nvram_ops_t *envop = enp->en_envop;
uint32_t partn;
efx_rc_t rc;
@@ -390,7 +367,7 @@ efx_nvram_rw_finish(
__in efx_nic_t *enp,
__in efx_nvram_type_t type)
{
- efx_nvram_ops_t *envop = enp->en_envop;
+ const efx_nvram_ops_t *envop = enp->en_envop;
uint32_t partn;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -413,7 +390,7 @@ efx_nvram_set_version(
__in efx_nvram_type_t type,
__in_ecount(4) uint16_t version[4])
{
- efx_nvram_ops_t *envop = enp->en_envop;
+ const efx_nvram_ops_t *envop = enp->en_envop;
uint32_t partn;
efx_rc_t rc;
@@ -446,6 +423,44 @@ fail1:
return (rc);
}
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if (envop->envo_type_to_partn != NULL &&
+ ((rc = envop->envo_buffer_validate(enp, partn,
+ partn_data, partn_size)) != 0))
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
void
efx_nvram_fini(
__in efx_nic_t *enp)
diff --git a/sys/dev/sfxge/common/efx_phy.c b/sys/dev/sfxge/common/efx_phy.c
index 3fd4e08..a265d7c 100644
--- a/sys/dev/sfxge/common/efx_phy.c
+++ b/sys/dev/sfxge/common/efx_phy.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,229 +33,18 @@ __FBSDID("$FreeBSD$");
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_FALCON
-#include "falcon_nvram.h"
-#endif
-
-#if EFSYS_OPT_MAC_FALCON_XMAC
-#include "falcon_xmac.h"
-#endif
-
-#if EFSYS_OPT_MAC_FALCON_GMAC
-#include "falcon_gmac.h"
-#endif
-
-#if EFSYS_OPT_PHY_NULL
-#include "nullphy.h"
-#endif
-
-#if EFSYS_OPT_PHY_QT2022C2
-#include "qt2022c2.h"
-#endif
-
-#if EFSYS_OPT_PHY_SFX7101
-#include "sfx7101.h"
-#endif
-
-#if EFSYS_OPT_PHY_TXC43128
-#include "txc43128.h"
-#endif
-
-#if EFSYS_OPT_PHY_SFT9001
-#include "sft9001.h"
-#endif
-
-#if EFSYS_OPT_PHY_QT2025C
-#include "qt2025c.h"
-#endif
-
-#if EFSYS_OPT_PHY_NULL
-static efx_phy_ops_t __efx_phy_null_ops = {
- NULL, /* epo_power */
- nullphy_reset, /* epo_reset */
- nullphy_reconfigure, /* epo_reconfigure */
- nullphy_verify, /* epo_verify */
- NULL, /* epo_uplink_check */
- nullphy_downlink_check, /* epo_downlink_check */
- nullphy_oui_get, /* epo_oui_get */
-#if EFSYS_OPT_PHY_STATS
- nullphy_stats_update, /* epo_stats_update */
-#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-#if EFSYS_OPT_NAMES
- nullphy_prop_name, /* epo_prop_name */
-#endif
- nullphy_prop_get, /* epo_prop_get */
- nullphy_prop_set, /* epo_prop_set */
-#endif /* EFSYS_OPT_PHY_PROPS */
-#if EFSYS_OPT_BIST
- NULL, /* epo_bist_enable_offline */
- NULL, /* epo_bist_start */
- NULL, /* epo_bist_poll */
- NULL, /* epo_bist_stop */
-#endif /* EFSYS_OPT_BIST */
-};
-#endif /* EFSYS_OPT_PHY_NULL */
-
-#if EFSYS_OPT_PHY_QT2022C2
-static efx_phy_ops_t __efx_phy_qt2022c2_ops = {
- NULL, /* epo_power */
- qt2022c2_reset, /* epo_reset */
- qt2022c2_reconfigure, /* epo_reconfigure */
- qt2022c2_verify, /* epo_verify */
- qt2022c2_uplink_check, /* epo_uplink_check */
- qt2022c2_downlink_check, /* epo_downlink_check */
- qt2022c2_oui_get, /* epo_oui_get */
-#if EFSYS_OPT_PHY_STATS
- qt2022c2_stats_update, /* epo_stats_update */
-#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-#if EFSYS_OPT_NAMES
- qt2022c2_prop_name, /* epo_prop_name */
-#endif
- qt2022c2_prop_get, /* epo_prop_get */
- qt2022c2_prop_set, /* epo_prop_set */
-#endif /* EFSYS_OPT_PHY_PROPS */
-#if EFSYS_OPT_BIST
- NULL, /* epo_bist_enable_offline */
- NULL, /* epo_bist_start */
- NULL, /* epo_bist_poll */
- NULL, /* epo_bist_stop */
-#endif /* EFSYS_OPT_BIST */
-};
-#endif /* EFSYS_OPT_PHY_QT2022C2 */
-
-#if EFSYS_OPT_PHY_SFX7101
-static efx_phy_ops_t __efx_phy_sfx7101_ops = {
- sfx7101_power, /* epo_power */
- sfx7101_reset, /* epo_reset */
- sfx7101_reconfigure, /* epo_reconfigure */
- sfx7101_verify, /* epo_verify */
- sfx7101_uplink_check, /* epo_uplink_check */
- sfx7101_downlink_check, /* epo_downlink_check */
- sfx7101_oui_get, /* epo_oui_get */
-#if EFSYS_OPT_PHY_STATS
- sfx7101_stats_update, /* epo_stats_update */
-#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-#if EFSYS_OPT_NAMES
- sfx7101_prop_name, /* epo_prop_name */
-#endif
- sfx7101_prop_get, /* epo_prop_get */
- sfx7101_prop_set, /* epo_prop_set */
-#endif /* EFSYS_OPT_PHY_PROPS */
-#if EFSYS_OPT_BIST
- NULL, /* epo_bist_enable_offline */
- NULL, /* epo_bist_start */
- NULL, /* epo_bist_poll */
- NULL, /* epo_bist_stop */
-#endif /* EFSYS_OPT_BIST */
-};
-#endif /* EFSYS_OPT_PHY_SFX7101 */
-
-#if EFSYS_OPT_PHY_TXC43128
-static efx_phy_ops_t __efx_phy_txc43128_ops = {
- NULL, /* epo_power */
- txc43128_reset, /* epo_reset */
- txc43128_reconfigure, /* epo_reconfigure */
- txc43128_verify, /* epo_verify */
- txc43128_uplink_check, /* epo_uplink_check */
- txc43128_downlink_check, /* epo_downlink_check */
- txc43128_oui_get, /* epo_oui_get */
-#if EFSYS_OPT_PHY_STATS
- txc43128_stats_update, /* epo_stats_update */
-#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-#if EFSYS_OPT_NAMES
- txc43128_prop_name, /* epo_prop_name */
-#endif
- txc43128_prop_get, /* epo_prop_get */
- txc43128_prop_set, /* epo_prop_set */
-#endif /* EFSYS_OPT_PHY_PROPS */
-#if EFSYS_OPT_BIST
- NULL, /* epo_bist_enable_offline */
- NULL, /* epo_bist_start */
- NULL, /* epo_bist_poll */
- NULL, /* epo_bist_stop */
-#endif /* EFSYS_OPT_BIST */
-};
-#endif /* EFSYS_OPT_PHY_TXC43128 */
-
-#if EFSYS_OPT_PHY_SFT9001
-static efx_phy_ops_t __efx_phy_sft9001_ops = {
- NULL, /* epo_power */
- sft9001_reset, /* epo_reset */
- sft9001_reconfigure, /* epo_reconfigure */
- sft9001_verify, /* epo_verify */
- sft9001_uplink_check, /* epo_uplink_check */
- sft9001_downlink_check, /* epo_downlink_check */
- sft9001_oui_get, /* epo_oui_get */
-#if EFSYS_OPT_PHY_STATS
- sft9001_stats_update, /* epo_stats_update */
-#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-#if EFSYS_OPT_NAMES
- sft9001_prop_name, /* epo_prop_name */
-#endif
- sft9001_prop_get, /* epo_prop_get */
- sft9001_prop_set, /* epo_prop_set */
-#endif /* EFSYS_OPT_PHY_PROPS */
-#if EFSYS_OPT_BIST
- NULL, /* epo_bist_enable_offline */
- sft9001_bist_start, /* epo_bist_start */
- sft9001_bist_poll, /* epo_bist_poll */
- sft9001_bist_stop, /* epo_bist_stop */
-#endif /* EFSYS_OPT_BIST */
-};
-#endif /* EFSYS_OPT_PHY_SFT9001 */
-
-#if EFSYS_OPT_PHY_QT2025C
-static efx_phy_ops_t __efx_phy_qt2025c_ops = {
- NULL, /* epo_power */
- qt2025c_reset, /* epo_reset */
- qt2025c_reconfigure, /* epo_reconfigure */
- qt2025c_verify, /* epo_verify */
- qt2025c_uplink_check, /* epo_uplink_check */
- qt2025c_downlink_check, /* epo_downlink_check */
- qt2025c_oui_get, /* epo_oui_get */
-#if EFSYS_OPT_PHY_STATS
- qt2025c_stats_update, /* epo_stats_update */
-#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-#if EFSYS_OPT_NAMES
- qt2025c_prop_name, /* epo_prop_name */
-#endif
- qt2025c_prop_get, /* epo_prop_get */
- qt2025c_prop_set, /* epo_prop_set */
-#endif /* EFSYS_OPT_PHY_PROPS */
-#if EFSYS_OPT_BIST
- NULL, /* epo_bist_enable_offline */
- NULL, /* epo_bist_start */
- NULL, /* epo_bist_poll */
- NULL, /* epo_bist_stop */
-#endif /* EFSYS_OPT_BIST */
-};
-#endif /* EFSYS_OPT_PHY_QT2025C */
+
#if EFSYS_OPT_SIENA
-static efx_phy_ops_t __efx_phy_siena_ops = {
+static const efx_phy_ops_t __efx_phy_siena_ops = {
siena_phy_power, /* epo_power */
NULL, /* epo_reset */
siena_phy_reconfigure, /* epo_reconfigure */
siena_phy_verify, /* epo_verify */
- NULL, /* epo_uplink_check */
- NULL, /* epo_downlink_check */
siena_phy_oui_get, /* epo_oui_get */
#if EFSYS_OPT_PHY_STATS
siena_phy_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-#if EFSYS_OPT_NAMES
- siena_phy_prop_name, /* epo_prop_name */
-#endif
- siena_phy_prop_get, /* epo_prop_get */
- siena_phy_prop_set, /* epo_prop_set */
-#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_BIST
NULL, /* epo_bist_enable_offline */
siena_phy_bist_start, /* epo_bist_start */
@@ -266,24 +55,15 @@ static efx_phy_ops_t __efx_phy_siena_ops = {
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static efx_phy_ops_t __efx_phy_ef10_ops = {
+static const efx_phy_ops_t __efx_phy_ef10_ops = {
ef10_phy_power, /* epo_power */
NULL, /* epo_reset */
ef10_phy_reconfigure, /* epo_reconfigure */
ef10_phy_verify, /* epo_verify */
- NULL, /* epo_uplink_check */
- NULL, /* epo_downlink_check */
ef10_phy_oui_get, /* epo_oui_get */
#if EFSYS_OPT_PHY_STATS
ef10_phy_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-#if EFSYS_OPT_NAMES
- ef10_phy_prop_name, /* epo_prop_name */
-#endif
- ef10_phy_prop_get, /* epo_prop_get */
- ef10_phy_prop_set, /* epo_prop_set */
-#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_BIST
/* FIXME: Are these BIST methods appropriate for Medford? */
hunt_bist_enable_offline, /* epo_bist_enable_offline */
@@ -300,7 +80,7 @@ efx_phy_probe(
{
efx_port_t *epp = &(enp->en_port);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- efx_phy_ops_t *epop;
+ const efx_phy_ops_t *epop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -310,59 +90,19 @@ efx_phy_probe(
/* Hook in operations structure */
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- switch (epp->ep_phy_type) {
-#if EFSYS_OPT_PHY_NULL
- case PHY_TYPE_NONE_DECODE:
- epop = (efx_phy_ops_t *)&__efx_phy_null_ops;
- break;
-#endif
-#if EFSYS_OPT_PHY_QT2022C2
- case PHY_TYPE_QT2022C2_DECODE:
- epop = (efx_phy_ops_t *)&__efx_phy_qt2022c2_ops;
- break;
-#endif
-#if EFSYS_OPT_PHY_SFX7101
- case PHY_TYPE_SFX7101_DECODE:
- epop = (efx_phy_ops_t *)&__efx_phy_sfx7101_ops;
- break;
-#endif
-#if EFSYS_OPT_PHY_TXC43128
- case PHY_TYPE_TXC43128_DECODE:
- epop = (efx_phy_ops_t *)&__efx_phy_txc43128_ops;
- break;
-#endif
-#if EFSYS_OPT_PHY_SFT9001
- case PHY_TYPE_SFT9001A_DECODE:
- case PHY_TYPE_SFT9001B_DECODE:
- epop = (efx_phy_ops_t *)&__efx_phy_sft9001_ops;
- break;
-#endif
-#if EFSYS_OPT_PHY_QT2025C
- case EFX_PHY_QT2025C:
- epop = (efx_phy_ops_t *)&__efx_phy_qt2025c_ops;
- break;
-#endif
- default:
- rc = ENOTSUP;
- goto fail1;
- }
- break;
-#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- epop = (efx_phy_ops_t *)&__efx_phy_siena_ops;
+ epop = &__efx_phy_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- epop = (efx_phy_ops_t *)&__efx_phy_ef10_ops;
+ epop = &__efx_phy_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- epop = (efx_phy_ops_t *)&__efx_phy_ef10_ops;
+ epop = &__efx_phy_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
default:
@@ -388,7 +128,7 @@ efx_phy_verify(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
@@ -405,7 +145,7 @@ efx_phy_led_set(
{
efx_nic_cfg_t *encp = (&enp->en_nic_cfg);
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
uint32_t mask;
efx_rc_t rc;
@@ -474,7 +214,7 @@ efx_phy_adv_cap_set(
__in uint32_t mask)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
uint32_t old_mask;
efx_rc_t rc;
@@ -536,7 +276,7 @@ efx_phy_oui_get(
__out uint32_t *ouip)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
@@ -669,7 +409,7 @@ efx_phy_stats_update(
__inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
@@ -679,55 +419,6 @@ efx_phy_stats_update(
#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-
-#if EFSYS_OPT_NAMES
- const char *
-efx_phy_prop_name(
- __in efx_nic_t *enp,
- __in unsigned int id)
-{
- efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
-
- EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
- EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
-
- return (epop->epo_prop_name(enp, id));
-}
-#endif /* EFSYS_OPT_NAMES */
-
- __checkReturn efx_rc_t
-efx_phy_prop_get(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t flags,
- __out uint32_t *valp)
-{
- efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
-
- EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
- EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
-
- return (epop->epo_prop_get(enp, id, flags, valp));
-}
-
- __checkReturn efx_rc_t
-efx_phy_prop_set(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t val)
-{
- efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
-
- EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
- EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
-
- return (epop->epo_prop_set(enp, id, val));
-}
-#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_BIST
@@ -736,7 +427,7 @@ efx_bist_enable_offline(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -766,7 +457,7 @@ efx_bist_start(
__in efx_bist_type_t type)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -805,7 +496,7 @@ efx_bist_poll(
__in size_t count)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -840,7 +531,7 @@ efx_bist_stop(
__in efx_bist_type_t type)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
diff --git a/sys/dev/sfxge/common/efx_phy_ids.h b/sys/dev/sfxge/common/efx_phy_ids.h
index e062519..d3d0235 100644
--- a/sys/dev/sfxge/common/efx_phy_ids.h
+++ b/sys/dev/sfxge/common/efx_phy_ids.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2013-2015 Solarflare Communications Inc.
+ * Copyright (c) 2013-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/efx_port.c b/sys/dev/sfxge/common/efx_port.c
index ca2a69b..440f0a0 100644
--- a/sys/dev/sfxge/common/efx_port.c
+++ b/sys/dev/sfxge/common/efx_port.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,7 +39,7 @@ efx_port_init(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -55,7 +55,6 @@ efx_port_init(
epp->ep_mac_type = EFX_MAC_INVALID;
epp->ep_link_mode = EFX_LINK_UNKNOWN;
- epp->ep_mac_poll_needed = B_TRUE;
epp->ep_mac_drain = B_TRUE;
/* Configure the MAC */
@@ -105,7 +104,7 @@ efx_port_poll(
__out_opt efx_link_mode_t *link_modep)
{
efx_port_t *epp = &(enp->en_port);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_link_mode_t ignore_link_mode;
efx_rc_t rc;
@@ -139,7 +138,7 @@ efx_port_loopback_set(
{
efx_port_t *epp = &(enp->en_port);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_mac_ops_t *emop = epp->ep_emop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -235,7 +234,7 @@ efx_port_fini(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
@@ -247,7 +246,6 @@ efx_port_fini(
epp->ep_emop = NULL;
epp->ep_mac_type = EFX_MAC_INVALID;
epp->ep_mac_drain = B_FALSE;
- epp->ep_mac_poll_needed = B_FALSE;
/* Turn off the PHY */
if (epop->epo_power != NULL)
diff --git a/sys/dev/sfxge/common/efx_regs.h b/sys/dev/sfxge/common/efx_regs.h
index 5ece431..b817db7 100644
--- a/sys/dev/sfxge/common/efx_regs.h
+++ b/sys/dev/sfxge/common/efx_regs.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/efx_regs_ef10.h b/sys/dev/sfxge/common/efx_regs_ef10.h
index 43745e5..df03a96 100644
--- a/sys/dev/sfxge/common/efx_regs_ef10.h
+++ b/sys/dev/sfxge/common/efx_regs_ef10.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/efx_regs_mcdi.h b/sys/dev/sfxge/common/efx_regs_mcdi.h
index a1d76ab..00a80b4 100644
--- a/sys/dev/sfxge/common/efx_regs_mcdi.h
+++ b/sys/dev/sfxge/common/efx_regs_mcdi.h
@@ -721,6 +721,12 @@
#define FCDI_EVENT_CODE_PTP_STATUS 0x9
/* enum: Port id config to map MC-FC port idx */
#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -752,6 +758,11 @@
#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
* to the MC. Note that this structure | is overlayed over a normal FCDI event
@@ -3350,6 +3361,8 @@
#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
/* enum: Bad BSP */
#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
+/* enum: Flash mode is invalid */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
/* enum: FC application loaded and execution attempted */
#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
/* enum: FC application Started */
@@ -5480,6 +5493,14 @@
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+
/***********************************/
/* MC_CMD_PHY_STATS
@@ -6552,6 +6573,10 @@
#define MC_CMD_SENSOR_PHY1_VCC 0x4d
/* enum: Controller die temperature (TDIODE): degC */
#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -7904,6 +7929,8 @@
#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
#define LICENSED_FEATURES_MASK_LBN 0
#define LICENSED_FEATURES_MASK_WIDTH 64
@@ -8072,6 +8099,8 @@
#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
@@ -9197,6 +9226,15 @@
* client
*/
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -9227,6 +9265,39 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+
/***********************************/
/* MC_CMD_PARSER_DISP_RW
@@ -10084,6 +10155,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -10162,7 +10235,7 @@
#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
@@ -10267,6 +10340,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -10346,11 +10421,67 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
/***********************************/
@@ -13762,4 +13893,661 @@
/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_RULE
+ * Set blacklist and/or whitelist action for a particular match criteria.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SET_SECURITY_RULE 0x10f
+#undef MC_CMD_0x10f_PRIVILEGE_CTG
+
+#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */
+#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92
+/* fields to include in match criteria */
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_LBN 2
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_LBN 3
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_LBN 4
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_LBN 5
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_LBN 10
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_LBN 11
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_LBN 12
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_LBN 13
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_LBN 14
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_WIDTH 1
+/* remote MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_LEN 6
+/* remote port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_OFST 10
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_LEN 2
+/* local MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_OFST 12
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_LEN 6
+/* local port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST 18
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_OFST 22
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST 26
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2
+/* Physical port to match (as little-endian 32-bit value) */
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28
+/* Reserved; set to 0 */
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32
+/* remote IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_OFST 36
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_LEN 16
+/* local IP address to match (as bytes in network order; set last 12 bytes to 0
+ * for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_OFST 52
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_LEN 16
+/* remote subnet ID to match (as little-endian 32-bit value); note that remote
+ * subnets are matched by mapping the remote IP address to a "subnet ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_SUBNET_MAP_SET_NODE appropriately
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68
+/* remote portrange ID to match (as little-endian 32-bit value); note that
+ * remote port ranges are matched by mapping the remote port to a "portrange
+ * ID" via a data structure which must already have been configured using
+ * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72
+/* local portrange ID to match (as little-endian 32-bit value); note that local
+ * port ranges are matched by mapping the local port to a "portrange ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76
+/* set the action for transmitted packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current TX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
+/* set the action for received packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current RX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
+/* counter ID to associate with this rule; IDs are allocated using
+ * MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88
+/* enum: special value for the null counter ID */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+
+/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 28
+/* new reference count for uses of counter ID */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0
+/* constructed match bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12
+/* constructed discriminator bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16
+/* base location for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20
+/* step for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24
+
+
+/***********************************/
+/* MC_CMD_RESET_SECURITY_RULES
+ * Reset all blacklist and whitelist actions for a particular physical port, or
+ * all ports. (Medford-only; for use by SolarSecure apps, not directly by
+ * drivers. See SF-114946-SW.) NOTE - this message definition is provisional.
+ * It has not yet been used in any released code and may change during
+ * development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_RESET_SECURITY_RULES 0x110
+#undef MC_CMD_0x110_PRIVILEGE_CTG
+
+#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */
+#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4
+/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0
+/* enum: special value to reset all physical ports */
+#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
+
+/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */
+#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SECURITY_RULESET_VERSION
+ * Return a large hash value representing a "version" of the complete set of
+ * currently active blacklist / whitelist rules and associated data structures.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION 0x111
+#undef MC_CMD_0x111_PRIVILEGE_CTG
+
+#define MC_CMD_0x111_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_IN msgrequest */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMIN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX 252
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LEN(num) (0+1*(num))
+/* Opaque hash value; length may vary depending on the hash scheme used */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_LEN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MINNUM 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ * Allocate counters for use with blacklist / whitelist rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
+#undef MC_CMD_0x112_PRIVILEGE_CTG
+
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
+/* the number of new counter IDs to request */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LEN(num) (4+4*(num))
+/* the number of new counter IDs allocated (may be less than the number
+ * requested if resources are unavailable)
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+/* new counter ID(s) */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE
+ * Free counters previously allocated for use with blacklist / whitelist
+ * rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
+#undef MC_CMD_0x113_PRIVILEGE_CTG
+
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+/* the number of counter IDs to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+/* the counter ID(s) to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM 62
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUBNET_MAP_SET_NODE
+ * Atomically update a trie node in the map of subnets to subnet IDs. The
+ * constants in the descriptions of the fields of this message may be retrieved
+ * by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. (Medford-
+ * only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE 0x114
+#undef MC_CMD_0x114_PRIVILEGE_CTG
+
+#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX 252
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num))
+/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0
+/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer
+ * to the next node, expressed as an offset in the trie memory (i.e. node ID
+ * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range
+ * SUBNET_ID_MIN .. SUBNET_ID_MAX
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_OFST 4
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_LEN 2
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MINNUM 1
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM 124
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_OUT msgresponse */
+#define MC_CMD_SUBNET_MAP_SET_NODE_OUT_LEN 0
+
+/* PORTRANGE_TREE_ENTRY structuredef */
+#define PORTRANGE_TREE_ENTRY_LEN 4
+/* key for branch nodes (<= key takes left branch, > key takes right branch),
+ * or magic value for leaf nodes
+ */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16
+/* final portrange ID for leaf nodes (don't care for branch nodes) */
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_OFST 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN 16
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping remote port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
+#undef MC_CMD_0x115_PRIVILEGE_CTG
+
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping local port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
+#undef MC_CMD_0x116_PRIVILEGE_CTG
+
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute the packets on both RX engines.
+ * Packets are distributed based on a table with the destination vFIFO. The
+ * index of the table is a hash of source and destination of IPV4 and VLAN
+ * priority.
+ */
+#define MC_CMD_RX_BALANCING 0x118
+#undef MC_CMD_0x118_PRIVILEGE_CTG
+
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RX_BALANCING_IN msgrequest */
+#define MC_CMD_RX_BALANCING_IN_LEN 4
+/* The RX port whose upconverter table will be modified */
+#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1
+/* The VLAN priority associated to the table index and vFIFO */
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1
+/* The resulting bit of SRC^DST for indexing the table */
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1
+/* The RX engine to which the vFIFO in the table entry will point to */
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1
+
+/* MC_CMD_RX_BALANCING_OUT msgresponse */
+#define MC_CMD_RX_BALANCING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_BIND
+ * TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more
+ * info in respect to the binding protocol. Note- This MCDI command is only
+ * available over a TLS secure connection between the TSAN and TSAC, and is not
+ * available to host software.
+ */
+#define MC_CMD_TSA_BIND 0x119
+
+/* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */
+#define MC_CMD_TSA_BIND_IN_LEN 4
+#define MC_CMD_TSA_BIND_IN_OP_OFST 0
+/* enum: Retrieve the TSAN ID from a TSAN. TSAN ID is a unique identifier for
+ * the network adapter. More specifically, TSAN ID equals the MAC address of
+ * the network adapter. TSAN ID is used as part of the TSAN authentication
+ * protocol. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_GET_ID 0x1
+/* enum: Get a binding ticket from the TSAN. The binding ticket is used as part
+ * of the binding procedure to authorize the binding of an adapter to a TSAID.
+ * Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2
+/* enum: Opcode associated with the propagation of a private key that TSAN uses
+ * as part of post-binding authentication procedure. More specifically, TSAN
+ * uses this key for a signing operation. TSAC uses the counterpart public key
+ * to verify the signature. Note - The post-binding authentication occurs when
+ * the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to
+ * SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_SET_KEY 0x3
+/* enum: Request an unbinding operation. Note- TSAN clears the binding ticket
+ * from the Nvram section.
+ */
+#define MC_CMD_TSA_BIND_OP_UNBIND 0x4
+
+/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0
+/* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates
+ * the nonce every time as part of the TSAN post-binding authentication
+ * procedure when the TSAN-TSAC connection terminates and TSAN needs to
+ * re-connect to the TSAC. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_LEN 16
+
+/* MC_CMD_TSA_BIND_IN_GET_TICKET msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0
+
+/* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num))
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0
+/* This data blob contains the private key generated by the TSAC. TSAN uses
+ * this key for a signing operation. Note- This private key is used in
+ * conjunction with the post-binding TSAN authentication procedure that occurs
+ * when the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer
+ * to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_LEN 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Asks for the un-binding procedure */
+#define MC_CMD_TSA_BIND_IN_UNBIND_LEN 6
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 0
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6
+
+/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 11
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (10+1*(num))
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_LEN 6
+/* The signature data blob. The signature is computed against the message
+ * formed by TSAN ID concatenated with the NONCE value. Refer to SF-115479-TC
+ * for more information also in respect to the private keys that are used to
+ * sign the message based on TSAN pre/post-binding authentication procedure.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_OFST 10
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MAXNUM 242
+
+/* MC_CMD_TSA_BIND_OUT_GET_TICKET msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num))
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0
+/* The ticket represents the data blob construct that TSAN sends to TSAC as
+ * part of the binding protocol. From the TSAN perspective the ticket is an
+ * opaque construct. For more info refer to SF-115479-TC.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0
+
+/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a binding ticket. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3
+
+
+/***********************************/
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE
+ * Manage the persistent NVRAM cache of security rules created with
+ * MC_CMD_SET_SECURITY_RULE. Note that the cache is not automatically updated
+ * as rules are added or removed; the active ruleset must be explicitly
+ * committed to the cache. The cache may also be explicitly invalidated,
+ * without affecting the currently active ruleset. When the cache is valid, it
+ * will be loaded at power on or MC reboot, instead of the default ruleset.
+ * Rollback of the currently active ruleset to the cached version (when it is
+ * valid) is also supported. (Medford-only; for use by SolarSecure apps, not
+ * directly by drivers. See SF-114946-SW.) NOTE - this message definition is
+ * provisional. It has not yet been used in any released code and may change
+ * during development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a
+#undef MC_CMD_0x11a_PRIVILEGE_CTG
+
+#define MC_CMD_0x11a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN msgrequest */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4
+/* the operation to perform */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0
+/* enum: reports the ruleset version that is cached in persistent storage but
+ * performs no other action
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0
+/* enum: rolls back the active state to the cached version. (May fail with
+ * ENOENT if there is no valid cached version.)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1
+/* enum: commits the active state to the persistent cache */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2
+/* enum: invalidates the persistent cache without affecting the active state */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMAX 252
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LEN(num) (4+1*(num))
+/* indicates whether the persistent cache is valid (after completion of the
+ * requested operation in the case of rollback, commit, or invalidate)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0
+/* enum: persistent cache is invalid (the VERSION field will be empty in this
+ * case)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0
+/* enum: persistent cache is valid */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1
+/* cached ruleset version (after completion of the requested operation, in the
+ * case of rollback, commit, or invalidate) as an opaque hash value in the same
+ * form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_OFST 4
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_LEN 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MINNUM 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MAXNUM 248
+
#endif /* _SIENA_MC_DRIVER_PCOL_H */
diff --git a/sys/dev/sfxge/common/efx_regs_pci.h b/sys/dev/sfxge/common/efx_regs_pci.h
index a5fd45b..f274b0b 100644
--- a/sys/dev/sfxge/common/efx_regs_pci.h
+++ b/sys/dev/sfxge/common/efx_regs_pci.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/efx_rx.c b/sys/dev/sfxge/common/efx_rx.c
index 8ebe205..4a3b76e 100644
--- a/sys/dev/sfxge/common/efx_rx.c
+++ b/sys/dev/sfxge/common/efx_rx.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,45 +35,45 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_rx_init(
+siena_rx_init(
__in efx_nic_t *enp);
static void
-falconsiena_rx_fini(
+siena_rx_fini(
__in efx_nic_t *enp);
#if EFSYS_OPT_RX_SCATTER
static __checkReturn efx_rc_t
-falconsiena_rx_scatter_enable(
+siena_rx_scatter_enable(
__in efx_nic_t *enp,
__in unsigned int buf_size);
#endif /* EFSYS_OPT_RX_SCATTER */
#if EFSYS_OPT_RX_SCALE
static __checkReturn efx_rc_t
-falconsiena_rx_scale_mode_set(
+siena_rx_scale_mode_set(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
__in boolean_t insert);
static __checkReturn efx_rc_t
-falconsiena_rx_scale_key_set(
+siena_rx_scale_key_set(
__in efx_nic_t *enp,
__in_ecount(n) uint8_t *key,
__in size_t n);
static __checkReturn efx_rc_t
-falconsiena_rx_scale_tbl_set(
+siena_rx_scale_tbl_set(
__in efx_nic_t *enp,
__in_ecount(n) unsigned int *table,
__in size_t n);
static __checkReturn uint32_t
-falconsiena_rx_prefix_hash(
+siena_rx_prefix_hash(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t func,
__in uint8_t *buffer);
@@ -81,13 +81,13 @@ falconsiena_rx_prefix_hash(
#endif /* EFSYS_OPT_RX_SCALE */
static __checkReturn efx_rc_t
-falconsiena_rx_prefix_pktlen(
+siena_rx_prefix_pktlen(
__in efx_nic_t *enp,
__in uint8_t *buffer,
__out uint16_t *lengthp);
static void
-falconsiena_rx_qpost(
+siena_rx_qpost(
__in efx_rxq_t *erp,
__in_ecount(n) efsys_dma_addr_t *addrp,
__in size_t size,
@@ -96,21 +96,21 @@ falconsiena_rx_qpost(
__in unsigned int added);
static void
-falconsiena_rx_qpush(
+siena_rx_qpush(
__in efx_rxq_t *erp,
__in unsigned int added,
__inout unsigned int *pushedp);
static __checkReturn efx_rc_t
-falconsiena_rx_qflush(
+siena_rx_qflush(
__in efx_rxq_t *erp);
static void
-falconsiena_rx_qenable(
+siena_rx_qenable(
__in efx_rxq_t *erp);
static __checkReturn efx_rc_t
-falconsiena_rx_qcreate(
+siena_rx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
@@ -122,60 +122,37 @@ falconsiena_rx_qcreate(
__in efx_rxq_t *erp);
static void
-falconsiena_rx_qdestroy(
+siena_rx_qdestroy(
__in efx_rxq_t *erp);
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
-
+#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_FALCON
-static efx_rx_ops_t __efx_rx_falcon_ops = {
- falconsiena_rx_init, /* erxo_init */
- falconsiena_rx_fini, /* erxo_fini */
-#if EFSYS_OPT_RX_SCATTER
- falconsiena_rx_scatter_enable, /* erxo_scatter_enable */
-#endif
-#if EFSYS_OPT_RX_SCALE
- falconsiena_rx_scale_mode_set, /* erxo_scale_mode_set */
- falconsiena_rx_scale_key_set, /* erxo_scale_key_set */
- falconsiena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
- falconsiena_rx_prefix_hash, /* erxo_prefix_hash */
-#endif
- falconsiena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
- falconsiena_rx_qpost, /* erxo_qpost */
- falconsiena_rx_qpush, /* erxo_qpush */
- falconsiena_rx_qflush, /* erxo_qflush */
- falconsiena_rx_qenable, /* erxo_qenable */
- falconsiena_rx_qcreate, /* erxo_qcreate */
- falconsiena_rx_qdestroy, /* erxo_qdestroy */
-};
-#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
-static efx_rx_ops_t __efx_rx_siena_ops = {
- falconsiena_rx_init, /* erxo_init */
- falconsiena_rx_fini, /* erxo_fini */
+static const efx_rx_ops_t __efx_rx_siena_ops = {
+ siena_rx_init, /* erxo_init */
+ siena_rx_fini, /* erxo_fini */
#if EFSYS_OPT_RX_SCATTER
- falconsiena_rx_scatter_enable, /* erxo_scatter_enable */
+ siena_rx_scatter_enable, /* erxo_scatter_enable */
#endif
#if EFSYS_OPT_RX_SCALE
- falconsiena_rx_scale_mode_set, /* erxo_scale_mode_set */
- falconsiena_rx_scale_key_set, /* erxo_scale_key_set */
- falconsiena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
- falconsiena_rx_prefix_hash, /* erxo_prefix_hash */
+ siena_rx_scale_mode_set, /* erxo_scale_mode_set */
+ siena_rx_scale_key_set, /* erxo_scale_key_set */
+ siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ siena_rx_prefix_hash, /* erxo_prefix_hash */
#endif
- falconsiena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
- falconsiena_rx_qpost, /* erxo_qpost */
- falconsiena_rx_qpush, /* erxo_qpush */
- falconsiena_rx_qflush, /* erxo_qflush */
- falconsiena_rx_qenable, /* erxo_qenable */
- falconsiena_rx_qcreate, /* erxo_qcreate */
- falconsiena_rx_qdestroy, /* erxo_qdestroy */
+ siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ siena_rx_qpost, /* erxo_qpost */
+ siena_rx_qpush, /* erxo_qpush */
+ siena_rx_qflush, /* erxo_qflush */
+ siena_rx_qenable, /* erxo_qenable */
+ siena_rx_qcreate, /* erxo_qcreate */
+ siena_rx_qdestroy, /* erxo_qdestroy */
};
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static efx_rx_ops_t __efx_rx_ef10_ops = {
+static const efx_rx_ops_t __efx_rx_ef10_ops = {
ef10_rx_init, /* erxo_init */
ef10_rx_fini, /* erxo_fini */
#if EFSYS_OPT_RX_SCATTER
@@ -202,7 +179,7 @@ static efx_rx_ops_t __efx_rx_ef10_ops = {
efx_rx_init(
__inout efx_nic_t *enp)
{
- efx_rx_ops_t *erxop;
+ const efx_rx_ops_t *erxop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -219,27 +196,21 @@ efx_rx_init(
}
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- erxop = (efx_rx_ops_t *)&__efx_rx_falcon_ops;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- erxop = (efx_rx_ops_t *)&__efx_rx_siena_ops;
+ erxop = &__efx_rx_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- erxop = (efx_rx_ops_t *)&__efx_rx_ef10_ops;
+ erxop = &__efx_rx_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- erxop = (efx_rx_ops_t *)&__efx_rx_ef10_ops;
+ erxop = &__efx_rx_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -274,7 +245,7 @@ fail1:
efx_rx_fini(
__in efx_nic_t *enp)
{
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
@@ -293,7 +264,7 @@ efx_rx_scatter_enable(
__in efx_nic_t *enp,
__in unsigned int buf_size)
{
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -370,7 +341,7 @@ efx_rx_scale_mode_set(
__in efx_rx_hash_type_t type,
__in boolean_t insert)
{
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -397,7 +368,7 @@ efx_rx_scale_key_set(
__in_ecount(n) uint8_t *key,
__in size_t n)
{
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -422,7 +393,7 @@ efx_rx_scale_tbl_set(
__in_ecount(n) unsigned int *table,
__in size_t n)
{
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -450,7 +421,7 @@ efx_rx_qpost(
__in unsigned int added)
{
efx_nic_t *enp = erp->er_enp;
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
@@ -464,7 +435,7 @@ efx_rx_qpush(
__inout unsigned int *pushedp)
{
efx_nic_t *enp = erp->er_enp;
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
@@ -476,7 +447,7 @@ efx_rx_qflush(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
efx_rc_t rc;
EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
@@ -497,7 +468,7 @@ efx_rx_qenable(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
@@ -516,7 +487,7 @@ efx_rx_qcreate(
__in efx_evq_t *eep,
__deref_out efx_rxq_t **erpp)
{
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
efx_rxq_t *erp;
efx_rc_t rc;
@@ -561,7 +532,7 @@ efx_rx_qdestroy(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
@@ -574,7 +545,7 @@ efx_psuedo_hdr_pkt_length_get(
__in uint8_t *buffer,
__out uint16_t *lengthp)
{
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp));
}
@@ -586,17 +557,17 @@ efx_psuedo_hdr_hash_get(
__in efx_rx_hash_alg_t func,
__in uint8_t *buffer)
{
- efx_rx_ops_t *erxop = enp->en_erxop;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE);
return (erxop->erxo_prefix_hash(enp, func, buffer));
}
#endif /* EFSYS_OPT_RX_SCALE */
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_rx_init(
+siena_rx_init(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -633,7 +604,7 @@ falconsiena_rx_init(
#if EFSYS_OPT_RX_SCATTER
static __checkReturn efx_rc_t
-falconsiena_rx_scatter_enable(
+siena_rx_scatter_enable(
__in efx_nic_t *enp,
__in unsigned int buf_size)
{
@@ -721,11 +692,6 @@ fail1:
do { \
efx_oword_t oword; \
\
- if ((_enp)->en_family == EFX_FAMILY_FALCON) { \
- (_rc) = ((_ip) || (_tcp)) ? ENOTSUP : 0; \
- break; \
- } \
- \
EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
EFX_SET_OWORD_FIELD(oword, \
FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \
@@ -744,7 +710,7 @@ fail1:
#if EFSYS_OPT_RX_SCALE
static __checkReturn efx_rc_t
-falconsiena_rx_scale_mode_set(
+siena_rx_scale_mode_set(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
@@ -791,7 +757,7 @@ fail1:
#if EFSYS_OPT_RX_SCALE
static __checkReturn efx_rc_t
-falconsiena_rx_scale_key_set(
+siena_rx_scale_key_set(
__in efx_nic_t *enp,
__in_ecount(n) uint8_t *key,
__in size_t n)
@@ -828,8 +794,6 @@ falconsiena_rx_scale_key_set(
if ((enp->en_features & EFX_FEATURE_IPV6) == 0)
goto done;
- EFSYS_ASSERT3U(enp->en_family, !=, EFX_FAMILY_FALCON);
-
byte = 0;
/* Write Toeplitz IPv6 hash key 3 */
@@ -918,7 +882,7 @@ fail1:
#if EFSYS_OPT_RX_SCALE
static __checkReturn efx_rc_t
-falconsiena_rx_scale_tbl_set(
+siena_rx_scale_tbl_set(
__in efx_nic_t *enp,
__in_ecount(n) unsigned int *table,
__in size_t n)
@@ -996,11 +960,13 @@ fail1:
#if EFSYS_OPT_RX_SCALE
static __checkReturn uint32_t
-falconsiena_rx_prefix_hash(
+siena_rx_prefix_hash(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t func,
__in uint8_t *buffer)
{
+ _NOTE(ARGUNUSED(enp))
+
switch (func) {
case EFX_RX_HASHALG_TOEPLITZ:
return ((buffer[12] << 24) |
@@ -1019,11 +985,13 @@ falconsiena_rx_prefix_hash(
#endif /* EFSYS_OPT_RX_SCALE */
static __checkReturn efx_rc_t
-falconsiena_rx_prefix_pktlen(
+siena_rx_prefix_pktlen(
__in efx_nic_t *enp,
__in uint8_t *buffer,
__out uint16_t *lengthp)
{
+ _NOTE(ARGUNUSED(enp, buffer, lengthp))
+
/* Not supported by Falcon/Siena hardware */
EFSYS_ASSERT(0);
return (ENOTSUP);
@@ -1031,7 +999,7 @@ falconsiena_rx_prefix_pktlen(
static void
-falconsiena_rx_qpost(
+siena_rx_qpost(
__in efx_rxq_t *erp,
__in_ecount(n) efsys_dma_addr_t *addrp,
__in size_t size,
@@ -1069,7 +1037,7 @@ falconsiena_rx_qpost(
}
static void
-falconsiena_rx_qpush(
+siena_rx_qpush(
__in efx_rxq_t *erp,
__in unsigned int added,
__inout unsigned int *pushedp)
@@ -1101,7 +1069,7 @@ falconsiena_rx_qpush(
}
static __checkReturn efx_rc_t
-falconsiena_rx_qflush(
+siena_rx_qflush(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
@@ -1119,7 +1087,7 @@ falconsiena_rx_qflush(
}
static void
-falconsiena_rx_qenable(
+siena_rx_qenable(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
@@ -1139,7 +1107,7 @@ falconsiena_rx_qenable(
}
static __checkReturn efx_rc_t
-falconsiena_rx_qcreate(
+siena_rx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
@@ -1156,6 +1124,8 @@ falconsiena_rx_qcreate(
boolean_t jumbo;
efx_rc_t rc;
+ _NOTE(ARGUNUSED(esmp))
+
EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS ==
(1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
@@ -1229,7 +1199,7 @@ fail1:
}
static void
-falconsiena_rx_qdestroy(
+siena_rx_qdestroy(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
@@ -1249,10 +1219,10 @@ falconsiena_rx_qdestroy(
}
static void
-falconsiena_rx_fini(
+siena_rx_fini(
__in efx_nic_t *enp)
{
_NOTE(ARGUNUSED(enp))
}
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/efx_sram.c b/sys/dev/sfxge/common/efx_sram.c
index f2a7b78..beb24a4 100644
--- a/sys/dev/sfxge/common/efx_sram.c
+++ b/sys/dev/sfxge/common/efx_sram.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -310,7 +310,6 @@ efx_sram_test(
__in efx_nic_t *enp,
__in efx_pattern_type_t type)
{
- efx_nic_ops_t *enop = enp->en_enop;
efx_sram_pattern_fn_t func;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -321,11 +320,15 @@ efx_sram_test(
EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ /* SRAM testing is only available on Siena. */
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ return (0);
+
/* Select pattern generator */
EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES);
func = __efx_sram_pattern_fns[type];
- return (enop->eno_sram_test(enp, func));
+ return (siena_sram_test(enp, func));
}
#endif /* EFSYS_OPT_DIAG */
diff --git a/sys/dev/sfxge/common/efx_tx.c b/sys/dev/sfxge/common/efx_tx.c
index 168fde4..c270784 100644
--- a/sys/dev/sfxge/common/efx_tx.c
+++ b/sys/dev/sfxge/common/efx_tx.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,18 +44,18 @@ __FBSDID("$FreeBSD$");
#define EFX_TX_QSTAT_INCR(_etp, _stat)
#endif
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_tx_init(
+siena_tx_init(
__in efx_nic_t *enp);
static void
-falconsiena_tx_fini(
+siena_tx_fini(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_tx_qcreate(
+siena_tx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
@@ -68,11 +68,11 @@ falconsiena_tx_qcreate(
__out unsigned int *addedp);
static void
-falconsiena_tx_qdestroy(
+siena_tx_qdestroy(
__in efx_txq_t *etp);
static __checkReturn efx_rc_t
-falconsiena_tx_qpost(
+siena_tx_qpost(
__in efx_txq_t *etp,
__in_ecount(n) efx_buffer_t *eb,
__in unsigned int n,
@@ -80,26 +80,26 @@ falconsiena_tx_qpost(
__inout unsigned int *addedp);
static void
-falconsiena_tx_qpush(
+siena_tx_qpush(
__in efx_txq_t *etp,
__in unsigned int added,
__in unsigned int pushed);
static __checkReturn efx_rc_t
-falconsiena_tx_qpace(
+siena_tx_qpace(
__in efx_txq_t *etp,
__in unsigned int ns);
static __checkReturn efx_rc_t
-falconsiena_tx_qflush(
+siena_tx_qflush(
__in efx_txq_t *etp);
static void
-falconsiena_tx_qenable(
+siena_tx_qenable(
__in efx_txq_t *etp);
__checkReturn efx_rc_t
-falconsiena_tx_qdesc_post(
+siena_tx_qdesc_post(
__in efx_txq_t *etp,
__in_ecount(n) efx_desc_t *ed,
__in unsigned int n,
@@ -107,7 +107,7 @@ falconsiena_tx_qdesc_post(
__inout unsigned int *addedp);
void
-falconsiena_tx_qdesc_dma_create(
+siena_tx_qdesc_dma_create(
__in efx_txq_t *etp,
__in efsys_dma_addr_t addr,
__in size_t size,
@@ -116,68 +116,42 @@ falconsiena_tx_qdesc_dma_create(
#if EFSYS_OPT_QSTATS
static void
-falconsiena_tx_qstats_update(
+siena_tx_qstats_update(
__in efx_txq_t *etp,
__inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
#endif
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
-
+#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_FALCON
-static efx_tx_ops_t __efx_tx_falcon_ops = {
- falconsiena_tx_init, /* etxo_init */
- falconsiena_tx_fini, /* etxo_fini */
- falconsiena_tx_qcreate, /* etxo_qcreate */
- falconsiena_tx_qdestroy, /* etxo_qdestroy */
- falconsiena_tx_qpost, /* etxo_qpost */
- falconsiena_tx_qpush, /* etxo_qpush */
- falconsiena_tx_qpace, /* etxo_qpace */
- falconsiena_tx_qflush, /* etxo_qflush */
- falconsiena_tx_qenable, /* etxo_qenable */
- NULL, /* etxo_qpio_enable */
- NULL, /* etxo_qpio_disable */
- NULL, /* etxo_qpio_write */
- NULL, /* etxo_qpio_post */
- falconsiena_tx_qdesc_post, /* etxo_qdesc_post */
- falconsiena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
- NULL, /* etxo_qdesc_tso_create */
- NULL, /* etxo_qdesc_tso2_create */
- NULL, /* etxo_qdesc_vlantci_create */
-#if EFSYS_OPT_QSTATS
- falconsiena_tx_qstats_update, /* etxo_qstats_update */
-#endif
-};
-#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
-static efx_tx_ops_t __efx_tx_siena_ops = {
- falconsiena_tx_init, /* etxo_init */
- falconsiena_tx_fini, /* etxo_fini */
- falconsiena_tx_qcreate, /* etxo_qcreate */
- falconsiena_tx_qdestroy, /* etxo_qdestroy */
- falconsiena_tx_qpost, /* etxo_qpost */
- falconsiena_tx_qpush, /* etxo_qpush */
- falconsiena_tx_qpace, /* etxo_qpace */
- falconsiena_tx_qflush, /* etxo_qflush */
- falconsiena_tx_qenable, /* etxo_qenable */
+static const efx_tx_ops_t __efx_tx_siena_ops = {
+ siena_tx_init, /* etxo_init */
+ siena_tx_fini, /* etxo_fini */
+ siena_tx_qcreate, /* etxo_qcreate */
+ siena_tx_qdestroy, /* etxo_qdestroy */
+ siena_tx_qpost, /* etxo_qpost */
+ siena_tx_qpush, /* etxo_qpush */
+ siena_tx_qpace, /* etxo_qpace */
+ siena_tx_qflush, /* etxo_qflush */
+ siena_tx_qenable, /* etxo_qenable */
NULL, /* etxo_qpio_enable */
NULL, /* etxo_qpio_disable */
NULL, /* etxo_qpio_write */
NULL, /* etxo_qpio_post */
- falconsiena_tx_qdesc_post, /* etxo_qdesc_post */
- falconsiena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ siena_tx_qdesc_post, /* etxo_qdesc_post */
+ siena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
NULL, /* etxo_qdesc_tso_create */
NULL, /* etxo_qdesc_tso2_create */
NULL, /* etxo_qdesc_vlantci_create */
#if EFSYS_OPT_QSTATS
- falconsiena_tx_qstats_update, /* etxo_qstats_update */
+ siena_tx_qstats_update, /* etxo_qstats_update */
#endif
};
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
-static efx_tx_ops_t __efx_tx_hunt_ops = {
+static const efx_tx_ops_t __efx_tx_hunt_ops = {
ef10_tx_init, /* etxo_init */
ef10_tx_fini, /* etxo_fini */
ef10_tx_qcreate, /* etxo_qcreate */
@@ -193,7 +167,7 @@ static efx_tx_ops_t __efx_tx_hunt_ops = {
ef10_tx_qpio_post, /* etxo_qpio_post */
ef10_tx_qdesc_post, /* etxo_qdesc_post */
ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
- hunt_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
#if EFSYS_OPT_QSTATS
@@ -203,7 +177,7 @@ static efx_tx_ops_t __efx_tx_hunt_ops = {
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
-static efx_tx_ops_t __efx_tx_medford_ops = {
+static const efx_tx_ops_t __efx_tx_medford_ops = {
ef10_tx_init, /* etxo_init */
ef10_tx_fini, /* etxo_fini */
ef10_tx_qcreate, /* etxo_qcreate */
@@ -232,7 +206,7 @@ static efx_tx_ops_t __efx_tx_medford_ops = {
efx_tx_init(
__in efx_nic_t *enp)
{
- efx_tx_ops_t *etxop;
+ const efx_tx_ops_t *etxop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -249,27 +223,21 @@ efx_tx_init(
}
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- etxop = (efx_tx_ops_t *)&__efx_tx_falcon_ops;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- etxop = (efx_tx_ops_t *)&__efx_tx_siena_ops;
+ etxop = &__efx_tx_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- etxop = (efx_tx_ops_t *)&__efx_tx_hunt_ops;
+ etxop = &__efx_tx_hunt_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- etxop = (efx_tx_ops_t *)&__efx_tx_medford_ops;
+ etxop = &__efx_tx_medford_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -306,7 +274,7 @@ fail1:
efx_tx_fini(
__in efx_nic_t *enp)
{
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
@@ -332,7 +300,7 @@ efx_tx_qcreate(
__deref_out efx_txq_t **etpp,
__out unsigned int *addedp)
{
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_txq_t *etp;
efx_rc_t rc;
@@ -381,7 +349,7 @@ efx_tx_qdestroy(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -403,7 +371,7 @@ efx_tx_qpost(
__inout unsigned int *addedp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
efx_rc_t rc;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -426,7 +394,7 @@ efx_tx_qpush(
__in unsigned int pushed)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -439,7 +407,7 @@ efx_tx_qpace(
__in unsigned int ns)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
efx_rc_t rc;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -459,7 +427,7 @@ efx_tx_qflush(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
efx_rc_t rc;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -479,7 +447,7 @@ efx_tx_qenable(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -491,7 +459,7 @@ efx_tx_qpio_enable(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
efx_rc_t rc;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -523,7 +491,7 @@ efx_tx_qpio_disable(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -539,7 +507,7 @@ efx_tx_qpio_write(
__in size_t pio_buf_offset)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
efx_rc_t rc;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -566,7 +534,7 @@ efx_tx_qpio_post(
__inout unsigned int *addedp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
efx_rc_t rc;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -594,7 +562,7 @@ efx_tx_qdesc_post(
__inout unsigned int *addedp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
efx_rc_t rc;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -619,7 +587,7 @@ efx_tx_qdesc_dma_create(
__out efx_desc_t *edp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
EFSYS_ASSERT(etxop->etxo_qdesc_dma_create != NULL);
@@ -636,7 +604,7 @@ efx_tx_qdesc_tso_create(
__out efx_desc_t *edp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
EFSYS_ASSERT(etxop->etxo_qdesc_tso_create != NULL);
@@ -654,7 +622,7 @@ efx_tx_qdesc_tso2_create(
__in int count)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL);
@@ -669,7 +637,7 @@ efx_tx_qdesc_vlantci_create(
__out efx_desc_t *edp)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
EFSYS_ASSERT(etxop->etxo_qdesc_vlantci_create != NULL);
@@ -685,7 +653,7 @@ efx_tx_qstats_update(
__inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
{
efx_nic_t *enp = etp->et_enp;
- efx_tx_ops_t *etxop = enp->en_etxop;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
@@ -694,10 +662,10 @@ efx_tx_qstats_update(
#endif
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_tx_init(
+siena_tx_init(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -760,7 +728,7 @@ falconsiena_tx_init(
} while (B_FALSE)
static __checkReturn efx_rc_t
-falconsiena_tx_qpost(
+siena_tx_qpost(
__in efx_txq_t *etp,
__in_ecount(n) efx_buffer_t *eb,
__in unsigned int n,
@@ -798,7 +766,7 @@ fail1:
}
static void
-falconsiena_tx_qpush(
+siena_tx_qpush(
__in efx_txq_t *etp,
__in unsigned int added,
__in unsigned int pushed)
@@ -829,7 +797,7 @@ falconsiena_tx_qpush(
#define EFX_TX_PACE_CLOCK_BASE 104
static __checkReturn efx_rc_t
-falconsiena_tx_qpace(
+siena_tx_qpace(
__in efx_txq_t *etp,
__in unsigned int ns)
{
@@ -872,7 +840,7 @@ fail1:
}
static __checkReturn efx_rc_t
-falconsiena_tx_qflush(
+siena_tx_qflush(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
@@ -892,7 +860,7 @@ falconsiena_tx_qflush(
}
static void
-falconsiena_tx_qenable(
+siena_tx_qenable(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
@@ -916,7 +884,7 @@ falconsiena_tx_qenable(
}
static __checkReturn efx_rc_t
-falconsiena_tx_qcreate(
+siena_tx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
@@ -933,6 +901,8 @@ falconsiena_tx_qcreate(
uint32_t size;
efx_rc_t rc;
+ _NOTE(ARGUNUSED(esmp))
+
EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS ==
(1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH));
EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS);
@@ -991,7 +961,7 @@ fail1:
}
__checkReturn efx_rc_t
-falconsiena_tx_qdesc_post(
+siena_tx_qdesc_post(
__in efx_txq_t *etp,
__in_ecount(n) efx_desc_t *ed,
__in unsigned int n,
@@ -1032,7 +1002,7 @@ fail1:
}
void
-falconsiena_tx_qdesc_dma_create(
+siena_tx_qdesc_dma_create(
__in efx_txq_t *etp,
__in efsys_dma_addr_t addr,
__in size_t size,
@@ -1055,7 +1025,7 @@ falconsiena_tx_qdesc_dma_create(
(uint32_t)(addr >> 32));
}
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
+#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_QSTATS
#if EFSYS_OPT_NAMES
@@ -1080,11 +1050,11 @@ efx_tx_qstat_name(
#endif /* EFSYS_OPT_NAMES */
#endif /* EFSYS_OPT_QSTATS */
-#if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA
+#if EFSYS_OPT_SIENA
#if EFSYS_OPT_QSTATS
static void
-falconsiena_tx_qstats_update(
+siena_tx_qstats_update(
__in efx_txq_t *etp,
__inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
{
@@ -1100,7 +1070,7 @@ falconsiena_tx_qstats_update(
#endif /* EFSYS_OPT_QSTATS */
static void
-falconsiena_tx_qdestroy(
+siena_tx_qdestroy(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
@@ -1114,10 +1084,10 @@ falconsiena_tx_qdestroy(
}
static void
-falconsiena_tx_fini(
+siena_tx_fini(
__in efx_nic_t *enp)
{
_NOTE(ARGUNUSED(enp))
}
-#endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/efx_types.h b/sys/dev/sfxge/common/efx_types.h
index ae4c6d9..4289145 100644
--- a/sys/dev/sfxge/common/efx_types.h
+++ b/sys/dev/sfxge/common/efx_types.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/efx_vpd.c b/sys/dev/sfxge/common/efx_vpd.c
index ebeddfe..091dfb5 100644
--- a/sys/dev/sfxge/common/efx_vpd.c
+++ b/sys/dev/sfxge/common/efx_vpd.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -54,26 +54,9 @@ __FBSDID("$FreeBSD$");
#define TAG_NAME_VPD_R_DECODE 0x10
#define TAG_NAME_VPD_W_DECODE 0x11
-#if EFSYS_OPT_FALCON
-
-static efx_vpd_ops_t __efx_vpd_falcon_ops = {
- NULL, /* evpdo_init */
- falcon_vpd_size, /* evpdo_size */
- falcon_vpd_read, /* evpdo_read */
- falcon_vpd_verify, /* evpdo_verify */
- NULL, /* evpdo_reinit */
- falcon_vpd_get, /* evpdo_get */
- falcon_vpd_set, /* evpdo_set */
- falcon_vpd_next, /* evpdo_next */
- falcon_vpd_write, /* evpdo_write */
- NULL, /* evpdo_fini */
-};
-
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
-static efx_vpd_ops_t __efx_vpd_siena_ops = {
+static const efx_vpd_ops_t __efx_vpd_siena_ops = {
siena_vpd_init, /* evpdo_init */
siena_vpd_size, /* evpdo_size */
siena_vpd_read, /* evpdo_read */
@@ -90,7 +73,7 @@ static efx_vpd_ops_t __efx_vpd_siena_ops = {
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static efx_vpd_ops_t __efx_vpd_ef10_ops = {
+static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
ef10_vpd_init, /* evpdo_init */
ef10_vpd_size, /* evpdo_size */
ef10_vpd_read, /* evpdo_read */
@@ -109,7 +92,7 @@ static efx_vpd_ops_t __efx_vpd_ef10_ops = {
efx_vpd_init(
__in efx_nic_t *enp)
{
- efx_vpd_ops_t *evpdop;
+ const efx_vpd_ops_t *evpdop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -117,27 +100,21 @@ efx_vpd_init(
EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD));
switch (enp->en_family) {
-#if EFSYS_OPT_FALCON
- case EFX_FAMILY_FALCON:
- evpdop = (efx_vpd_ops_t *)&__efx_vpd_falcon_ops;
- break;
-#endif /* EFSYS_OPT_FALCON */
-
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- evpdop = (efx_vpd_ops_t *)&__efx_vpd_siena_ops;
+ evpdop = &__efx_vpd_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- evpdop = (efx_vpd_ops_t *)&__efx_vpd_ef10_ops;
+ evpdop = &__efx_vpd_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- evpdop = (efx_vpd_ops_t *)&__efx_vpd_ef10_ops;
+ evpdop = &__efx_vpd_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
@@ -170,7 +147,7 @@ efx_vpd_size(
__in efx_nic_t *enp,
__out size_t *sizep)
{
- efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -193,7 +170,7 @@ efx_vpd_read(
__out_bcount(size) caddr_t data,
__in size_t size)
{
- efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -216,7 +193,7 @@ efx_vpd_verify(
__in_bcount(size) caddr_t data,
__in size_t size)
{
- efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -239,7 +216,7 @@ efx_vpd_reinit(
__in_bcount(size) caddr_t data,
__in size_t size)
{
- efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -270,14 +247,18 @@ efx_vpd_get(
__in size_t size,
__inout efx_vpd_value_t *evvp)
{
- efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
- if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0)
+ if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
goto fail1;
+ }
return (0);
@@ -294,7 +275,7 @@ efx_vpd_set(
__in size_t size,
__in efx_vpd_value_t *evvp)
{
- efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -319,7 +300,7 @@ efx_vpd_next(
__out efx_vpd_value_t *evvp,
__inout unsigned int *contp)
{
- efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -342,7 +323,7 @@ efx_vpd_write(
__in_bcount(size) caddr_t data,
__in size_t size)
{
- efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
@@ -1022,7 +1003,7 @@ fail1:
efx_vpd_fini(
__in efx_nic_t *enp)
{
- efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
diff --git a/sys/dev/sfxge/common/efx_wol.c b/sys/dev/sfxge/common/efx_wol.c
index aea3c55..dd1a85f 100644
--- a/sys/dev/sfxge/common/efx_wol.c
+++ b/sys/dev/sfxge/common/efx_wol.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/hunt_impl.h b/sys/dev/sfxge/common/hunt_impl.h
index 49ecbea..1ec2909 100644
--- a/sys/dev/sfxge/common/hunt_impl.h
+++ b/sys/dev/sfxge/common/hunt_impl.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,503 +42,36 @@
extern "C" {
#endif
-/*
- * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
- * possibly be increased, or the write size reported by newer firmware used
- * instead.
- */
-#define EF10_NVRAM_CHUNK 0x80
-
-/* Alignment requirement for value written to RX WPTR:
- * the WPTR must be aligned to an 8 descriptor boundary
- */
-#define EF10_RX_WPTR_ALIGN 8
-
-/*
- * Max byte offset into the packet the TCP header must start for the hardware
- * to be able to parse the packet correctly.
- * FIXME: Move to ef10_impl.h when it is included in all driver builds.
- */
-#define EF10_TCP_HEADER_OFFSET_LIMIT 208
-
-/* Invalid RSS context handle */
-#define EF10_RSS_CONTEXT_INVALID (0xffffffff)
-
-
-/* EV */
-
- __checkReturn efx_rc_t
-ef10_ev_init(
- __in efx_nic_t *enp);
-
- void
-ef10_ev_fini(
- __in efx_nic_t *enp);
-
- __checkReturn efx_rc_t
-ef10_ev_qcreate(
- __in efx_nic_t *enp,
- __in unsigned int index,
- __in efsys_mem_t *esmp,
- __in size_t n,
- __in uint32_t id,
- __in efx_evq_t *eep);
-
- void
-ef10_ev_qdestroy(
- __in efx_evq_t *eep);
-
- __checkReturn efx_rc_t
-ef10_ev_qprime(
- __in efx_evq_t *eep,
- __in unsigned int count);
-
- void
-ef10_ev_qpost(
- __in efx_evq_t *eep,
- __in uint16_t data);
-
- __checkReturn efx_rc_t
-ef10_ev_qmoderate(
- __in efx_evq_t *eep,
- __in unsigned int us);
-
-#if EFSYS_OPT_QSTATS
- void
-ef10_ev_qstats_update(
- __in efx_evq_t *eep,
- __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
-#endif /* EFSYS_OPT_QSTATS */
-
- void
-ef10_ev_rxlabel_init(
- __in efx_evq_t *eep,
- __in efx_rxq_t *erp,
- __in unsigned int label);
-
- void
-ef10_ev_rxlabel_fini(
- __in efx_evq_t *eep,
- __in unsigned int label);
-
-/* INTR */
-
- __checkReturn efx_rc_t
-ef10_intr_init(
- __in efx_nic_t *enp,
- __in efx_intr_type_t type,
- __in efsys_mem_t *esmp);
-
- void
-ef10_intr_enable(
- __in efx_nic_t *enp);
-
- void
-ef10_intr_disable(
- __in efx_nic_t *enp);
-
- void
-ef10_intr_disable_unlocked(
- __in efx_nic_t *enp);
+/* Missing register definitions */
+#ifndef ER_DZ_TX_PIOBUF_OFST
+#define ER_DZ_TX_PIOBUF_OFST 0x00001000
+#endif
+#ifndef ER_DZ_TX_PIOBUF_STEP
+#define ER_DZ_TX_PIOBUF_STEP 8192
+#endif
+#ifndef ER_DZ_TX_PIOBUF_ROWS
+#define ER_DZ_TX_PIOBUF_ROWS 2048
+#endif
- __checkReturn efx_rc_t
-ef10_intr_trigger(
- __in efx_nic_t *enp,
- __in unsigned int level);
+#ifndef ER_DZ_TX_PIOBUF_SIZE
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+#endif
- void
-ef10_intr_status_line(
- __in efx_nic_t *enp,
- __out boolean_t *fatalp,
- __out uint32_t *qmaskp);
+#define HUNT_PIOBUF_NBUFS (16)
+#define HUNT_PIOBUF_SIZE (ER_DZ_TX_PIOBUF_SIZE)
- void
-ef10_intr_status_message(
- __in efx_nic_t *enp,
- __in unsigned int message,
- __out boolean_t *fatalp);
+#define HUNT_MIN_PIO_ALLOC_SIZE (HUNT_PIOBUF_SIZE / 32)
- void
-ef10_intr_fatal(
- __in efx_nic_t *enp);
- void
-ef10_intr_fini(
- __in efx_nic_t *enp);
/* NIC */
extern __checkReturn efx_rc_t
-ef10_nic_probe(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
hunt_board_cfg(
__in efx_nic_t *enp);
-extern __checkReturn efx_rc_t
-ef10_nic_set_drv_limits(
- __inout efx_nic_t *enp,
- __in efx_drv_limits_t *edlp);
-
-extern __checkReturn efx_rc_t
-ef10_nic_get_vi_pool(
- __in efx_nic_t *enp,
- __out uint32_t *vi_countp);
-
-extern __checkReturn efx_rc_t
-ef10_nic_get_bar_region(
- __in efx_nic_t *enp,
- __in efx_nic_region_t region,
- __out uint32_t *offsetp,
- __out size_t *sizep);
-
-extern __checkReturn efx_rc_t
-ef10_nic_reset(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_nic_init(
- __in efx_nic_t *enp);
-
-#if EFSYS_OPT_DIAG
-
-extern __checkReturn efx_rc_t
-ef10_nic_register_test(
- __in efx_nic_t *enp);
-
-#endif /* EFSYS_OPT_DIAG */
-
-extern void
-ef10_nic_fini(
- __in efx_nic_t *enp);
-
-extern void
-ef10_nic_unprobe(
- __in efx_nic_t *enp);
-
-
-/* MAC */
-
-extern __checkReturn efx_rc_t
-ef10_mac_poll(
- __in efx_nic_t *enp,
- __out efx_link_mode_t *link_modep);
-
-extern __checkReturn efx_rc_t
-ef10_mac_up(
- __in efx_nic_t *enp,
- __out boolean_t *mac_upp);
-
-extern __checkReturn efx_rc_t
-ef10_mac_addr_set(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_mac_pdu_set(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_mac_reconfigure(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_mac_multicast_list_set(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_mac_filter_default_rxq_set(
- __in efx_nic_t *enp,
- __in efx_rxq_t *erp,
- __in boolean_t using_rss);
-
-extern void
-ef10_mac_filter_default_rxq_clear(
- __in efx_nic_t *enp);
-
-#if EFSYS_OPT_LOOPBACK
-
-extern __checkReturn efx_rc_t
-ef10_mac_loopback_set(
- __in efx_nic_t *enp,
- __in efx_link_mode_t link_mode,
- __in efx_loopback_type_t loopback_type);
-
-#endif /* EFSYS_OPT_LOOPBACK */
-
-#if EFSYS_OPT_MAC_STATS
-
-extern __checkReturn efx_rc_t
-ef10_mac_stats_update(
- __in efx_nic_t *enp,
- __in efsys_mem_t *esmp,
- __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
- __inout_opt uint32_t *generationp);
-
-#endif /* EFSYS_OPT_MAC_STATS */
-
-
-/* MCDI */
-
-#if EFSYS_OPT_MCDI
-
-extern __checkReturn efx_rc_t
-ef10_mcdi_init(
- __in efx_nic_t *enp,
- __in const efx_mcdi_transport_t *mtp);
-
-extern void
-ef10_mcdi_fini(
- __in efx_nic_t *enp);
-
-extern void
-ef10_mcdi_send_request(
- __in efx_nic_t *enp,
- __in void *hdrp,
- __in size_t hdr_len,
- __in void *sdup,
- __in size_t sdu_len);
-
-extern __checkReturn boolean_t
-ef10_mcdi_poll_response(
- __in efx_nic_t *enp);
-
-extern void
-ef10_mcdi_read_response(
- __in efx_nic_t *enp,
- __out_bcount(length) void *bufferp,
- __in size_t offset,
- __in size_t length);
-
-extern efx_rc_t
-ef10_mcdi_poll_reboot(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_mcdi_feature_supported(
- __in efx_nic_t *enp,
- __in efx_mcdi_feature_id_t id,
- __out boolean_t *supportedp);
-
-#endif /* EFSYS_OPT_MCDI */
-
-/* NVRAM */
-
-#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
-
-extern __checkReturn efx_rc_t
-ef10_nvram_buf_read_tlv(
- __in efx_nic_t *enp,
- __in_bcount(max_seg_size) caddr_t seg_data,
- __in size_t max_seg_size,
- __in uint32_t tag,
- __deref_out_bcount_opt(*sizep) caddr_t *datap,
- __out size_t *sizep);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_buf_write_tlv(
- __inout_bcount(partn_size) caddr_t partn_data,
- __in size_t partn_size,
- __in uint32_t tag,
- __in_bcount(tag_size) caddr_t tag_data,
- __in size_t tag_size,
- __out size_t *total_lengthp);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_read_tlv(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __in uint32_t tag,
- __deref_out_bcount_opt(*sizep) caddr_t *datap,
- __out size_t *sizep);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_write_tlv(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __in uint32_t tag,
- __in_bcount(size) caddr_t data,
- __in size_t size);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_write_segment_tlv(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __in uint32_t tag,
- __in_bcount(size) caddr_t data,
- __in size_t size,
- __in boolean_t all_segments);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_lock(
- __in efx_nic_t *enp,
- __in uint32_t partn);
-
-extern void
-ef10_nvram_partn_unlock(
- __in efx_nic_t *enp,
- __in uint32_t partn);
-
-#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
-
-#if EFSYS_OPT_NVRAM
-
-#if EFSYS_OPT_DIAG
-
-extern __checkReturn efx_rc_t
-ef10_nvram_test(
- __in efx_nic_t *enp);
-
-#endif /* EFSYS_OPT_DIAG */
-
-extern __checkReturn efx_rc_t
-ef10_nvram_type_to_partn(
- __in efx_nic_t *enp,
- __in efx_nvram_type_t type,
- __out uint32_t *partnp);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_size(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __out size_t *sizep);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_rw_start(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __out size_t *chunk_sizep);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_read_mode(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __in unsigned int offset,
- __out_bcount(size) caddr_t data,
- __in size_t size,
- __in uint32_t mode);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_read(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __in unsigned int offset,
- __out_bcount(size) caddr_t data,
- __in size_t size);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_erase(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __in unsigned int offset,
- __in size_t size);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_write(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __in unsigned int offset,
- __out_bcount(size) caddr_t data,
- __in size_t size);
-
-extern void
-ef10_nvram_partn_rw_finish(
- __in efx_nic_t *enp,
- __in uint32_t partn);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_get_version(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __out uint32_t *subtypep,
- __out_ecount(4) uint16_t version[4]);
-
-extern __checkReturn efx_rc_t
-ef10_nvram_partn_set_version(
- __in efx_nic_t *enp,
- __in uint32_t partn,
- __in_ecount(4) uint16_t version[4]);
-
-#endif /* EFSYS_OPT_NVRAM */
-
/* PHY */
-typedef struct ef10_link_state_s {
- uint32_t els_adv_cap_mask;
- uint32_t els_lp_cap_mask;
- unsigned int els_fcntl;
- efx_link_mode_t els_link_mode;
-#if EFSYS_OPT_LOOPBACK
- efx_loopback_type_t els_loopback;
-#endif
- boolean_t els_mac_up;
-} ef10_link_state_t;
-
-extern void
-ef10_phy_link_ev(
- __in efx_nic_t *enp,
- __in efx_qword_t *eqp,
- __out efx_link_mode_t *link_modep);
-
-extern __checkReturn efx_rc_t
-ef10_phy_get_link(
- __in efx_nic_t *enp,
- __out ef10_link_state_t *elsp);
-
-extern __checkReturn efx_rc_t
-ef10_phy_power(
- __in efx_nic_t *enp,
- __in boolean_t on);
-
-extern __checkReturn efx_rc_t
-ef10_phy_reconfigure(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_phy_verify(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_phy_oui_get(
- __in efx_nic_t *enp,
- __out uint32_t *ouip);
-
-#if EFSYS_OPT_PHY_STATS
-
-extern __checkReturn efx_rc_t
-ef10_phy_stats_update(
- __in efx_nic_t *enp,
- __in efsys_mem_t *esmp,
- __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
-
-#endif /* EFSYS_OPT_PHY_STATS */
-
-#if EFSYS_OPT_PHY_PROPS
-
-#if EFSYS_OPT_NAMES
-
-extern const char *
-ef10_phy_prop_name(
- __in efx_nic_t *enp,
- __in unsigned int id);
-
-#endif /* EFSYS_OPT_NAMES */
-
-extern __checkReturn efx_rc_t
-ef10_phy_prop_get(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t flags,
- __out uint32_t *valp);
-
-extern __checkReturn efx_rc_t
-ef10_phy_prop_set(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t val);
-
-#endif /* EFSYS_OPT_PHY_PROPS */
-
#if EFSYS_OPT_BIST
extern __checkReturn efx_rc_t
@@ -569,481 +102,6 @@ hunt_bist_stop(
#endif /* EFSYS_OPT_BIST */
-/* SRAM */
-
-#if EFSYS_OPT_DIAG
-
-extern __checkReturn efx_rc_t
-ef10_sram_test(
- __in efx_nic_t *enp,
- __in efx_sram_pattern_fn_t func);
-
-#endif /* EFSYS_OPT_DIAG */
-
-
-/* TX */
-
-extern __checkReturn efx_rc_t
-ef10_tx_init(
- __in efx_nic_t *enp);
-
-extern void
-ef10_tx_fini(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_tx_qcreate(
- __in efx_nic_t *enp,
- __in unsigned int index,
- __in unsigned int label,
- __in efsys_mem_t *esmp,
- __in size_t n,
- __in uint32_t id,
- __in uint16_t flags,
- __in efx_evq_t *eep,
- __in efx_txq_t *etp,
- __out unsigned int *addedp);
-
-extern void
-ef10_tx_qdestroy(
- __in efx_txq_t *etp);
-
-extern __checkReturn efx_rc_t
-ef10_tx_qpost(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_buffer_t *eb,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp);
-
-extern void
-ef10_tx_qpush(
- __in efx_txq_t *etp,
- __in unsigned int added,
- __in unsigned int pushed);
-
-extern __checkReturn efx_rc_t
-ef10_tx_qpace(
- __in efx_txq_t *etp,
- __in unsigned int ns);
-
-extern __checkReturn efx_rc_t
-ef10_tx_qflush(
- __in efx_txq_t *etp);
-
-extern void
-ef10_tx_qenable(
- __in efx_txq_t *etp);
-
-extern __checkReturn efx_rc_t
-ef10_tx_qpio_enable(
- __in efx_txq_t *etp);
-
-extern void
-ef10_tx_qpio_disable(
- __in efx_txq_t *etp);
-
-extern __checkReturn efx_rc_t
-ef10_tx_qpio_write(
- __in efx_txq_t *etp,
- __in_ecount(buf_length) uint8_t *buffer,
- __in size_t buf_length,
- __in size_t pio_buf_offset);
-
-extern __checkReturn efx_rc_t
-ef10_tx_qpio_post(
- __in efx_txq_t *etp,
- __in size_t pkt_length,
- __in unsigned int completed,
- __inout unsigned int *addedp);
-
-extern __checkReturn efx_rc_t
-ef10_tx_qdesc_post(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_desc_t *ed,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp);
-
-extern void
-ef10_tx_qdesc_dma_create(
- __in efx_txq_t *etp,
- __in efsys_dma_addr_t addr,
- __in size_t size,
- __in boolean_t eop,
- __out efx_desc_t *edp);
-
-extern void
-hunt_tx_qdesc_tso_create(
- __in efx_txq_t *etp,
- __in uint16_t ipv4_id,
- __in uint32_t tcp_seq,
- __in uint8_t tcp_flags,
- __out efx_desc_t *edp);
-
-extern void
-ef10_tx_qdesc_tso2_create(
- __in efx_txq_t *etp,
- __in uint16_t ipv4_id,
- __in uint32_t tcp_seq,
- __in uint16_t tcp_mss,
- __out_ecount(count) efx_desc_t *edp,
- __in int count);
-
-extern void
-ef10_tx_qdesc_vlantci_create(
- __in efx_txq_t *etp,
- __in uint16_t vlan_tci,
- __out efx_desc_t *edp);
-
-
-#if EFSYS_OPT_QSTATS
-
-extern void
-ef10_tx_qstats_update(
- __in efx_txq_t *etp,
- __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
-
-#endif /* EFSYS_OPT_QSTATS */
-
-/* PIO */
-
-/* Missing register definitions */
-#ifndef ER_DZ_TX_PIOBUF_OFST
-#define ER_DZ_TX_PIOBUF_OFST 0x00001000
-#endif
-#ifndef ER_DZ_TX_PIOBUF_STEP
-#define ER_DZ_TX_PIOBUF_STEP 8192
-#endif
-#ifndef ER_DZ_TX_PIOBUF_ROWS
-#define ER_DZ_TX_PIOBUF_ROWS 2048
-#endif
-
-#ifndef ER_DZ_TX_PIOBUF_SIZE
-#define ER_DZ_TX_PIOBUF_SIZE 2048
-#endif
-
-#define HUNT_PIOBUF_NBUFS (16)
-#define HUNT_PIOBUF_SIZE (ER_DZ_TX_PIOBUF_SIZE)
-
-#define HUNT_MIN_PIO_ALLOC_SIZE (HUNT_PIOBUF_SIZE / 32)
-
-#define EF10_LEGACY_PF_PRIVILEGE_MASK \
- (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
- MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
-
-#define EF10_LEGACY_VF_PRIVILEGE_MASK 0
-
-typedef uint32_t efx_piobuf_handle_t;
-
-#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1)
-
-extern __checkReturn efx_rc_t
-ef10_nic_pio_alloc(
- __inout efx_nic_t *enp,
- __out uint32_t *bufnump,
- __out efx_piobuf_handle_t *handlep,
- __out uint32_t *blknump,
- __out uint32_t *offsetp,
- __out size_t *sizep);
-
-extern __checkReturn efx_rc_t
-ef10_nic_pio_free(
- __inout efx_nic_t *enp,
- __in uint32_t bufnum,
- __in uint32_t blknum);
-
-extern __checkReturn efx_rc_t
-ef10_nic_pio_link(
- __inout efx_nic_t *enp,
- __in uint32_t vi_index,
- __in efx_piobuf_handle_t handle);
-
-extern __checkReturn efx_rc_t
-ef10_nic_pio_unlink(
- __inout efx_nic_t *enp,
- __in uint32_t vi_index);
-
-
-/* VPD */
-
-#if EFSYS_OPT_VPD
-
-extern __checkReturn efx_rc_t
-ef10_vpd_init(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
-ef10_vpd_size(
- __in efx_nic_t *enp,
- __out size_t *sizep);
-
-extern __checkReturn efx_rc_t
-ef10_vpd_read(
- __in efx_nic_t *enp,
- __out_bcount(size) caddr_t data,
- __in size_t size);
-
-extern __checkReturn efx_rc_t
-ef10_vpd_verify(
- __in efx_nic_t *enp,
- __in_bcount(size) caddr_t data,
- __in size_t size);
-
-extern __checkReturn efx_rc_t
-ef10_vpd_reinit(
- __in efx_nic_t *enp,
- __in_bcount(size) caddr_t data,
- __in size_t size);
-
-extern __checkReturn efx_rc_t
-ef10_vpd_get(
- __in efx_nic_t *enp,
- __in_bcount(size) caddr_t data,
- __in size_t size,
- __inout efx_vpd_value_t *evvp);
-
-extern __checkReturn efx_rc_t
-ef10_vpd_set(
- __in efx_nic_t *enp,
- __in_bcount(size) caddr_t data,
- __in size_t size,
- __in efx_vpd_value_t *evvp);
-
-extern __checkReturn efx_rc_t
-ef10_vpd_next(
- __in efx_nic_t *enp,
- __in_bcount(size) caddr_t data,
- __in size_t size,
- __out efx_vpd_value_t *evvp,
- __inout unsigned int *contp);
-
-extern __checkReturn efx_rc_t
-ef10_vpd_write(
- __in efx_nic_t *enp,
- __in_bcount(size) caddr_t data,
- __in size_t size);
-
-extern void
-ef10_vpd_fini(
- __in efx_nic_t *enp);
-
-#endif /* EFSYS_OPT_VPD */
-
-
-/* RX */
-
-extern __checkReturn efx_rc_t
-ef10_rx_init(
- __in efx_nic_t *enp);
-
-#if EFSYS_OPT_RX_SCATTER
-extern __checkReturn efx_rc_t
-ef10_rx_scatter_enable(
- __in efx_nic_t *enp,
- __in unsigned int buf_size);
-#endif /* EFSYS_OPT_RX_SCATTER */
-
-
-#if EFSYS_OPT_RX_SCALE
-
-extern __checkReturn efx_rc_t
-ef10_rx_scale_mode_set(
- __in efx_nic_t *enp,
- __in efx_rx_hash_alg_t alg,
- __in efx_rx_hash_type_t type,
- __in boolean_t insert);
-
-extern __checkReturn efx_rc_t
-ef10_rx_scale_key_set(
- __in efx_nic_t *enp,
- __in_ecount(n) uint8_t *key,
- __in size_t n);
-
-extern __checkReturn efx_rc_t
-ef10_rx_scale_tbl_set(
- __in efx_nic_t *enp,
- __in_ecount(n) unsigned int *table,
- __in size_t n);
-
-extern __checkReturn uint32_t
-ef10_rx_prefix_hash(
- __in efx_nic_t *enp,
- __in efx_rx_hash_alg_t func,
- __in uint8_t *buffer);
-
-#endif /* EFSYS_OPT_RX_SCALE */
-
-extern __checkReturn efx_rc_t
-ef10_rx_prefix_pktlen(
- __in efx_nic_t *enp,
- __in uint8_t *buffer,
- __out uint16_t *lengthp);
-
-extern void
-ef10_rx_qpost(
- __in efx_rxq_t *erp,
- __in_ecount(n) efsys_dma_addr_t *addrp,
- __in size_t size,
- __in unsigned int n,
- __in unsigned int completed,
- __in unsigned int added);
-
-extern void
-ef10_rx_qpush(
- __in efx_rxq_t *erp,
- __in unsigned int added,
- __inout unsigned int *pushedp);
-
-extern __checkReturn efx_rc_t
-ef10_rx_qflush(
- __in efx_rxq_t *erp);
-
-extern void
-ef10_rx_qenable(
- __in efx_rxq_t *erp);
-
-extern __checkReturn efx_rc_t
-ef10_rx_qcreate(
- __in efx_nic_t *enp,
- __in unsigned int index,
- __in unsigned int label,
- __in efx_rxq_type_t type,
- __in efsys_mem_t *esmp,
- __in size_t n,
- __in uint32_t id,
- __in efx_evq_t *eep,
- __in efx_rxq_t *erp);
-
-extern void
-ef10_rx_qdestroy(
- __in efx_rxq_t *erp);
-
-extern void
-ef10_rx_fini(
- __in efx_nic_t *enp);
-
-#if EFSYS_OPT_FILTER
-
-typedef struct ef10_filter_handle_s {
- uint32_t efh_lo;
- uint32_t efh_hi;
-} ef10_filter_handle_t;
-
-typedef struct ef10_filter_entry_s {
- uintptr_t efe_spec; /* pointer to filter spec plus busy bit */
- ef10_filter_handle_t efe_handle;
-} ef10_filter_entry_t;
-
-/*
- * BUSY flag indicates that an update is in progress.
- * AUTO_OLD flag is used to mark and sweep MAC packet filters.
- */
-#define EFX_EF10_FILTER_FLAG_BUSY 1U
-#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U
-#define EFX_EF10_FILTER_FLAGS 3U
-
-/*
- * Size of the hash table used by the driver. Doesn't need to be the
- * same size as the hardware's table.
- */
-#define EFX_EF10_FILTER_TBL_ROWS 8192
-
-/* Allow for the broadcast address to be added to the multicast list */
-#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1)
-
-typedef struct ef10_filter_table_s {
- ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
- efx_rxq_t * eft_default_rxq;
- boolean_t eft_using_rss;
- uint32_t eft_unicst_filter_index;
- boolean_t eft_unicst_filter_set;
- uint32_t eft_mulcst_filter_indexes[
- EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
- uint32_t eft_mulcst_filter_count;
-} ef10_filter_table_t;
-
- __checkReturn efx_rc_t
-ef10_filter_init(
- __in efx_nic_t *enp);
-
- void
-ef10_filter_fini(
- __in efx_nic_t *enp);
-
- __checkReturn efx_rc_t
-ef10_filter_restore(
- __in efx_nic_t *enp);
-
- __checkReturn efx_rc_t
-ef10_filter_add(
- __in efx_nic_t *enp,
- __inout efx_filter_spec_t *spec,
- __in boolean_t may_replace);
-
- __checkReturn efx_rc_t
-ef10_filter_delete(
- __in efx_nic_t *enp,
- __inout efx_filter_spec_t *spec);
-
-extern __checkReturn efx_rc_t
-ef10_filter_supported_filters(
- __in efx_nic_t *enp,
- __out uint32_t *list,
- __out size_t *length);
-
-extern __checkReturn efx_rc_t
-ef10_filter_reconfigure(
- __in efx_nic_t *enp,
- __in_ecount(6) uint8_t const *mac_addr,
- __in boolean_t all_unicst,
- __in boolean_t mulcst,
- __in boolean_t all_mulcst,
- __in boolean_t brdcst,
- __in_ecount(6*count) uint8_t const *addrs,
- __in int count);
-
-extern void
-ef10_filter_get_default_rxq(
- __in efx_nic_t *enp,
- __out efx_rxq_t **erpp,
- __out boolean_t *using_rss);
-
-extern void
-ef10_filter_default_rxq_set(
- __in efx_nic_t *enp,
- __in efx_rxq_t *erp,
- __in boolean_t using_rss);
-
-extern void
-ef10_filter_default_rxq_clear(
- __in efx_nic_t *enp);
-
-
-#endif /* EFSYS_OPT_FILTER */
-
-extern __checkReturn efx_rc_t
-efx_mcdi_get_function_info(
- __in efx_nic_t *enp,
- __out uint32_t *pfp,
- __out_opt uint32_t *vfp);
-
-extern __checkReturn efx_rc_t
-efx_mcdi_privilege_mask(
- __in efx_nic_t *enp,
- __in uint32_t pf,
- __in uint32_t vf,
- __out uint32_t *maskp);
-
#ifdef __cplusplus
}
#endif
diff --git a/sys/dev/sfxge/common/hunt_nic.c b/sys/dev/sfxge/common/hunt_nic.c
index 7f0c068..486e424 100644
--- a/sys/dev/sfxge/common/hunt_nic.c
+++ b/sys/dev/sfxge/common/hunt_nic.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,925 +41,53 @@ __FBSDID("$FreeBSD$");
#include "ef10_tlv_layout.h"
- __checkReturn efx_rc_t
-efx_mcdi_get_port_assignment(
- __in efx_nic_t *enp,
- __out uint32_t *portp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
- MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_port_modes(
+static __checkReturn efx_rc_t
+hunt_nic_get_required_pcie_bandwidth(
__in efx_nic_t *enp,
- __out uint32_t *modesp)
+ __out uint32_t *bandwidth_mbpsp)
{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
- MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ uint32_t port_modes;
+ uint32_t max_port_mode;
+ uint32_t bandwidth;
efx_rc_t rc;
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_PORT_MODES;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
/*
- * Require only Modes and DefaultMode fields.
- * (CurrentMode field was added for Medford)
+ * On Huntington, the firmware may not give us the current port mode, so
+ * we need to go by the set of available port modes and assume the most
+ * capable mode is in use.
*/
- if (req.emr_out_length_used <
- MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-static __checkReturn efx_rc_t
-efx_mcdi_vadaptor_alloc(
- __in efx_nic_t *enp,
- __in uint32_t port_id)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
- MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
- efx_rc_t rc;
-
- EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
- MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
- VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
- enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_vadaptor_free(
- __in efx_nic_t *enp,
- __in uint32_t port_id)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
- MC_CMD_VADAPTOR_FREE_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_VADAPTOR_FREE;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_mac_address_pf(
- __in efx_nic_t *enp,
- __out_ecount_opt(6) uint8_t mac_addrp[6])
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
- MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
- rc = ENOENT;
- goto fail3;
- }
-
- if (mac_addrp != NULL) {
- uint8_t *addrp;
-
- addrp = MCDI_OUT2(req, uint8_t,
- GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
-
- EFX_MAC_ADDR_COPY(mac_addrp, addrp);
- }
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_mac_address_vf(
- __in efx_nic_t *enp,
- __out_ecount_opt(6) uint8_t mac_addrp[6])
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
- MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
-
- MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
- EVB_PORT_ID_ASSIGNED);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used <
- MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- if (MCDI_OUT_DWORD(req,
- VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
- rc = ENOENT;
- goto fail3;
- }
-
- if (mac_addrp != NULL) {
- uint8_t *addrp;
-
- addrp = MCDI_OUT2(req, uint8_t,
- VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
-
- EFX_MAC_ADDR_COPY(mac_addrp, addrp);
- }
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_clock(
- __in efx_nic_t *enp,
- __out uint32_t *sys_freqp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
- MC_CMD_GET_CLOCK_OUT_LEN)];
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_CLOCK;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
- if (*sys_freqp == 0) {
- rc = EINVAL;
- goto fail3;
- }
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_vector_cfg(
- __in efx_nic_t *enp,
- __out_opt uint32_t *vec_basep,
- __out_opt uint32_t *pf_nvecp,
- __out_opt uint32_t *vf_nvecp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
- MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- if (vec_basep != NULL)
- *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
- if (pf_nvecp != NULL)
- *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
- if (vf_nvecp != NULL)
- *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_get_capabilities(
- __in efx_nic_t *enp,
- __out uint32_t *flagsp,
- __out uint32_t *flags2p)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
- MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_CAPABILITIES;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
-
- if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
- *flags2p = 0;
- else
- *flags2p = MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-static __checkReturn efx_rc_t
-efx_mcdi_alloc_vis(
- __in efx_nic_t *enp,
- __in uint32_t min_vi_count,
- __in uint32_t max_vi_count,
- __out uint32_t *vi_basep,
- __out uint32_t *vi_countp,
- __out uint32_t *vi_shiftp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
- MC_CMD_ALLOC_VIS_OUT_LEN)];
- efx_rc_t rc;
-
- if (vi_countp == NULL) {
- rc = EINVAL;
- goto fail1;
- }
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_ALLOC_VIS;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_ALLOC_VIS_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
- MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail2;
- }
-
- if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail3;
- }
-
- *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
- *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
-
- /* Report VI_SHIFT if available (always zero for Huntington) */
- if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
- *vi_shiftp = 0;
- else
- *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-static __checkReturn efx_rc_t
-efx_mcdi_free_vis(
- __in efx_nic_t *enp)
-{
- efx_mcdi_req_t req;
- efx_rc_t rc;
-
- EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
- EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
-
- req.emr_cmd = MC_CMD_FREE_VIS;
- req.emr_in_buf = NULL;
- req.emr_in_length = 0;
- req.emr_out_buf = NULL;
- req.emr_out_length = 0;
-
- efx_mcdi_execute_quiet(enp, &req);
-
- /* Ignore ELREADY (no allocated VIs, so nothing to free) */
- if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-static __checkReturn efx_rc_t
-efx_mcdi_alloc_piobuf(
- __in efx_nic_t *enp,
- __out efx_piobuf_handle_t *handlep)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
- MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
- efx_rc_t rc;
-
- if (handlep == NULL) {
- rc = EINVAL;
- goto fail1;
- }
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
-
- efx_mcdi_execute_quiet(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail2;
- }
-
- if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail3;
- }
-
- *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_free_piobuf(
- __in efx_nic_t *enp,
- __in efx_piobuf_handle_t handle)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
- MC_CMD_FREE_PIOBUF_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_FREE_PIOBUF;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
-
- efx_mcdi_execute_quiet(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_link_piobuf(
- __in efx_nic_t *enp,
- __in uint32_t vi_index,
- __in efx_piobuf_handle_t handle)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
- MC_CMD_LINK_PIOBUF_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_LINK_PIOBUF;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
- MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_unlink_piobuf(
- __in efx_nic_t *enp,
- __in uint32_t vi_index)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
- MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) {
+ /* No port mode info available */
+ bandwidth = 0;
+ goto out;
}
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static void
-ef10_nic_alloc_piobufs(
- __in efx_nic_t *enp,
- __in uint32_t max_piobuf_count)
-{
- efx_piobuf_handle_t *handlep;
- unsigned int i;
- efx_rc_t rc;
-
- EFSYS_ASSERT3U(max_piobuf_count, <=,
- EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
-
- enp->en_arch.ef10.ena_piobuf_count = 0;
-
- for (i = 0; i < max_piobuf_count; i++) {
- handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
-
- if ((rc = efx_mcdi_alloc_piobuf(enp, handlep)) != 0)
+ if (port_modes & (1 << TLV_PORT_MODE_40G_40G)) {
+ /*
+ * This needs the full PCIe bandwidth (and could use
+ * more) - roughly 64 Gbit/s for 8 lanes of Gen3.
+ */
+ if ((rc = efx_nic_calculate_pcie_link_bandwidth(8,
+ EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)
goto fail1;
-
- enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
- enp->en_arch.ef10.ena_piobuf_count++;
- }
-
- return;
-
-fail1:
- for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
- handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
-
- efx_mcdi_free_piobuf(enp, *handlep);
- *handlep = EFX_PIOBUF_HANDLE_INVALID;
- }
- enp->en_arch.ef10.ena_piobuf_count = 0;
-}
-
-
-static void
-ef10_nic_free_piobufs(
- __in efx_nic_t *enp)
-{
- efx_piobuf_handle_t *handlep;
- unsigned int i;
-
- for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
- handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
-
- efx_mcdi_free_piobuf(enp, *handlep);
- *handlep = EFX_PIOBUF_HANDLE_INVALID;
- }
- enp->en_arch.ef10.ena_piobuf_count = 0;
-}
-
-/* Sub-allocate a block from a piobuf */
- __checkReturn efx_rc_t
-ef10_nic_pio_alloc(
- __inout efx_nic_t *enp,
- __out uint32_t *bufnump,
- __out efx_piobuf_handle_t *handlep,
- __out uint32_t *blknump,
- __out uint32_t *offsetp,
- __out size_t *sizep)
-{
- efx_nic_cfg_t *encp = &enp->en_nic_cfg;
- efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
- uint32_t blk_per_buf;
- uint32_t buf, blk;
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
- EFSYS_ASSERT(bufnump);
- EFSYS_ASSERT(handlep);
- EFSYS_ASSERT(blknump);
- EFSYS_ASSERT(offsetp);
- EFSYS_ASSERT(sizep);
-
- if ((edcp->edc_pio_alloc_size == 0) ||
- (enp->en_arch.ef10.ena_piobuf_count == 0)) {
- rc = ENOMEM;
- goto fail1;
- }
- blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
-
- for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
- uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
-
- if (~(*map) == 0)
- continue;
-
- EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
- for (blk = 0; blk < blk_per_buf; blk++) {
- if ((*map & (1u << blk)) == 0) {
- *map |= (1u << blk);
- goto done;
- }
+ } else {
+ if (port_modes & (1 << TLV_PORT_MODE_40G)) {
+ max_port_mode = TLV_PORT_MODE_40G;
+ } else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) {
+ max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;
+ } else {
+ /* Assume two 10G ports */
+ max_port_mode = TLV_PORT_MODE_10G_10G;
}
- }
- rc = ENOMEM;
- goto fail2;
-
-done:
- *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
- *bufnump = buf;
- *blknump = blk;
- *sizep = edcp->edc_pio_alloc_size;
- *offsetp = blk * (*sizep);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-/* Free a piobuf sub-allocated block */
- __checkReturn efx_rc_t
-ef10_nic_pio_free(
- __inout efx_nic_t *enp,
- __in uint32_t bufnum,
- __in uint32_t blknum)
-{
- uint32_t *map;
- efx_rc_t rc;
-
- if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
- (blknum >= (8 * sizeof (*map)))) {
- rc = EINVAL;
- goto fail1;
- }
-
- map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
- if ((*map & (1u << blknum)) == 0) {
- rc = ENOENT;
- goto fail2;
- }
- *map &= ~(1u << blknum);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_pio_link(
- __inout efx_nic_t *enp,
- __in uint32_t vi_index,
- __in efx_piobuf_handle_t handle)
-{
- return (efx_mcdi_link_piobuf(enp, vi_index, handle));
-}
-
- __checkReturn efx_rc_t
-ef10_nic_pio_unlink(
- __inout efx_nic_t *enp,
- __in uint32_t vi_index)
-{
- return (efx_mcdi_unlink_piobuf(enp, vi_index));
-}
- __checkReturn efx_rc_t
-ef10_get_datapath_caps(
- __in efx_nic_t *enp)
-{
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint32_t flags;
- uint32_t flags2;
- efx_rc_t rc;
-
- if ((rc = efx_mcdi_get_capabilities(enp, &flags, &flags2)) != 0)
- goto fail1;
-
-#define CAP_FLAG(flags1, field) \
- ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
-
-#define CAP_FLAG2(flags2, field) \
- ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
-
- /*
- * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
- * We only support the 14 byte prefix here.
- */
- if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
- rc = ENOTSUP;
- goto fail2;
+ if ((rc = ef10_nic_get_port_mode_bandwidth(max_port_mode,
+ &bandwidth)) != 0)
+ goto fail2;
}
- encp->enc_rx_prefix_size = 14;
-
- /* Check if the firmware supports TSO */
- encp->enc_fw_assisted_tso_enabled =
- CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware supports FATSOv2 */
- encp->enc_fw_assisted_tso_v2_enabled =
- CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware has vadapter/vport/vswitch support */
- encp->enc_datapath_cap_evb =
- CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware supports VLAN insertion */
- encp->enc_hw_tx_insert_vlan_enabled =
- CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware supports RX event batching */
- encp->enc_rx_batching_enabled =
- CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
-
- if (encp->enc_rx_batching_enabled)
- encp->enc_rx_batch_max = 16;
-
- /* Check if the firmware supports disabling scatter on RXQs */
- encp->enc_rx_disable_scatter_supported =
- CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware supports set mac with running filters */
- encp->enc_allow_set_mac_with_installed_filters =
- CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
- B_TRUE : B_FALSE;
-
- /*
- * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
- * specifying which parameters to configure.
- */
- encp->enc_enhanced_set_mac_supported =
- CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
-#undef CAP_FLAG
-#undef CAP_FLAG2
+out:
+ *bandwidth_mbpsp = bandwidth;
return (0);
@@ -971,150 +99,6 @@ fail1:
return (rc);
}
-
- __checkReturn efx_rc_t
-ef10_get_privilege_mask(
- __in efx_nic_t *enp,
- __out uint32_t *maskp)
-{
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint32_t mask;
- efx_rc_t rc;
-
- if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
- &mask)) != 0) {
- if (rc != ENOTSUP)
- goto fail1;
-
- /* Fallback for old firmware without privilege mask support */
- if (EFX_PCI_FUNCTION_IS_PF(encp)) {
- /* Assume PF has admin privilege */
- mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
- } else {
- /* VF is always unprivileged by default */
- mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
- }
- }
-
- *maskp = mask;
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-/*
- * The external port mapping is a one-based numbering of the external
- * connectors on the board. It does not distinguish off-board separated
- * outputs such as multi-headed cables.
- * The number of ports that map to each external port connector
- * on the board is determined by the chip family and the port modes to
- * which the NIC can be configured. The mapping table lists modes with
- * port numbering requirements in increasing order.
- */
-static struct {
- efx_family_t family;
- uint32_t modes_mask;
- uint32_t stride;
-} __ef10_external_port_mappings[] = {
- /* Supported modes requiring 1 output per port */
- {
- EFX_FAMILY_HUNTINGTON,
- (1 << TLV_PORT_MODE_10G) |
- (1 << TLV_PORT_MODE_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G),
- 1
- },
- {
- EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G) |
- (1 << TLV_PORT_MODE_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G),
- 1
- },
- /* Supported modes requiring 2 outputs per port */
- {
- EFX_FAMILY_HUNTINGTON,
- (1 << TLV_PORT_MODE_40G) |
- (1 << TLV_PORT_MODE_40G_40G) |
- (1 << TLV_PORT_MODE_40G_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_40G),
- 2
- },
- {
- EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_40G) |
- (1 << TLV_PORT_MODE_40G_40G) |
- (1 << TLV_PORT_MODE_40G_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_40G),
- 2
- },
- /* Supported modes requiring 4 outputs per port */
- {
- EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
- 4
- },
-};
-
- __checkReturn efx_rc_t
-ef10_external_port_mapping(
- __in efx_nic_t *enp,
- __in uint32_t port,
- __out uint8_t *external_portp)
-{
- efx_rc_t rc;
- int i;
- uint32_t port_modes;
- uint32_t matches;
- uint32_t stride = 1; /* default 1-1 mapping */
-
- if ((rc = efx_mcdi_get_port_modes(enp, &port_modes)) != 0) {
- /* No port mode information available - use default mapping */
- goto out;
- }
-
- /*
- * Infer the internal port -> external port mapping from
- * the possible port modes for this NIC.
- */
- for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
- if (__ef10_external_port_mappings[i].family !=
- enp->en_family)
- continue;
- matches = (__ef10_external_port_mappings[i].modes_mask &
- port_modes);
- if (matches != 0) {
- stride = __ef10_external_port_mappings[i].stride;
- port_modes &= ~matches;
- }
- }
-
- if (port_modes != 0) {
- /* Some advertised modes are not supported */
- rc = ENOTSUP;
- goto fail1;
- }
-
-out:
- /*
- * Scale as required by last matched mode and then convert to
- * one-based numbering
- */
- *external_portp = (uint8_t)(port / stride) + 1;
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
__checkReturn efx_rc_t
hunt_board_cfg(
__in efx_nic_t *enp)
@@ -1130,8 +114,9 @@ hunt_board_cfg(
uint32_t vf;
uint32_t mask;
uint32_t flags;
- uint32_t sysclk;
+ uint32_t sysclk, dpcpu_clk;
uint32_t base, nvec;
+ uint32_t bandwidth;
efx_rc_t rc;
if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
@@ -1289,13 +274,13 @@ hunt_board_cfg(
goto fail10;
}
- /* Get sysclk frequency (in MHz). */
- if ((rc = efx_mcdi_get_clock(enp, &sysclk)) != 0)
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
goto fail11;
/*
- * The timer quantum is 1536 sysclk cycles, documented for the
- * EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ * The Huntington timer quantum is 1536 sysclk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
*/
encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
if (encp->enc_bug35388_workaround) {
@@ -1361,8 +346,17 @@ hunt_board_cfg(
*/
encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+ if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
+ goto fail15;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+
+ /* All Huntington devices have a PCIe Gen3, 8 lane connector */
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
return (0);
+fail15:
+ EFSYS_PROBE(fail15);
fail14:
EFSYS_PROBE(fail14);
fail13:
@@ -1396,502 +390,4 @@ fail1:
}
- __checkReturn efx_rc_t
-ef10_nic_probe(
- __in efx_nic_t *enp)
-{
- efx_nic_ops_t *enop = enp->en_enop;
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- /* Read and clear any assertion state */
- if ((rc = efx_mcdi_read_assertion(enp)) != 0)
- goto fail1;
-
- /* Exit the assertion handler */
- if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
- if (rc != EACCES)
- goto fail2;
-
- if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
- goto fail3;
-
- if ((rc = enop->eno_board_cfg(enp)) != 0)
- if (rc != EACCES)
- goto fail4;
-
- /*
- * Set default driver config limits (based on board config).
- *
- * FIXME: For now allocate a fixed number of VIs which is likely to be
- * sufficient and small enough to allow multiple functions on the same
- * port.
- */
- edcp->edc_min_vi_count = edcp->edc_max_vi_count =
- MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
-
- /* The client driver must configure and enable PIO buffer support */
- edcp->edc_max_piobuf_count = 0;
- edcp->edc_pio_alloc_size = 0;
-
-#if EFSYS_OPT_MAC_STATS
- /* Wipe the MAC statistics */
- if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
- goto fail5;
-#endif
-
-#if EFSYS_OPT_LOOPBACK
- if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
- goto fail6;
-#endif
-
-#if EFSYS_OPT_MON_STATS
- if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
- /* Unprivileged functions do not have access to sensors */
- if (rc != EACCES)
- goto fail7;
- }
-#endif
-
- encp->enc_features = enp->en_features;
-
- return (0);
-
-#if EFSYS_OPT_MON_STATS
-fail7:
- EFSYS_PROBE(fail7);
-#endif
-#if EFSYS_OPT_LOOPBACK
-fail6:
- EFSYS_PROBE(fail6);
-#endif
-#if EFSYS_OPT_MAC_STATS
-fail5:
- EFSYS_PROBE(fail5);
-#endif
-fail4:
- EFSYS_PROBE(fail4);
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_set_drv_limits(
- __inout efx_nic_t *enp,
- __in efx_drv_limits_t *edlp)
-{
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
- uint32_t min_evq_count, max_evq_count;
- uint32_t min_rxq_count, max_rxq_count;
- uint32_t min_txq_count, max_txq_count;
- efx_rc_t rc;
-
- if (edlp == NULL) {
- rc = EINVAL;
- goto fail1;
- }
-
- /* Get minimum required and maximum usable VI limits */
- min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
- min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
- min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
-
- edcp->edc_min_vi_count =
- MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
-
- max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
- max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
- max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
-
- edcp->edc_max_vi_count =
- MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
-
- /*
- * Check limits for sub-allocated piobuf blocks.
- * PIO is optional, so don't fail if the limits are incorrect.
- */
- if ((encp->enc_piobuf_size == 0) ||
- (encp->enc_piobuf_limit == 0) ||
- (edlp->edl_min_pio_alloc_size == 0) ||
- (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
- /* Disable PIO */
- edcp->edc_max_piobuf_count = 0;
- edcp->edc_pio_alloc_size = 0;
- } else {
- uint32_t blk_size, blk_count, blks_per_piobuf;
-
- blk_size =
- MAX(edlp->edl_min_pio_alloc_size,
- encp->enc_piobuf_min_alloc_size);
-
- blks_per_piobuf = encp->enc_piobuf_size / blk_size;
- EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
-
- blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
-
- /* A zero max pio alloc count means unlimited */
- if ((edlp->edl_max_pio_alloc_count > 0) &&
- (edlp->edl_max_pio_alloc_count < blk_count)) {
- blk_count = edlp->edl_max_pio_alloc_count;
- }
-
- edcp->edc_pio_alloc_size = blk_size;
- edcp->edc_max_piobuf_count =
- (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
- __checkReturn efx_rc_t
-ef10_nic_reset(
- __in efx_nic_t *enp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
- MC_CMD_ENTITY_RESET_OUT_LEN)];
- efx_rc_t rc;
-
- /* ef10_nic_reset() is called to recover from BADASSERT failures. */
- if ((rc = efx_mcdi_read_assertion(enp)) != 0)
- goto fail1;
- if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
- goto fail2;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_ENTITY_RESET;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
-
- MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
- ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail3;
- }
-
- /* Clear RX/TX DMA queue errors */
- enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_init(
- __in efx_nic_t *enp)
-{
- efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
- uint32_t min_vi_count, max_vi_count;
- uint32_t vi_count, vi_base, vi_shift;
- uint32_t i;
- uint32_t retry;
- uint32_t delay_us;
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- /* Enable reporting of some events (e.g. link change) */
- if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
- goto fail1;
-
- /* Allocate (optional) on-chip PIO buffers */
- ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
-
- /*
- * For best performance, PIO writes should use a write-combined
- * (WC) memory mapping. Using a separate WC mapping for the PIO
- * aperture of each VI would be a burden to drivers (and not
- * possible if the host page size is >4Kbyte).
- *
- * To avoid this we use a single uncached (UC) mapping for VI
- * register access, and a single WC mapping for extra VIs used
- * for PIO writes.
- *
- * Each piobuf must be linked to a VI in the WC mapping, and to
- * each VI that is using a sub-allocated block from the piobuf.
- */
- min_vi_count = edcp->edc_min_vi_count;
- max_vi_count =
- edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
-
- /* Ensure that the previously attached driver's VIs are freed */
- if ((rc = efx_mcdi_free_vis(enp)) != 0)
- goto fail2;
-
- /*
- * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
- * fails then retrying the request for fewer VI resources may succeed.
- */
- vi_count = 0;
- if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
- &vi_base, &vi_count, &vi_shift)) != 0)
- goto fail3;
-
- EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
-
- if (vi_count < min_vi_count) {
- rc = ENOMEM;
- goto fail4;
- }
-
- enp->en_arch.ef10.ena_vi_base = vi_base;
- enp->en_arch.ef10.ena_vi_count = vi_count;
- enp->en_arch.ef10.ena_vi_shift = vi_shift;
-
- if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
- /* Not enough extra VIs to map piobufs */
- ef10_nic_free_piobufs(enp);
- }
-
- enp->en_arch.ef10.ena_pio_write_vi_base =
- vi_count - enp->en_arch.ef10.ena_piobuf_count;
-
- /* Save UC memory mapping details */
- enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
- if (enp->en_arch.ef10.ena_piobuf_count > 0) {
- enp->en_arch.ef10.ena_uc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
- enp->en_arch.ef10.ena_pio_write_vi_base);
- } else {
- enp->en_arch.ef10.ena_uc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
- enp->en_arch.ef10.ena_vi_count);
- }
-
- /* Save WC memory mapping details */
- enp->en_arch.ef10.ena_wc_mem_map_offset =
- enp->en_arch.ef10.ena_uc_mem_map_offset +
- enp->en_arch.ef10.ena_uc_mem_map_size;
-
- enp->en_arch.ef10.ena_wc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
- enp->en_arch.ef10.ena_piobuf_count);
-
- /* Link piobufs to extra VIs in WC mapping */
- if (enp->en_arch.ef10.ena_piobuf_count > 0) {
- for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
- rc = efx_mcdi_link_piobuf(enp,
- enp->en_arch.ef10.ena_pio_write_vi_base + i,
- enp->en_arch.ef10.ena_piobuf_handle[i]);
- if (rc != 0)
- break;
- }
- }
-
- /*
- * Allocate a vAdaptor attached to our upstream vPort/pPort.
- *
- * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
- * driver has yet to bring up the EVB port. See bug 56147. In this case,
- * retry the request several times after waiting a while. The wait time
- * between retries starts small (10ms) and exponentially increases.
- * Total wait time is a little over two seconds. Retry logic in the
- * client driver may mean this whole loop is repeated if it continues to
- * fail.
- */
- retry = 0;
- delay_us = 10000;
- while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
- if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
- (rc != ENOENT)) {
- /*
- * Do not retry alloc for PF, or for other errors on
- * a VF.
- */
- goto fail5;
- }
-
- /* VF startup before PF is ready. Retry allocation. */
- if (retry > 5) {
- /* Too many attempts */
- rc = EINVAL;
- goto fail6;
- }
- EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
- EFSYS_SLEEP(delay_us);
- retry++;
- if (delay_us < 500000)
- delay_us <<= 2;
- }
-
- enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
- enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
-
- return (0);
-
-fail6:
- EFSYS_PROBE(fail6);
-fail5:
- EFSYS_PROBE(fail5);
-fail4:
- EFSYS_PROBE(fail4);
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-
- ef10_nic_free_piobufs(enp);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_get_vi_pool(
- __in efx_nic_t *enp,
- __out uint32_t *vi_countp)
-{
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- /*
- * Report VIs that the client driver can use.
- * Do not include VIs used for PIO buffer writes.
- */
- *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
-
- return (0);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_get_bar_region(
- __in efx_nic_t *enp,
- __in efx_nic_region_t region,
- __out uint32_t *offsetp,
- __out size_t *sizep)
-{
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- /*
- * TODO: Specify host memory mapping alignment and granularity
- * in efx_drv_limits_t so that they can be taken into account
- * when allocating extra VIs for PIO writes.
- */
- switch (region) {
- case EFX_REGION_VI:
- /* UC mapped memory BAR region for VI registers */
- *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
- *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
- break;
-
- case EFX_REGION_PIO_WRITE_VI:
- /* WC mapped memory BAR region for piobuf writes */
- *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
- *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
- break;
-
- default:
- rc = EINVAL;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- void
-ef10_nic_fini(
- __in efx_nic_t *enp)
-{
- uint32_t i;
- efx_rc_t rc;
-
- (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
- enp->en_vport_id = 0;
-
- /* Unlink piobufs from extra VIs in WC mapping */
- if (enp->en_arch.ef10.ena_piobuf_count > 0) {
- for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
- rc = efx_mcdi_unlink_piobuf(enp,
- enp->en_arch.ef10.ena_pio_write_vi_base + i);
- if (rc != 0)
- break;
- }
- }
-
- ef10_nic_free_piobufs(enp);
-
- (void) efx_mcdi_free_vis(enp);
- enp->en_arch.ef10.ena_vi_count = 0;
-}
-
- void
-ef10_nic_unprobe(
- __in efx_nic_t *enp)
-{
-#if EFSYS_OPT_MON_STATS
- mcdi_mon_cfg_free(enp);
-#endif /* EFSYS_OPT_MON_STATS */
- (void) efx_mcdi_drv_attach(enp, B_FALSE);
-}
-
-#if EFSYS_OPT_DIAG
-
- __checkReturn efx_rc_t
-ef10_nic_register_test(
- __in efx_nic_t *enp)
-{
- efx_rc_t rc;
-
- /* FIXME */
- _NOTE(ARGUNUSED(enp))
- if (B_FALSE) {
- rc = ENOTSUP;
- goto fail1;
- }
- /* FIXME */
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-#endif /* EFSYS_OPT_DIAG */
-
-
-
#endif /* EFSYS_OPT_HUNTINGTON */
diff --git a/sys/dev/sfxge/common/hunt_phy.c b/sys/dev/sfxge/common/hunt_phy.c
index a6b7faa..63e6011 100644
--- a/sys/dev/sfxge/common/hunt_phy.c
+++ b/sys/dev/sfxge/common/hunt_phy.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,485 +36,6 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_HUNTINGTON
-static void
-mcdi_phy_decode_cap(
- __in uint32_t mcdi_cap,
- __out uint32_t *maskp)
-{
- uint32_t mask;
-
- mask = 0;
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- mask |= (1 << EFX_PHY_CAP_10HDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_10FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- mask |= (1 << EFX_PHY_CAP_100HDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_100FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- mask |= (1 << EFX_PHY_CAP_1000HDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_1000FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_10000FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_40000FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- mask |= (1 << EFX_PHY_CAP_PAUSE);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- mask |= (1 << EFX_PHY_CAP_ASYM);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- mask |= (1 << EFX_PHY_CAP_AN);
-
- *maskp = mask;
-}
-
-static void
-mcdi_phy_decode_link_mode(
- __in efx_nic_t *enp,
- __in uint32_t link_flags,
- __in unsigned int speed,
- __in unsigned int fcntl,
- __out efx_link_mode_t *link_modep,
- __out unsigned int *fcntlp)
-{
- boolean_t fd = !!(link_flags &
- (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
- boolean_t up = !!(link_flags &
- (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
-
- _NOTE(ARGUNUSED(enp))
-
- if (!up)
- *link_modep = EFX_LINK_DOWN;
- else if (speed == 40000 && fd)
- *link_modep = EFX_LINK_40000FDX;
- else if (speed == 10000 && fd)
- *link_modep = EFX_LINK_10000FDX;
- else if (speed == 1000)
- *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
- else if (speed == 100)
- *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
- else if (speed == 10)
- *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
- else
- *link_modep = EFX_LINK_UNKNOWN;
-
- if (fcntl == MC_CMD_FCNTL_OFF)
- *fcntlp = 0;
- else if (fcntl == MC_CMD_FCNTL_RESPOND)
- *fcntlp = EFX_FCNTL_RESPOND;
- else if (fcntl == MC_CMD_FCNTL_GENERATE)
- *fcntlp = EFX_FCNTL_GENERATE;
- else if (fcntl == MC_CMD_FCNTL_BIDIR)
- *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
- else {
- EFSYS_PROBE1(mc_pcol_error, int, fcntl);
- *fcntlp = 0;
- }
-}
-
-
- void
-ef10_phy_link_ev(
- __in efx_nic_t *enp,
- __in efx_qword_t *eqp,
- __out efx_link_mode_t *link_modep)
-{
- efx_port_t *epp = &(enp->en_port);
- unsigned int link_flags;
- unsigned int speed;
- unsigned int fcntl;
- efx_link_mode_t link_mode;
- uint32_t lp_cap_mask;
-
- /*
- * Convert the LINKCHANGE speed enumeration into mbit/s, in the
- * same way as GET_LINK encodes the speed
- */
- switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
- case MCDI_EVENT_LINKCHANGE_SPEED_100M:
- speed = 100;
- break;
- case MCDI_EVENT_LINKCHANGE_SPEED_1G:
- speed = 1000;
- break;
- case MCDI_EVENT_LINKCHANGE_SPEED_10G:
- speed = 10000;
- break;
- case MCDI_EVENT_LINKCHANGE_SPEED_40G:
- speed = 40000;
- break;
- default:
- speed = 0;
- break;
- }
-
- link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
- mcdi_phy_decode_link_mode(enp, link_flags, speed,
- MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
- &link_mode, &fcntl);
- mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
- &lp_cap_mask);
-
- /*
- * It's safe to update ep_lp_cap_mask without the driver's port lock
- * because presumably any concurrently running efx_port_poll() is
- * only going to arrive at the same value.
- *
- * ep_fcntl has two meanings. It's either the link common fcntl
- * (if the PHY supports AN), or it's the forced link state. If
- * the former, it's safe to update the value for the same reason as
- * for ep_lp_cap_mask. If the latter, then just ignore the value,
- * because we can race with efx_mac_fcntl_set().
- */
- epp->ep_lp_cap_mask = lp_cap_mask;
- epp->ep_fcntl = fcntl;
-
- *link_modep = link_mode;
-}
-
- __checkReturn efx_rc_t
-ef10_phy_power(
- __in efx_nic_t *enp,
- __in boolean_t power)
-{
- efx_rc_t rc;
-
- if (!power)
- return (0);
-
- /* Check if the PHY is a zombie */
- if ((rc = ef10_phy_verify(enp)) != 0)
- goto fail1;
-
- enp->en_reset_flags |= EFX_RESET_PHY;
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_get_link(
- __in efx_nic_t *enp,
- __out ef10_link_state_t *elsp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
- MC_CMD_GET_LINK_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_LINK;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
- &elsp->els_adv_cap_mask);
- mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
- &elsp->els_lp_cap_mask);
-
- mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
- MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
- MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
- &elsp->els_link_mode, &elsp->els_fcntl);
-
-#if EFSYS_OPT_LOOPBACK
- /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
-
- elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
-#endif /* EFSYS_OPT_LOOPBACK */
-
- elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_reconfigure(
- __in efx_nic_t *enp)
-{
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- efx_port_t *epp = &(enp->en_port);
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
- MC_CMD_SET_LINK_OUT_LEN)];
- uint32_t cap_mask;
- unsigned int led_mode;
- unsigned int speed;
- efx_rc_t rc;
-
- if (~encp->enc_func_flags & EFX_NIC_FUNC_LINKCTRL)
- goto out;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_SET_LINK;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
-
- cap_mask = epp->ep_adv_cap_mask;
- MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
- PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
- PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
- PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
- PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
- PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
- PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
- PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
- PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
- PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
- PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
- /* Too many fields for for POPULATE macros, so insert this afterwards */
- MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
- PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
-
-#if EFSYS_OPT_LOOPBACK
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
- epp->ep_loopback_type);
- switch (epp->ep_loopback_link_mode) {
- case EFX_LINK_100FDX:
- speed = 100;
- break;
- case EFX_LINK_1000FDX:
- speed = 1000;
- break;
- case EFX_LINK_10000FDX:
- speed = 10000;
- break;
- case EFX_LINK_40000FDX:
- speed = 40000;
- break;
- default:
- speed = 0;
- }
-#else
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
- speed = 0;
-#endif /* EFSYS_OPT_LOOPBACK */
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
-
-#if EFSYS_OPT_PHY_FLAGS
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
-#else
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
-#endif /* EFSYS_OPT_PHY_FLAGS */
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- /* And set the blink mode */
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_SET_ID_LED;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
-
-#if EFSYS_OPT_PHY_LED_CONTROL
- switch (epp->ep_phy_led_mode) {
- case EFX_PHY_LED_DEFAULT:
- led_mode = MC_CMD_LED_DEFAULT;
- break;
- case EFX_PHY_LED_OFF:
- led_mode = MC_CMD_LED_OFF;
- break;
- case EFX_PHY_LED_ON:
- led_mode = MC_CMD_LED_ON;
- break;
- default:
- EFSYS_ASSERT(0);
- led_mode = MC_CMD_LED_DEFAULT;
- }
-
- MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
-#else
- MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
-#endif /* EFSYS_OPT_PHY_LED_CONTROL */
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail2;
- }
-out:
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_verify(
- __in efx_nic_t *enp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
- MC_CMD_GET_PHY_STATE_OUT_LEN)];
- uint32_t state;
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_PHY_STATE;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
- if (state != MC_CMD_PHY_STATE_OK) {
- if (state != MC_CMD_PHY_STATE_ZOMBIE)
- EFSYS_PROBE1(mc_pcol_error, int, state);
- rc = ENOTACTIVE;
- goto fail3;
- }
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_oui_get(
- __in efx_nic_t *enp,
- __out uint32_t *ouip)
-{
- _NOTE(ARGUNUSED(enp, ouip))
-
- return (ENOTSUP);
-}
-
-#if EFSYS_OPT_PHY_STATS
-
- __checkReturn efx_rc_t
-ef10_phy_stats_update(
- __in efx_nic_t *enp,
- __in efsys_mem_t *esmp,
- __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
-{
- /* TBD: no stats support in firmware yet */
- _NOTE(ARGUNUSED(enp, esmp))
- memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat));
-
- return (0);
-}
-
-#endif /* EFSYS_OPT_PHY_STATS */
-
-#if EFSYS_OPT_PHY_PROPS
-
-#if EFSYS_OPT_NAMES
-
- const char *
-ef10_phy_prop_name(
- __in efx_nic_t *enp,
- __in unsigned int id)
-{
- _NOTE(ARGUNUSED(enp, id))
-
- return (NULL);
-}
-
-#endif /* EFSYS_OPT_NAMES */
-
- __checkReturn efx_rc_t
-ef10_phy_prop_get(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t flags,
- __out uint32_t *valp)
-{
- _NOTE(ARGUNUSED(enp, id, flags, valp))
-
- return (ENOTSUP);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_prop_set(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t val)
-{
- _NOTE(ARGUNUSED(enp, id, val))
-
- return (ENOTSUP);
-}
-
-#endif /* EFSYS_OPT_PHY_PROPS */
-
#if EFSYS_OPT_BIST
__checkReturn efx_rc_t
@@ -571,6 +92,8 @@ hunt_bist_poll(
uint32_t result;
efx_rc_t rc;
+ _NOTE(ARGUNUSED(type))
+
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_POLL_BIST;
req.emr_in_buf = payload;
diff --git a/sys/dev/sfxge/common/hunt_sram.c b/sys/dev/sfxge/common/hunt_sram.c
deleted file mode 100644
index 1e35991..0000000
--- a/sys/dev/sfxge/common/hunt_sram.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include "efx.h"
-#include "efx_impl.h"
-
-#if EFSYS_OPT_HUNTINGTON
-
-
-#if EFSYS_OPT_DIAG
-
- __checkReturn efx_rc_t
-ef10_sram_test(
- __in efx_nic_t *enp,
- __in efx_sram_pattern_fn_t func)
-{
- efx_rc_t rc;
-
- /* FIXME */
- _NOTE(ARGUNUSED(enp))
- _NOTE(ARGUNUSED(func))
- if (B_FALSE) {
- rc = ENOTSUP;
- goto fail1;
- }
- /* FIXME */
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-#endif /* EFSYS_OPT_DIAG */
-
-#endif /* EFSYS_OPT_HUNTINGTON */
diff --git a/sys/dev/sfxge/common/mcdi_mon.c b/sys/dev/sfxge/common/mcdi_mon.c
index 4c7f961..a379515 100644
--- a/sys/dev/sfxge/common/mcdi_mon.c
+++ b/sys/dev/sfxge/common/mcdi_mon.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -155,6 +155,8 @@ static const struct mcdi_sensor_map_s {
STAT(Px, PHY0_VCC), /* 0x4c PHY0_VCC */
STAT(Px, PHY1_VCC), /* 0x4d PHY1_VCC */
STAT(Px, CONTROLLER_TDIODE_TEMP), /* 0x4e CONTROLLER_TDIODE_TEMP */
+ STAT(Px, BOARD_FRONT_TEMP), /* 0x4f BOARD_FRONT_TEMP */
+ STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
};
#define MCDI_STATIC_SENSOR_ASSERT(_field) \
@@ -365,7 +367,7 @@ efx_mcdi_sensor_info_npages(
goto fail1;
}
} while (MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK) &
- (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ (1U << MC_CMD_SENSOR_PAGE0_NEXT));
*npagesp = page;
diff --git a/sys/dev/sfxge/common/mcdi_mon.h b/sys/dev/sfxge/common/mcdi_mon.h
index 440a887..ce93160 100644
--- a/sys/dev/sfxge/common/mcdi_mon.h
+++ b/sys/dev/sfxge/common/mcdi_mon.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/medford_impl.h b/sys/dev/sfxge/common/medford_impl.h
index 59ea35f..3e71eb0 100644
--- a/sys/dev/sfxge/common/medford_impl.h
+++ b/sys/dev/sfxge/common/medford_impl.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2015 Solarflare Communications Inc.
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/medford_nic.c b/sys/dev/sfxge/common/medford_nic.c
index 7c176b5..0942bd7 100644
--- a/sys/dev/sfxge/common/medford_nic.c
+++ b/sys/dev/sfxge/common/medford_nic.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2015 Solarflare Communications Inc.
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,11 +33,9 @@ __FBSDID("$FreeBSD$");
#include "efx.h"
#include "efx_impl.h"
-#include "mcdi_mon.h"
-#if EFSYS_OPT_MEDFORD
-#include "ef10_tlv_layout.h"
+#if EFSYS_OPT_MEDFORD
static __checkReturn efx_rc_t
efx_mcdi_get_rxdp_config(
@@ -97,6 +95,38 @@ fail1:
return (rc);
}
+static __checkReturn efx_rc_t
+medford_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t current_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ &current_mode)) != 0) {
+ /* No port mode info available. */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ &bandwidth)) != 0)
+ goto fail1;
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
__checkReturn efx_rc_t
medford_board_cfg(
__in efx_nic_t *enp)
@@ -111,10 +141,10 @@ medford_board_cfg(
uint32_t pf;
uint32_t vf;
uint32_t mask;
- uint32_t flags;
- uint32_t sysclk;
+ uint32_t sysclk, dpcpu_clk;
uint32_t base, nvec;
uint32_t end_padding;
+ uint32_t bandwidth;
efx_rc_t rc;
/*
@@ -150,6 +180,12 @@ medford_board_cfg(
/* MAC address for this function */
if (EFX_PCI_FUNCTION_IS_PF(encp)) {
rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+ /* Disable static config checking for Medford NICs, ONLY
+ * for manufacturing test and setup at the factory, to
+ * allow the static config to be installed.
+ */
+#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
if ((rc == 0) && (mac_addr[0] & 0x02)) {
/*
* If the static config does not include a global MAC
@@ -159,6 +195,7 @@ medford_board_cfg(
*/
rc = EINVAL;
}
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
} else {
rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
}
@@ -201,15 +238,15 @@ medford_board_cfg(
/* Chained multicast is always enabled on Medford */
encp->enc_bug26807_workaround = B_TRUE;
- /* Get sysclk frequency (in MHz). */
- if ((rc = efx_mcdi_get_clock(enp, &sysclk)) != 0)
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
goto fail8;
/*
- * The timer quantum is 1536 sysclk cycles, documented for the
- * EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
*/
- encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
@@ -278,8 +315,16 @@ medford_board_cfg(
*/
encp->enc_vpd_is_global = B_TRUE;
+ rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
+ if (rc != 0)
+ goto fail13;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
return (0);
+fail13:
+ EFSYS_PROBE(fail13);
fail12:
EFSYS_PROBE(fail12);
fail11:
diff --git a/sys/dev/sfxge/common/siena_flash.h b/sys/dev/sfxge/common/siena_flash.h
index 143a14e..4558f15 100644
--- a/sys/dev/sfxge/common/siena_flash.h
+++ b/sys/dev/sfxge/common/siena_flash.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/siena_impl.h b/sys/dev/sfxge/common/siena_impl.h
index 4c80cd6..7160f77 100644
--- a/sys/dev/sfxge/common/siena_impl.h
+++ b/sys/dev/sfxge/common/siena_impl.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,31 +42,12 @@
extern "C" {
#endif
-#if EFSYS_OPT_PHY_PROPS
-
-/* START MKCONFIG GENERATED SienaPhyHeaderPropsBlock a8db1f8eb5106efd */
-typedef enum siena_phy_prop_e {
- SIENA_PHY_NPROPS
-} siena_phy_prop_t;
-
-/* END MKCONFIG GENERATED SienaPhyHeaderPropsBlock */
-
-#endif /* EFSYS_OPT_PHY_PROPS */
-
#define SIENA_NVRAM_CHUNK 0x80
extern __checkReturn efx_rc_t
siena_nic_probe(
__in efx_nic_t *enp);
-#if EFSYS_OPT_PCIE_TUNE
-
-extern __checkReturn efx_rc_t
-siena_nic_pcie_extended_sync(
- __in efx_nic_t *enp);
-
-#endif
-
extern __checkReturn efx_rc_t
siena_nic_reset(
__in efx_nic_t *enp);
@@ -368,32 +349,6 @@ siena_phy_stats_update(
#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-
-#if EFSYS_OPT_NAMES
-
-extern const char *
-siena_phy_prop_name(
- __in efx_nic_t *enp,
- __in unsigned int id);
-
-#endif /* EFSYS_OPT_NAMES */
-
-extern __checkReturn efx_rc_t
-siena_phy_prop_get(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t flags,
- __out uint32_t *valp);
-
-extern __checkReturn efx_rc_t
-siena_phy_prop_set(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t val);
-
-#endif /* EFSYS_OPT_PHY_PROPS */
-
#if EFSYS_OPT_BIST
extern __checkReturn efx_rc_t
@@ -433,6 +388,11 @@ extern __checkReturn efx_rc_t
siena_mac_reconfigure(
__in efx_nic_t *enp);
+extern __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
#if EFSYS_OPT_LOOPBACK
extern __checkReturn efx_rc_t
diff --git a/sys/dev/sfxge/common/siena_mac.c b/sys/dev/sfxge/common/siena_mac.c
index 12ecffd..2042f06 100644
--- a/sys/dev/sfxge/common/siena_mac.c
+++ b/sys/dev/sfxge/common/siena_mac.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -158,8 +158,17 @@ siena_mac_reconfigure(
* so we always add bit 0xff to the mask (bit 0x7f in the
* second octword).
*/
- if (epp->ep_brdcst)
+ if (epp->ep_brdcst) {
+ /*
+ * NOTE: due to constant folding, some of this evaluates
+ * to null expressions, giving E_EXPR_NULL_EFFECT during
+ * lint on Illumos. No good way to fix this without
+ * explicit coding the individual word/bit setting.
+ * So just suppress lint for this one line.
+ */
+ /* LINTED */
EFX_SET_OWORD_BIT(multicast_hash[1], 0x7f);
+ }
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_MCAST_HASH;
@@ -197,7 +206,7 @@ siena_mac_loopback_set(
__in efx_loopback_type_t loopback_type)
{
efx_port_t *epp = &(enp->en_port);
- efx_phy_ops_t *epop = epp->ep_epop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
efx_loopback_type_t old_loopback_type;
efx_link_mode_t old_loopback_link_mode;
efx_rc_t rc;
@@ -432,4 +441,12 @@ siena_mac_stats_update(
#endif /* EFSYS_OPT_MAC_STATS */
+ __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ return (ENOTSUP);
+}
+
#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/siena_mcdi.c b/sys/dev/sfxge/common/siena_mcdi.c
index 4fb2b6c..a9bc1ca 100644
--- a/sys/dev/sfxge/common/siena_mcdi.c
+++ b/sys/dev/sfxge/common/siena_mcdi.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -174,6 +174,8 @@ siena_mcdi_init(
unsigned int portnum;
efx_rc_t rc;
+ _NOTE(ARGUNUSED(mtp))
+
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
/* Determine the port number to use for MCDI */
@@ -212,6 +214,7 @@ fail1:
siena_mcdi_fini(
__in efx_nic_t *enp)
{
+ _NOTE(ARGUNUSED(enp))
}
__checkReturn efx_rc_t
@@ -234,7 +237,6 @@ siena_mcdi_feature_supported(
default:
rc = ENOTSUP;
goto fail1;
- break;
}
return (0);
diff --git a/sys/dev/sfxge/common/siena_nic.c b/sys/dev/sfxge/common/siena_nic.c
index 59e1283..b809a4d 100644
--- a/sys/dev/sfxge/common/siena_nic.c
+++ b/sys/dev/sfxge/common/siena_nic.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -78,28 +78,6 @@ fail1:
return (rc);
}
-#if EFSYS_OPT_PCIE_TUNE
-
- __checkReturn efx_rc_t
-siena_nic_pcie_extended_sync(
- __in efx_nic_t *enp)
-{
- efx_rc_t rc;
-
- if ((rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG17230,
- B_TRUE, NULL) != 0))
- goto fail1;
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-#endif /* EFSYS_OPT_PCIE_TUNE */
-
static __checkReturn efx_rc_t
siena_board_cfg(
__in efx_nic_t *enp)
@@ -172,6 +150,10 @@ siena_board_cfg(
encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
+ /* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */
+ encp->enc_required_pcie_bandwidth_mbps = 2 * 10000;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN2;
+
return (0);
fail2:
diff --git a/sys/dev/sfxge/common/siena_nvram.c b/sys/dev/sfxge/common/siena_nvram.c
index 9708e0c..de6fd01 100644
--- a/sys/dev/sfxge/common/siena_nvram.c
+++ b/sys/dev/sfxge/common/siena_nvram.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/siena_phy.c b/sys/dev/sfxge/common/siena_phy.c
index 920314a..b3f1a35 100644
--- a/sys/dev/sfxge/common/siena_phy.c
+++ b/sys/dev/sfxge/common/siena_phy.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -592,47 +592,6 @@ fail1:
#endif /* EFSYS_OPT_PHY_STATS */
-#if EFSYS_OPT_PHY_PROPS
-
-#if EFSYS_OPT_NAMES
-
-extern const char *
-siena_phy_prop_name(
- __in efx_nic_t *enp,
- __in unsigned int id)
-{
- _NOTE(ARGUNUSED(enp, id))
-
- return (NULL);
-}
-
-#endif /* EFSYS_OPT_NAMES */
-
-extern __checkReturn efx_rc_t
-siena_phy_prop_get(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t flags,
- __out uint32_t *valp)
-{
- _NOTE(ARGUNUSED(enp, id, flags, valp))
-
- return (ENOTSUP);
-}
-
-extern __checkReturn efx_rc_t
-siena_phy_prop_set(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t val)
-{
- _NOTE(ARGUNUSED(enp, id, val))
-
- return (ENOTSUP);
-}
-
-#endif /* EFSYS_OPT_PHY_PROPS */
-
#if EFSYS_OPT_BIST
__checkReturn efx_rc_t
diff --git a/sys/dev/sfxge/common/siena_sram.c b/sys/dev/sfxge/common/siena_sram.c
index 762de42..6f4dbf4 100644
--- a/sys/dev/sfxge/common/siena_sram.c
+++ b/sys/dev/sfxge/common/siena_sram.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/common/siena_vpd.c b/sys/dev/sfxge/common/siena_vpd.c
index 7a7ce67..6fb4ca9 100644
--- a/sys/dev/sfxge/common/siena_vpd.c
+++ b/sys/dev/sfxge/common/siena_vpd.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -436,8 +436,12 @@ siena_vpd_get(
/* And then from the provided data buffer */
if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
- evvp->evv_keyword, &offset, &length)) != 0)
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
goto fail2;
+ }
evvp->evv_length = length;
memcpy(evvp->evv_value, data + offset, length);
diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c
index b112a1e..6275d1b 100644
--- a/sys/dev/sfxge/sfxge.c
+++ b/sys/dev/sfxge/sfxge.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
@@ -1174,6 +1174,11 @@ sfxge_probe(device_t dev)
return (0);
}
+ if (family == EFX_FAMILY_MEDFORD) {
+ device_set_desc(dev, "Solarflare SFC9200 family");
+ return (0);
+ }
+
DBGPRINT(dev, "impossible controller family %d", family);
return (ENXIO);
}
diff --git a/sys/dev/sfxge/sfxge.h b/sys/dev/sfxge/sfxge.h
index 9cf8f07..e7a0899 100644
--- a/sys/dev/sfxge/sfxge.h
+++ b/sys/dev/sfxge/sfxge.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
@@ -112,6 +112,43 @@
#define SFXGE_ETHERTYPE_LOOPBACK 0x9000 /* Xerox loopback */
+
+#define SFXGE_MAGIC_RESERVED 0x8000
+
+#define SFXGE_MAGIC_DMAQ_LABEL_WIDTH 6
+#define SFXGE_MAGIC_DMAQ_LABEL_MASK \
+ ((1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH) - 1)
+
+enum sfxge_sw_ev {
+ SFXGE_SW_EV_RX_QFLUSH_DONE = 1,
+ SFXGE_SW_EV_RX_QFLUSH_FAILED,
+ SFXGE_SW_EV_RX_QREFILL,
+ SFXGE_SW_EV_TX_QFLUSH_DONE,
+};
+
+#define SFXGE_SW_EV_MAGIC(_sw_ev) \
+ (SFXGE_MAGIC_RESERVED | ((_sw_ev) << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+
+static inline uint16_t
+sfxge_sw_ev_mk_magic(enum sfxge_sw_ev sw_ev, unsigned int label)
+{
+ KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
+ ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
+ return SFXGE_SW_EV_MAGIC(sw_ev) | label;
+}
+
+static inline uint16_t
+sfxge_sw_ev_rxq_magic(enum sfxge_sw_ev sw_ev, struct sfxge_rxq *rxq)
+{
+ return sfxge_sw_ev_mk_magic(sw_ev, 0);
+}
+
+static inline uint16_t
+sfxge_sw_ev_txq_magic(enum sfxge_sw_ev sw_ev, struct sfxge_txq *txq)
+{
+ return sfxge_sw_ev_mk_magic(sw_ev, txq->type);
+}
+
enum sfxge_evq_state {
SFXGE_EVQ_UNINITIALIZED = 0,
SFXGE_EVQ_INITIALIZED,
diff --git a/sys/dev/sfxge/sfxge_dma.c b/sys/dev/sfxge/sfxge_dma.c
index 4f9f749..882fb82 100644
--- a/sys/dev/sfxge/sfxge_dma.c
+++ b/sys/dev/sfxge/sfxge_dma.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
diff --git a/sys/dev/sfxge/sfxge_ev.c b/sys/dev/sfxge/sfxge_ev.c
index bc2dca7..c3cd324 100644
--- a/sys/dev/sfxge/sfxge_ev.c
+++ b/sys/dev/sfxge/sfxge_ev.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
@@ -202,7 +202,6 @@ sfxge_ev_rxq_flush_done(void *arg, uint32_t rxq_index)
struct sfxge_softc *sc;
struct sfxge_rxq *rxq;
unsigned int index;
- unsigned int label;
uint16_t magic;
evq = (struct sfxge_evq *)arg;
@@ -221,11 +220,7 @@ sfxge_ev_rxq_flush_done(void *arg, uint32_t rxq_index)
}
evq = sc->evq[index];
-
- label = 0;
- KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
- ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != level"));
- magic = SFXGE_MAGIC_RX_QFLUSH_DONE | label;
+ magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QFLUSH_DONE, rxq);
KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
("evq not started"));
@@ -241,7 +236,6 @@ sfxge_ev_rxq_flush_failed(void *arg, uint32_t rxq_index)
struct sfxge_softc *sc;
struct sfxge_rxq *rxq;
unsigned int index;
- unsigned int label;
uint16_t magic;
evq = (struct sfxge_evq *)arg;
@@ -255,11 +249,7 @@ sfxge_ev_rxq_flush_failed(void *arg, uint32_t rxq_index)
/* Resend a software event on the correct queue */
index = rxq->index;
evq = sc->evq[index];
-
- label = 0;
- KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
- ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
- magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label;
+ magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QFLUSH_FAILED, rxq);
KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
("evq not started"));
@@ -326,7 +316,6 @@ sfxge_ev_txq_flush_done(void *arg, uint32_t txq_index)
struct sfxge_evq *evq;
struct sfxge_softc *sc;
struct sfxge_txq *txq;
- unsigned int label;
uint16_t magic;
evq = (struct sfxge_evq *)arg;
@@ -346,11 +335,7 @@ sfxge_ev_txq_flush_done(void *arg, uint32_t txq_index)
/* Resend a software event on the correct queue */
evq = sc->evq[txq->evq_index];
-
- label = txq->type;
- KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
- ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
- magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label;
+ magic = sfxge_sw_ev_txq_magic(SFXGE_SW_EV_TX_QFLUSH_DONE, txq);
KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
("evq not started"));
@@ -375,19 +360,19 @@ sfxge_ev_software(void *arg, uint16_t magic)
magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK;
switch (magic) {
- case SFXGE_MAGIC_RX_QFLUSH_DONE:
+ case SFXGE_SW_EV_MAGIC(SFXGE_SW_EV_RX_QFLUSH_DONE):
sfxge_rx_qflush_done(sfxge_get_rxq_by_label(evq, label));
break;
- case SFXGE_MAGIC_RX_QFLUSH_FAILED:
+ case SFXGE_SW_EV_MAGIC(SFXGE_SW_EV_RX_QFLUSH_FAILED):
sfxge_rx_qflush_failed(sfxge_get_rxq_by_label(evq, label));
break;
- case SFXGE_MAGIC_RX_QREFILL:
+ case SFXGE_SW_EV_MAGIC(SFXGE_SW_EV_RX_QREFILL):
sfxge_rx_qrefill(sfxge_get_rxq_by_label(evq, label));
break;
- case SFXGE_MAGIC_TX_QFLUSH_DONE: {
+ case SFXGE_SW_EV_MAGIC(SFXGE_SW_EV_TX_QFLUSH_DONE): {
struct sfxge_txq *txq = sfxge_get_txq_by_label(evq, label);
KASSERT(txq != NULL, ("txq == NULL"));
diff --git a/sys/dev/sfxge/sfxge_intr.c b/sys/dev/sfxge/sfxge_intr.c
index 20f4d53..4a6a273 100644
--- a/sys/dev/sfxge/sfxge_intr.c
+++ b/sys/dev/sfxge/sfxge_intr.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
diff --git a/sys/dev/sfxge/sfxge_ioc.h b/sys/dev/sfxge/sfxge_ioc.h
index bf079c8..77d4fa5 100644
--- a/sys/dev/sfxge/sfxge_ioc.h
+++ b/sys/dev/sfxge/sfxge_ioc.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2014-2015 Solarflare Communications Inc.
+ * Copyright (c) 2014-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/sfxge/sfxge_mcdi.c b/sys/dev/sfxge/sfxge_mcdi.c
index 3a85c28..02b59ac 100644
--- a/sys/dev/sfxge/sfxge_mcdi.c
+++ b/sys/dev/sfxge/sfxge_mcdi.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
diff --git a/sys/dev/sfxge/sfxge_nvram.c b/sys/dev/sfxge/sfxge_nvram.c
index c4fa224..17aeea9 100644
--- a/sys/dev/sfxge/sfxge_nvram.c
+++ b/sys/dev/sfxge/sfxge_nvram.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications, Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by OKTET Labs Ltd. under contract for
diff --git a/sys/dev/sfxge/sfxge_port.c b/sys/dev/sfxge/sfxge_port.c
index 277752a..709ed78 100644
--- a/sys/dev/sfxge/sfxge_port.c
+++ b/sys/dev/sfxge/sfxge_port.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
diff --git a/sys/dev/sfxge/sfxge_rx.c b/sys/dev/sfxge/sfxge_rx.c
index 7b3228d..9fcccad 100644
--- a/sys/dev/sfxge/sfxge_rx.c
+++ b/sys/dev/sfxge/sfxge_rx.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
@@ -179,8 +179,7 @@ sfxge_rx_post_refill(void *arg)
sc = rxq->sc;
index = rxq->index;
evq = sc->evq[index];
-
- magic = SFXGE_MAGIC_RX_QREFILL | index;
+ magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QREFILL, rxq);
/* This is guaranteed due to the start/stop order of rx and ev */
KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
@@ -843,7 +842,7 @@ sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop)
if (rx_desc->flags & EFX_PKT_PREFIX_LEN) {
uint16_t tmp_size;
int rc;
- rc = efx_psuedo_hdr_pkt_length_get(sc->enp,
+ rc = efx_psuedo_hdr_pkt_length_get(sc->enp,
mtod(m, uint8_t *),
&tmp_size);
KASSERT(rc == 0, ("cannot get packet length: %d", rc));
@@ -1110,7 +1109,7 @@ sfxge_rx_start(struct sfxge_softc *sc)
EFSYS_ASSERT(ISP2(align));
sc->rx_buffer_size = P2ROUNDUP(sc->rx_buffer_size, align);
- /*
+ /*
* Standard mbuf zones only guarantee pointer-size alignment;
* we need extra space to align to the cache line
*/
diff --git a/sys/dev/sfxge/sfxge_rx.h b/sys/dev/sfxge/sfxge_rx.h
index e870ed5..2196c4e 100644
--- a/sys/dev/sfxge/sfxge_rx.h
+++ b/sys/dev/sfxge/sfxge_rx.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
@@ -43,24 +43,6 @@
#define SFXGE_LRO 1
#endif
-#define SFXGE_MAGIC_RESERVED 0x8000
-
-#define SFXGE_MAGIC_DMAQ_LABEL_WIDTH 6
-#define SFXGE_MAGIC_DMAQ_LABEL_MASK \
- ((1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH) - 1)
-
-#define SFXGE_MAGIC_RX_QFLUSH_DONE \
- (SFXGE_MAGIC_RESERVED | (1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
-
-#define SFXGE_MAGIC_RX_QFLUSH_FAILED \
- (SFXGE_MAGIC_RESERVED | (2 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
-
-#define SFXGE_MAGIC_RX_QREFILL \
- (SFXGE_MAGIC_RESERVED | (3 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
-
-#define SFXGE_MAGIC_TX_QFLUSH_DONE \
- (SFXGE_MAGIC_RESERVED | (4 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
-
#define SFXGE_RX_SCALE_MAX EFX_MAXRSS
struct sfxge_rx_sw_desc {
diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c
index 6ae5a07..caaa067 100644
--- a/sys/dev/sfxge/sfxge_tx.c
+++ b/sys/dev/sfxge/sfxge_tx.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
diff --git a/sys/dev/sfxge/sfxge_tx.h b/sys/dev/sfxge/sfxge_tx.h
index ce5d3c4..c53279b 100644
--- a/sys/dev/sfxge/sfxge_tx.h
+++ b/sys/dev/sfxge/sfxge_tx.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2010-2015 Solarflare Communications Inc.
+ * Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
diff --git a/sys/dev/sfxge/sfxge_version.h b/sys/dev/sfxge/sfxge_version.h
index dbc23ba..ab0ad54 100644
--- a/sys/dev/sfxge/sfxge_version.h
+++ b/sys/dev/sfxge/sfxge_version.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2015 Solarflare Communications Inc.
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by OKTET Labs under contract for
@@ -36,6 +36,6 @@
#ifndef _SFXGE_VERSION_H
#define _SFXGE_VERSION_H
-#define SFXGE_VERSION_STRING "v4.8.0.1019"
+#define SFXGE_VERSION_STRING "v4.8.0.1071"
#endif /* _SFXGE_DRIVER_VERSION_H */
diff --git a/sys/dev/usb/net/uhso.c b/sys/dev/usb/net/uhso.c
index 1795993..e614582 100644
--- a/sys/dev/usb/net/uhso.c
+++ b/sys/dev/usb/net/uhso.c
@@ -1223,6 +1223,7 @@ uhso_mux_write_callback(struct usb_xfer *xfer, usb_error_t error)
ht->ht_muxport);
/* FALLTHROUGH */
case USB_ST_SETUP:
+tr_setup:
pc = usbd_xfer_get_frame(xfer, 1);
if (ucom_get_data(&sc->sc_ucom[ht->ht_muxport], pc,
0, 32, &actlen)) {
@@ -1253,7 +1254,8 @@ uhso_mux_write_callback(struct usb_xfer *xfer, usb_error_t error)
UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
if (error == USB_ERR_CANCELLED)
break;
- break;
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
}
}
diff --git a/sys/dev/usb/usb_dev.c b/sys/dev/usb/usb_dev.c
index 7231230..2973fa4 100644
--- a/sys/dev/usb/usb_dev.c
+++ b/sys/dev/usb/usb_dev.c
@@ -229,7 +229,7 @@ usb_ref_device(struct usb_cdev_privdata *cpd,
* We need to grab the enumeration SX-lock before
* grabbing the FIFO refs to avoid deadlock at detach!
*/
- crd->do_unlock = usbd_enum_lock(cpd->udev);
+ crd->do_unlock = usbd_enum_lock_sig(cpd->udev);
mtx_lock(&usb_ref_lock);
@@ -237,6 +237,12 @@ usb_ref_device(struct usb_cdev_privdata *cpd,
* Set "is_uref" after grabbing the default SX lock
*/
crd->is_uref = 1;
+
+ /* check for signal */
+ if (crd->do_unlock > 1) {
+ crd->do_unlock = 0;
+ goto error;
+ }
}
/* check if we are doing an open */
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index 3e29aa4..639d461 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -1777,7 +1777,9 @@ usb_alloc_device(device_t parent_dev, struct usb_bus *bus,
scratch_ptr = udev->scratch.data;
- if (udev->ddesc.iManufacturer ||
+ if (udev->flags.no_strings) {
+ err = USB_ERR_INVAL;
+ } else if (udev->ddesc.iManufacturer ||
udev->ddesc.iProduct ||
udev->ddesc.iSerialNumber) {
/* read out the language ID string */
@@ -2738,7 +2740,7 @@ usbd_device_attached(struct usb_device *udev)
/*
* The following function locks enumerating the given USB device. If
* the lock is already grabbed this function returns zero. Else a
- * non-zero value is returned.
+ * a value of one is returned.
*/
uint8_t
usbd_enum_lock(struct usb_device *udev)
@@ -2757,6 +2759,27 @@ usbd_enum_lock(struct usb_device *udev)
return (1);
}
+#if USB_HAVE_UGEN
+/*
+ * This function is the same like usbd_enum_lock() except a value of
+ * 255 is returned when a signal is pending:
+ */
+uint8_t
+usbd_enum_lock_sig(struct usb_device *udev)
+{
+ if (sx_xlocked(&udev->enum_sx))
+ return (0);
+ if (sx_xlock_sig(&udev->enum_sx))
+ return (255);
+ if (sx_xlock_sig(&udev->sr_sx)) {
+ sx_xunlock(&udev->enum_sx);
+ return (255);
+ }
+ mtx_lock(&Giant);
+ return (1);
+}
+#endif
+
/* The following function unlocks enumerating the given USB device. */
void
diff --git a/sys/dev/usb/usb_device.h b/sys/dev/usb/usb_device.h
index 5a94998..7a979bb 100644
--- a/sys/dev/usb/usb_device.h
+++ b/sys/dev/usb/usb_device.h
@@ -314,6 +314,9 @@ void usb_set_device_state(struct usb_device *, enum usb_dev_state);
enum usb_dev_state usb_get_device_state(struct usb_device *);
uint8_t usbd_enum_lock(struct usb_device *);
+#if USB_HAVE_UGEN
+uint8_t usbd_enum_lock_sig(struct usb_device *);
+#endif
void usbd_enum_unlock(struct usb_device *);
void usbd_sr_lock(struct usb_device *);
void usbd_sr_unlock(struct usb_device *);
diff --git a/sys/fs/devfs/devfs_devs.c b/sys/fs/devfs/devfs_devs.c
index 4723a63..288e7bc 100644
--- a/sys/fs/devfs/devfs_devs.c
+++ b/sys/fs/devfs/devfs_devs.c
@@ -127,16 +127,11 @@ devfs_alloc(int flags)
return (NULL);
cdp->cdp_dirents = &cdp->cdp_dirent0;
- cdp->cdp_dirent0 = NULL;
- cdp->cdp_maxdirent = 0;
- cdp->cdp_inode = 0;
cdev = &cdp->cdp_c;
-
LIST_INIT(&cdev->si_children);
vfs_timestamp(&ts);
cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;
- cdev->si_cred = NULL;
return (cdev);
}
diff --git a/sys/fs/fuse/fuse_file.c b/sys/fs/fuse/fuse_file.c
index d9fb67b..376e5f5 100644
--- a/sys/fs/fuse/fuse_file.c
+++ b/sys/fs/fuse/fuse_file.c
@@ -141,7 +141,17 @@ fuse_filehandle_open(struct vnode *vp,
foo = fdi.answ;
fuse_filehandle_init(vp, fufh_type, fufhp, foo->fh);
- fuse_vnode_open(vp, foo->open_flags, td);
+
+ /*
+ * For WRONLY opens, force DIRECT_IO. This is necessary
+ * since writing a partial block through the buffer cache
+ * will result in a read of the block and that read won't
+ * be allowed by the WRONLY open.
+ */
+ if (fufh_type == FUFH_WRONLY)
+ fuse_vnode_open(vp, foo->open_flags | FOPEN_DIRECT_IO, td);
+ else
+ fuse_vnode_open(vp, foo->open_flags, td);
out:
fdisp_destroy(&fdi);
@@ -206,6 +216,28 @@ fuse_filehandle_valid(struct vnode *vp, fufh_type_t fufh_type)
return FUFH_IS_VALID(fufh);
}
+/*
+ * Check for a valid file handle, first the type requested, but if that
+ * isn't valid, try for FUFH_RDWR.
+ * Return the FUFH type that is valid or FUFH_INVALID if there are none.
+ * This is a variant of fuse_filehandle_vaild() analogous to
+ * fuse_filehandle_getrw().
+ */
+fufh_type_t
+fuse_filehandle_validrw(struct vnode *vp, fufh_type_t fufh_type)
+{
+ struct fuse_vnode_data *fvdat = VTOFUD(vp);
+ struct fuse_filehandle *fufh;
+
+ fufh = &fvdat->fufh[fufh_type];
+ if (FUFH_IS_VALID(fufh) != 0)
+ return (fufh_type);
+ fufh = &fvdat->fufh[FUFH_RDWR];
+ if (FUFH_IS_VALID(fufh) != 0)
+ return (FUFH_RDWR);
+ return (FUFH_INVALID);
+}
+
int
fuse_filehandle_get(struct vnode *vp, fufh_type_t fufh_type,
struct fuse_filehandle **fufhp)
diff --git a/sys/fs/fuse/fuse_file.h b/sys/fs/fuse/fuse_file.h
index 7d605ee..097cf18 100644
--- a/sys/fs/fuse/fuse_file.h
+++ b/sys/fs/fuse/fuse_file.h
@@ -137,6 +137,7 @@ fuse_filehandle_xlate_to_oflags(fufh_type_t type)
}
int fuse_filehandle_valid(struct vnode *vp, fufh_type_t fufh_type);
+fufh_type_t fuse_filehandle_validrw(struct vnode *vp, fufh_type_t fufh_type);
int fuse_filehandle_get(struct vnode *vp, fufh_type_t fufh_type,
struct fuse_filehandle **fufhp);
int fuse_filehandle_getrw(struct vnode *vp, fufh_type_t fufh_type,
diff --git a/sys/fs/fuse/fuse_node.c b/sys/fs/fuse/fuse_node.c
index a6119dd..3d6dd8f 100644
--- a/sys/fs/fuse/fuse_node.c
+++ b/sys/fs/fuse/fuse_node.c
@@ -289,7 +289,9 @@ fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td)
* XXXIP: Handle fd based DIRECT_IO
*/
if (fuse_open_flags & FOPEN_DIRECT_IO) {
+ ASSERT_VOP_ELOCKED(vp, __func__);
VTOFUD(vp)->flag |= FN_DIRECTIO;
+ fuse_io_invalbuf(vp, td);
} else {
VTOFUD(vp)->flag &= ~FN_DIRECTIO;
}
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
index f2d0188..1398ed1 100644
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -335,8 +335,9 @@ fuse_vnop_create(struct vop_create_args *ap)
/* XXX: Will we ever want devices ? */
if ((vap->va_type != VREG)) {
- MPASS(vap->va_type != VFIFO);
- goto bringup;
+ printf("fuse_vnop_create: unsupported va_type %d\n",
+ vap->va_type);
+ return (EINVAL);
}
debug_printf("parent nid = %ju, mode = %x\n", (uintmax_t)parentnid,
mode);
@@ -364,7 +365,7 @@ fuse_vnop_create(struct vop_create_args *ap)
debug_printf("create: got err=%d from daemon\n", err);
goto out;
}
-bringup:
+
feo = fdip->answ;
if ((err = fuse_internal_checkentry(feo, VREG))) {
@@ -1125,6 +1126,7 @@ fuse_vnop_open(struct vop_open_args *ap)
struct fuse_vnode_data *fvdat;
int error, isdir = 0;
+ int32_t fuse_open_flags;
FS_DEBUG2G("inode=%ju mode=0x%x\n", (uintmax_t)VTOI(vp), mode);
@@ -1136,14 +1138,24 @@ fuse_vnop_open(struct vop_open_args *ap)
if (vnode_isdir(vp)) {
isdir = 1;
}
+ fuse_open_flags = 0;
if (isdir) {
fufh_type = FUFH_RDONLY;
} else {
fufh_type = fuse_filehandle_xlate_from_fflags(mode);
- }
-
- if (fuse_filehandle_valid(vp, fufh_type)) {
- fuse_vnode_open(vp, 0, td);
+ /*
+ * For WRONLY opens, force DIRECT_IO. This is necessary
+ * since writing a partial block through the buffer cache
+ * will result in a read of the block and that read won't
+ * be allowed by the WRONLY open.
+ */
+ if (fufh_type == FUFH_WRONLY ||
+ (fvdat->flag & FN_DIRECTIO) != 0)
+ fuse_open_flags = FOPEN_DIRECT_IO;
+ }
+
+ if (fuse_filehandle_validrw(vp, fufh_type) != FUFH_INVALID) {
+ fuse_vnode_open(vp, fuse_open_flags, td);
return 0;
}
error = fuse_filehandle_open(vp, fufh_type, NULL, td, cred);
diff --git a/sys/fs/nfsserver/nfs_nfsdserv.c b/sys/fs/nfsserver/nfs_nfsdserv.c
index 0264182..4532ca4 100644
--- a/sys/fs/nfsserver/nfs_nfsdserv.c
+++ b/sys/fs/nfsserver/nfs_nfsdserv.c
@@ -2416,8 +2416,6 @@ nfsrvd_lockt(struct nfsrv_descript *nd, __unused int isdgram,
if (!nd->nd_repstat)
nd->nd_repstat = nfsrv_lockctrl(vp, &stp, &lop, &cf, clientid,
&stateid, exp, nd, p);
- if (stp)
- FREE((caddr_t)stp, M_NFSDSTATE);
if (nd->nd_repstat) {
if (nd->nd_repstat == NFSERR_DENIED) {
NFSM_BUILD(tl, u_int32_t *, 7 * NFSX_UNSIGNED);
@@ -2439,6 +2437,8 @@ nfsrvd_lockt(struct nfsrv_descript *nd, __unused int isdgram,
}
}
vput(vp);
+ if (stp)
+ FREE((caddr_t)stp, M_NFSDSTATE);
NFSEXITCODE2(0, nd);
return (0);
nfsmout:
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index a633f72..c0e65d5 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -486,13 +486,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
break;
default:
- if (td->td_proc->p_sysent->sv_errsize) {
- if (error >= td->td_proc->p_sysent->sv_errsize)
- error = -1; /* XXX */
- else
- error = td->td_proc->p_sysent->sv_errtbl[error];
- }
- td->td_frame->tf_eax = error;
+ td->td_frame->tf_eax = SV_ABI_ERRNO(td->td_proc, error);
td->td_frame->tf_eflags |= PSL_C;
break;
}
diff --git a/sys/i386/linux/linux_proto.h b/sys/i386/linux/linux_proto.h
index cc5da24..3717c9f 100644
--- a/sys/i386/linux/linux_proto.h
+++ b/sys/i386/linux/linux_proto.h
@@ -478,16 +478,16 @@ struct linux_sysctl_args {
};
struct linux_sched_setparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getparam_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_setscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
char policy_l_[PADL_(l_int)]; l_int policy; char policy_r_[PADR_(l_int)];
- char param_l_[PADL_(struct l_sched_param *)]; struct l_sched_param * param; char param_r_[PADR_(struct l_sched_param *)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
};
struct linux_sched_getscheduler_args {
char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
diff --git a/sys/i386/linux/linux_systrace_args.c b/sys/i386/linux/linux_systrace_args.c
index 7feba47..f02f34f 100644
--- a/sys/i386/linux/linux_systrace_args.c
+++ b/sys/i386/linux/linux_systrace_args.c
@@ -1085,7 +1085,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 154: {
struct linux_sched_setparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1093,7 +1093,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 155: {
struct linux_sched_getparam_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
- uarg[1] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 2;
break;
}
@@ -1102,7 +1102,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
struct linux_sched_setscheduler_args *p = params;
iarg[0] = p->pid; /* l_pid_t */
iarg[1] = p->policy; /* l_int */
- uarg[2] = (intptr_t) p->param; /* struct l_sched_param * */
+ uarg[2] = (intptr_t) p->param; /* struct sched_param * */
*n_args = 3;
break;
}
@@ -4072,7 +4072,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -4085,7 +4085,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_pid_t";
break;
case 1:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
@@ -4101,7 +4101,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_int";
break;
case 2:
- p = "struct l_sched_param *";
+ p = "struct sched_param *";
break;
default:
break;
diff --git a/sys/i386/linux/syscalls.master b/sys/i386/linux/syscalls.master
index 1032ef0..7ec3154 100644
--- a/sys/i386/linux/syscalls.master
+++ b/sys/i386/linux/syscalls.master
@@ -270,12 +270,12 @@
152 AUE_MLOCKALL NOPROTO { int mlockall(int how); }
153 AUE_MUNLOCKALL NOPROTO { int munlockall(void); }
154 AUE_SCHED_SETPARAM STD { int linux_sched_setparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
155 AUE_SCHED_GETPARAM STD { int linux_sched_getparam(l_pid_t pid, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
156 AUE_SCHED_SETSCHEDULER STD { int linux_sched_setscheduler( \
l_pid_t pid, l_int policy, \
- struct l_sched_param *param); }
+ struct sched_param *param); }
157 AUE_SCHED_GETSCHEDULER STD { int linux_sched_getscheduler( \
l_pid_t pid); }
158 AUE_NULL NOPROTO { int sched_yield(void); }
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 867c263..fbad175 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -362,7 +362,7 @@ pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
if (sbt == 0)
sbt = tick_sbt;
- if (cold || kdb_active) {
+ if (cold || kdb_active || SCHEDULER_STOPPED()) {
/*
* We delay one second at a time to avoid overflowing the
* system specific DELAY() function(s):
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index aecf554..bef5829 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -2197,6 +2197,7 @@ cvtstat(st, ost)
struct ostat *ost;
{
+ bzero(ost, sizeof(*ost));
ost->st_dev = st->st_dev;
ost->st_ino = st->st_ino;
ost->st_mode = st->st_mode;
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index b1c1299..f17ae89 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -1372,6 +1372,8 @@ vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
if (error == 0) {
VATTR_NULL(&vattr);
vattr.va_size = length;
+ if ((fp->f_flag & O_FSYNC) != 0)
+ vattr.va_vaflags |= VA_SYNC;
error = VOP_SETATTR(vp, &vattr, fp->f_cred);
}
out:
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 5135093..9d075d1 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -155,6 +155,7 @@ SUBDIR= \
${_iir} \
${_imgact_binmisc} \
${_io} \
+ ${_ioat} \
${_ipoib} \
${_ipdivert} \
${_ipfilter} \
@@ -723,6 +724,7 @@ _if_ndis= if_ndis
_igb= igb
_iir= iir
_io= io
+_ioat= ioat
_ipmi= ipmi
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_ipoib= ipoib
diff --git a/sys/modules/ioat/Makefile b/sys/modules/ioat/Makefile
new file mode 100644
index 0000000..5a2c417
--- /dev/null
+++ b/sys/modules/ioat/Makefile
@@ -0,0 +1,15 @@
+# ioat Loadable Kernel Module
+#
+# $FreeBSD$
+
+IOAT_SRC_PATH = ${.CURDIR}/../..
+
+.PATH: ${IOAT_SRC_PATH}/dev/ioat
+
+KMOD= ioat
+SRCS= ioat.c ioat_test.c
+SRCS+= device_if.h bus_if.h pci_if.h
+
+CFLAGS+= -I${IOAT_SRC_PATH}
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/sfxge/Makefile b/sys/modules/sfxge/Makefile
index ba4108b..cc00d3a 100644
--- a/sys/modules/sfxge/Makefile
+++ b/sys/modules/sfxge/Makefile
@@ -31,11 +31,11 @@ SRCS+= siena_mac.c siena_mcdi.c siena_nic.c siena_nvram.c siena_phy.c
SRCS+= siena_sram.c siena_vpd.c
SRCS+= siena_flash.h siena_impl.h
+SRCS+= ef10_ev.c ef10_filter.c ef10_intr.c ef10_mac.c ef10_mcdi.c ef10_nic.c
+SRCS+= ef10_nvram.c ef10_phy.c ef10_rx.c ef10_tx.c ef10_vpd.c
SRCS+= ef10_impl.h
-SRCS+= hunt_ev.c hunt_intr.c hunt_mac.c hunt_mcdi.c hunt_nic.c
-SRCS+= hunt_nvram.c hunt_rx.c hunt_phy.c hunt_sram.c hunt_tx.c hunt_vpd.c
-SRCS+= hunt_filter.c
+SRCS+= hunt_nic.c hunt_phy.c
SRCS+= hunt_impl.h
SRCS+= medford_nic.c
diff --git a/sys/net/if.c b/sys/net/if.c
index 5c2dda3..e03584f 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -116,7 +116,8 @@ SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW,
/* Log promiscuous mode change events */
static int log_promisc_mode_change = 1;
-SYSCTL_INT(_net_link, OID_AUTO, log_promisc_mode_change, CTLFLAG_RW,
+TUNABLE_INT("net.link.log_promisc_mode_change", &log_promisc_mode_change);
+SYSCTL_INT(_net_link, OID_AUTO, log_promisc_mode_change, CTLFLAG_RDTUN,
&log_promisc_mode_change, 1,
"log promiscuous mode change events");
diff --git a/sys/netinet/ip_dummynet.h b/sys/netinet/ip_dummynet.h
index 1c09197..202f1e2 100644
--- a/sys/netinet/ip_dummynet.h
+++ b/sys/netinet/ip_dummynet.h
@@ -104,6 +104,7 @@ enum { /* user flags */
DN_HAS_PROFILE = 0x0010, /* a link has a profile */
DN_IS_RED = 0x0020,
DN_IS_GENTLE_RED= 0x0040,
+ DN_IS_ECN = 0x0080,
DN_PIPE_CMD = 0x1000, /* pipe config... */
};
diff --git a/sys/netpfil/ipfw/ip_dn_io.c b/sys/netpfil/ipfw/ip_dn_io.c
index a67cf0a..90e2ccf 100644
--- a/sys/netpfil/ipfw/ip_dn_io.c
+++ b/sys/netpfil/ipfw/ip_dn_io.c
@@ -337,6 +337,8 @@ red_drops (struct dn_queue *q, int len)
return (0); /* accept packet */
}
if (q->avg >= fs->max_th) { /* average queue >= max threshold */
+ if (fs->fs.flags & DN_IS_ECN)
+ return (1);
if (fs->fs.flags & DN_IS_GENTLE_RED) {
/*
* According to Gentle-RED, if avg is greater than
@@ -352,6 +354,8 @@ red_drops (struct dn_queue *q, int len)
return (1);
}
} else if (q->avg > fs->min_th) {
+ if (fs->fs.flags & DN_IS_ECN)
+ return (1);
/*
* We compute p_b using the linear dropping function
* p_b = c_1 * avg - c_2
@@ -384,6 +388,70 @@ red_drops (struct dn_queue *q, int len)
}
/*
+ * ECN/ECT Processing (partially adopted from altq)
+ */
+static int
+ecn_mark(struct mbuf* m)
+{
+ struct ip *ip;
+ ip = mtod(m, struct ip *);
+
+ switch (ip->ip_v) {
+ case IPVERSION:
+ {
+ u_int8_t otos;
+ int sum;
+
+ if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
+ return (0); /* not-ECT */
+ if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
+ return (1); /* already marked */
+
+ /*
+ * ecn-capable but not marked,
+ * mark CE and update checksum
+ */
+ otos = ip->ip_tos;
+ ip->ip_tos |= IPTOS_ECN_CE;
+ /*
+ * update checksum (from RFC1624)
+ * HC' = ~(~HC + ~m + m')
+ */
+ sum = ~ntohs(ip->ip_sum) & 0xffff;
+ sum += (~otos & 0xffff) + ip->ip_tos;
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16); /* add carry */
+ ip->ip_sum = htons(~sum & 0xffff);
+ return (1);
+ }
+#ifdef INET6
+ case (IPV6_VERSION >> 4):
+ {
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ u_int32_t flowlabel;
+
+ flowlabel = ntohl(ip6->ip6_flow);
+ if ((flowlabel >> 28) != 6)
+ return (0); /* version mismatch! */
+ if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
+ (IPTOS_ECN_NOTECT << 20))
+ return (0); /* not-ECT */
+ if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
+ (IPTOS_ECN_CE << 20))
+ return (1); /* already marked */
+ /*
+ * ecn-capable but not marked, mark CE
+ */
+ flowlabel |= (IPTOS_ECN_CE << 20);
+ ip6->ip6_flow = htonl(flowlabel);
+ return (1);
+ }
+#endif
+ }
+ return (0);
+}
+
+/*
* Enqueue a packet in q, subject to space and queue management policy
* (whose parameters are in q->fs).
* Update stats for the queue and the scheduler.
@@ -414,8 +482,10 @@ dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop)
goto drop;
if (f->plr && random() < f->plr)
goto drop;
- if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len))
- goto drop;
+ if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) {
+ if (!(f->flags & DN_IS_ECN) || !ecn_mark(m))
+ goto drop;
+ }
if (f->flags & DN_QSIZE_BYTES) {
if (q->ni.len_bytes > f->qsize)
goto drop;
@@ -427,14 +497,14 @@ dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop)
q->ni.len_bytes += len;
ni->length++;
ni->len_bytes += len;
- return 0;
+ return (0);
drop:
io_pkt_drop++;
q->ni.drops++;
ni->drops++;
FREE_PKT(m);
- return 1;
+ return (1);
}
/*
diff --git a/sys/netpfil/ipfw/ip_dummynet.c b/sys/netpfil/ipfw/ip_dummynet.c
index 57216be..420b491 100644
--- a/sys/netpfil/ipfw/ip_dummynet.c
+++ b/sys/netpfil/ipfw/ip_dummynet.c
@@ -1073,7 +1073,10 @@ config_red(struct dn_fsk *fs)
fs->min_th = SCALE(fs->fs.min_th);
fs->max_th = SCALE(fs->fs.max_th);
- fs->c_1 = fs->max_p / (fs->fs.max_th - fs->fs.min_th);
+ if (fs->fs.max_th == fs->fs.min_th)
+ fs->c_1 = fs->max_p;
+ else
+ fs->c_1 = SCALE((int64_t)(fs->max_p)) / (fs->fs.max_th - fs->fs.min_th);
fs->c_2 = SCALE_MUL(fs->c_1, SCALE(fs->fs.min_th));
if (fs->fs.flags & DN_IS_GENTLE_RED) {
diff --git a/sys/netpfil/pf/pf_norm.c b/sys/netpfil/pf/pf_norm.c
index b925960..3559909 100644
--- a/sys/netpfil/pf/pf_norm.c
+++ b/sys/netpfil/pf/pf_norm.c
@@ -434,7 +434,7 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
*(struct pf_fragment_cmp *)frag = *key;
frag->fr_flags = 0;
- frag->fr_timeout = time_second;
+ frag->fr_timeout = time_uptime;
frag->fr_maxlen = frent->fe_len;
TAILQ_INIT(&frag->fr_queue);
diff --git a/sys/nfs/bootp_subr.c b/sys/nfs/bootp_subr.c
index 55bc165..7a2848a 100644
--- a/sys/nfs/bootp_subr.c
+++ b/sys/nfs/bootp_subr.c
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/endian.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
@@ -154,6 +155,7 @@ struct bootpc_ifcontext {
int dhcpquerytype; /* dhcp type sent */
struct in_addr dhcpserver;
int gotdhcpserver;
+ uint16_t mtu;
};
#define TAG_MAXLEN 1024
@@ -195,6 +197,7 @@ struct bootpc_globalcontext {
#define TAG_ROUTERS 3 /* Routers (in order of preference) */
#define TAG_HOSTNAME 12 /* Client host name */
#define TAG_ROOT 17 /* Root path */
+#define TAG_INTF_MTU 26 /* Interface MTU Size (RFC2132) */
/* DHCP specific tags */
#define TAG_OVERLOAD 52 /* Option Overload */
@@ -269,7 +272,7 @@ static int bootpc_call(struct bootpc_globalcontext *gctx,
static void bootpc_fakeup_interface(struct bootpc_ifcontext *ifctx,
struct thread *td);
-static int bootpc_adjust_interface(struct bootpc_ifcontext *ifctx,
+static void bootpc_adjust_interface(struct bootpc_ifcontext *ifctx,
struct bootpc_globalcontext *gctx, struct thread *td);
static void bootpc_decode_reply(struct nfsv3_diskless *nd,
@@ -1004,33 +1007,41 @@ bootpc_shutdown_interface(struct bootpc_ifcontext *ifctx, struct thread *td)
panic("%s: SIOCDIFADDR, error=%d", __func__, error);
}
-static int
+static void
bootpc_adjust_interface(struct bootpc_ifcontext *ifctx,
struct bootpc_globalcontext *gctx, struct thread *td)
{
int error;
- struct sockaddr_in defdst;
- struct sockaddr_in defmask;
struct sockaddr_in *sin;
struct ifreq *ifr;
struct in_aliasreq *ifra;
struct sockaddr_in *myaddr;
struct sockaddr_in *netmask;
- struct sockaddr_in *gw;
ifr = &ifctx->ireq;
ifra = &ifctx->iareq;
myaddr = &ifctx->myaddr;
netmask = &ifctx->netmask;
- gw = &ifctx->gw;
if (bootpc_ifctx_isresolved(ifctx) == 0) {
/* Shutdown interfaces where BOOTP failed */
bootpc_shutdown_interface(ifctx, td);
- return (0);
+ return;
}
- printf("Adjusted interface %s\n", ifctx->ireq.ifr_name);
+ printf("Adjusted interface %s", ifctx->ireq.ifr_name);
+
+ /* Do BOOTP interface options */
+ if (ifctx->mtu != 0) {
+ printf(" (MTU=%d%s)", ifctx->mtu,
+ (ifctx->mtu > 1514) ? "/JUMBO" : "");
+ ifr->ifr_mtu = ifctx->mtu;
+ error = ifioctl(bootp_so, SIOCSIFMTU, (caddr_t) ifr, td);
+ if (error != 0)
+ panic("%s: SIOCSIFMTU, error=%d", __func__, error);
+ }
+ printf("\n");
+
/*
* Do enough of ifconfig(8) so that the chosen interface
* can talk to the servers. (just set the address)
@@ -1050,24 +1061,48 @@ bootpc_adjust_interface(struct bootpc_ifcontext *ifctx,
error = ifioctl(bootp_so, SIOCAIFADDR, (caddr_t)ifra, td);
if (error != 0)
panic("%s: SIOCAIFADDR, error=%d", __func__, error);
+}
+
+static void
+bootpc_add_default_route(struct bootpc_ifcontext *ifctx)
+{
+ int error;
+ struct sockaddr_in defdst;
+ struct sockaddr_in defmask;
- /* Add new default route */
+ if (ifctx->gw.sin_addr.s_addr == htonl(INADDR_ANY))
+ return;
- if (ifctx->gotgw != 0 || gctx->gotgw == 0) {
- clear_sinaddr(&defdst);
- clear_sinaddr(&defmask);
- /* XXX MRT just table 0 */
- error = rtrequest_fib(RTM_ADD,
- (struct sockaddr *) &defdst, (struct sockaddr *) gw,
- (struct sockaddr *) &defmask,
- (RTF_UP | RTF_GATEWAY | RTF_STATIC), NULL, RT_DEFAULT_FIB);
- if (error != 0) {
- printf("%s: RTM_ADD, error=%d\n", __func__, error);
- return (error);
- }
+ clear_sinaddr(&defdst);
+ clear_sinaddr(&defmask);
+
+ error = rtrequest_fib(RTM_ADD, (struct sockaddr *)&defdst,
+ (struct sockaddr *) &ifctx->gw, (struct sockaddr *)&defmask,
+ (RTF_UP | RTF_GATEWAY | RTF_STATIC), NULL, RT_DEFAULT_FIB);
+ if (error != 0) {
+ printf("%s: RTM_ADD, error=%d\n", __func__, error);
}
+}
+
+static void
+bootpc_remove_default_route(struct bootpc_ifcontext *ifctx)
+{
+ int error;
+ struct sockaddr_in defdst;
+ struct sockaddr_in defmask;
+
+ if (ifctx->gw.sin_addr.s_addr == htonl(INADDR_ANY))
+ return;
+
+ clear_sinaddr(&defdst);
+ clear_sinaddr(&defmask);
- return (0);
+ error = rtrequest_fib(RTM_DELETE, (struct sockaddr *)&defdst,
+ (struct sockaddr *) &ifctx->gw, (struct sockaddr *)&defmask,
+ (RTF_UP | RTF_GATEWAY | RTF_STATIC), NULL, RT_DEFAULT_FIB);
+ if (error != 0) {
+ printf("%s: RTM_DELETE, error=%d\n", __func__, error);
+ }
}
static int
@@ -1459,6 +1494,8 @@ bootpc_decode_reply(struct nfsv3_diskless *nd, struct bootpc_ifcontext *ifctx,
if (p == NULL) {
p = bootpc_tag(&gctx->tag, &ifctx->reply, ifctx->replylen,
TAG_ROOT);
+ if (p != NULL)
+ ifctx->gotrootpath = 1;
}
#ifdef ROOTDEVNAME
if ((p == NULL || (boothowto & RB_DFLTROOT) != 0) &&
@@ -1478,7 +1515,6 @@ bootpc_decode_reply(struct nfsv3_diskless *nd, struct bootpc_ifcontext *ifctx,
}
printf("rootfs %s ", p);
gctx->gotrootpath = 1;
- ifctx->gotrootpath = 1;
gctx->setrootfs = ifctx;
p = bootpc_tag(&gctx->tag, &ifctx->reply,
@@ -1518,6 +1554,11 @@ bootpc_decode_reply(struct nfsv3_diskless *nd, struct bootpc_ifcontext *ifctx,
p[i] = '\0';
}
+ p = bootpc_tag(&gctx->tag, &ifctx->reply, ifctx->replylen,
+ TAG_INTF_MTU);
+ if (p != NULL) {
+ ifctx->mtu = be16dec(p);
+ }
printf("\n");
@@ -1529,10 +1570,6 @@ bootpc_decode_reply(struct nfsv3_diskless *nd, struct bootpc_ifcontext *ifctx,
else
ifctx->netmask.sin_addr.s_addr = htonl(IN_CLASSC_NET);
}
- if (ifctx->gotgw == 0) {
- /* Use proxyarp */
- ifctx->gw.sin_addr.s_addr = ifctx->myaddr.sin_addr.s_addr;
- }
}
void
@@ -1724,9 +1761,11 @@ retry:
setenv("boot.netif.name", ifctx->ifp->if_xname);
+ bootpc_add_default_route(ifctx);
error = md_mount(&nd->root_saddr, nd->root_hostnam,
nd->root_fh, &nd->root_fhsize,
&nd->root_args, td);
+ bootpc_remove_default_route(ifctx);
if (error != 0) {
if (gctx->any_root_overrides == 0)
panic("nfs_boot: mount root, error=%d", error);
@@ -1747,6 +1786,7 @@ retry:
ifctx->myaddr.sin_addr.s_addr |
~ ifctx->netmask.sin_addr.s_addr;
bcopy(&ifctx->netmask, &nd->myif.ifra_mask, sizeof(ifctx->netmask));
+ bcopy(&ifctx->gw, &nd->mygateway, sizeof(ifctx->gw));
out:
while((ifctx = STAILQ_FIRST(&gctx->interfaces)) != NULL) {
diff --git a/sys/ofed/include/linux/etherdevice.h b/sys/ofed/include/linux/etherdevice.h
index c50dc5d..a975bd0 100644
--- a/sys/ofed/include/linux/etherdevice.h
+++ b/sys/ofed/include/linux/etherdevice.h
@@ -37,6 +37,9 @@
#include <linux/types.h>
+#include <sys/random.h>
+#include <sys/libkern.h>
+
#define ETH_MODULE_SFF_8079 1
#define ETH_MODULE_SFF_8079_LEN 256
#define ETH_MODULE_SFF_8472 2
@@ -113,4 +116,31 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
memcpy(dst, src, 6);
}
+static inline bool
+ether_addr_equal(const u8 *pa, const u8 *pb)
+{
+ return (memcmp(pa, pb, 6) == 0);
+}
+
+static inline bool
+ether_addr_equal_64bits(const u8 *pa, const u8 *pb)
+{
+ return (memcmp(pa, pb, 6) == 0);
+}
+
+static inline void
+eth_broadcast_addr(u8 *pa)
+{
+ memset(pa, 0xff, 6);
+}
+
+static inline void
+random_ether_addr(u8 * dst)
+{
+ read_random(dst, 6);
+
+ dst[0] &= 0xfe;
+ dst[0] |= 0x02;
+}
+
#endif /* _LINUX_ETHERDEVICE */
diff --git a/sys/powerpc/powerpc/exec_machdep.c b/sys/powerpc/powerpc/exec_machdep.c
index d53773c..a053a1a 100644
--- a/sys/powerpc/powerpc/exec_machdep.c
+++ b/sys/powerpc/powerpc/exec_machdep.c
@@ -901,11 +901,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
tf->srr0 -= 4;
break;
default:
- if (p->p_sysent->sv_errsize) {
- error = (error < p->p_sysent->sv_errsize) ?
- p->p_sysent->sv_errtbl[error] : -1;
- }
- tf->fixreg[FIRSTARG] = error;
+ tf->fixreg[FIRSTARG] = SV_ABI_ERRNO(p, error);
tf->cr |= 0x10000000; /* Set summary overflow */
break;
}
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index 60e4a2f..056a1b3 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -196,13 +196,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
break;
default:
- if (td->td_proc->p_sysent->sv_errsize) {
- if (error >= td->td_proc->p_sysent->sv_errsize)
- error = -1; /* XXX */
- else
- error = td->td_proc->p_sysent->sv_errtbl[error];
- }
- td->td_frame->tf_out[0] = error;
+ td->td_frame->tf_out[0] = SV_ABI_ERRNO(td->td_proc, error);
td->td_frame->tf_tstate |= TSTATE_XCC_C;
break;
}
diff --git a/sys/sys/sysent.h b/sys/sys/sysent.h
index 8436ba3..747b8ff 100644
--- a/sys/sys/sysent.h
+++ b/sys/sys/sysent.h
@@ -141,6 +141,8 @@ struct sysentvec {
#define SV_SHP 0x010000
#define SV_ABI_MASK 0xff
+#define SV_ABI_ERRNO(p, e) ((p)->p_sysent->sv_errsize <= 0 ? e : \
+ ((e) >= (p)->p_sysent->sv_errsize ? -1 : (p)->p_sysent->sv_errtbl[e]))
#define SV_PROC_FLAG(p, x) ((p)->p_sysent->sv_flags & (x))
#define SV_PROC_ABI(p) ((p)->p_sysent->sv_flags & SV_ABI_MASK)
#define SV_CURPROC_FLAG(x) SV_PROC_FLAG(curproc, x)
@@ -152,8 +154,6 @@ struct sysentvec {
#ifdef _KERNEL
extern struct sysentvec aout_sysvec;
-extern struct sysentvec elf_freebsd_sysvec;
-extern struct sysentvec null_sysvec;
extern struct sysent sysent[];
extern const char *syscallnames[];
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index d3d79aba..93037ae 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -286,6 +286,7 @@ struct vattr {
*/
#define VA_UTIMES_NULL 0x01 /* utimes argument was NULL */
#define VA_EXCLUSIVE 0x02 /* exclusive create request */
+#define VA_SYNC 0x04 /* O_SYNC truncation */
/*
* Flags for ioflag. (high 16 bits used to ask for read-ahead and
diff --git a/sys/ufs/ffs/ffs_inode.c b/sys/ufs/ffs/ffs_inode.c
index 608f876..ca92da2 100644
--- a/sys/ufs/ffs/ffs_inode.c
+++ b/sys/ufs/ffs/ffs_inode.c
@@ -562,7 +562,7 @@ extclean:
softdep_journal_freeblocks(ip, cred, length, IO_EXT);
else
softdep_setup_freeblocks(ip, length, IO_EXT);
- return (ffs_update(vp, !DOINGASYNC(vp)));
+ return (ffs_update(vp, (flags & IO_SYNC) != 0 || !DOINGASYNC(vp)));
}
/*
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index a46ce4c..fe07ff0 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -512,7 +512,7 @@ ffs_mount(struct mount *mp)
* We need the name for the mount point (also used for
* "last mounted on") copied in. If an error occurs,
* the mount point is discarded by the upper level code.
- * Note that vfs_mount() populates f_mntonname for us.
+ * Note that vfs_mount_alloc() populates f_mntonname for us.
*/
if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
vrele(devvp);
@@ -764,23 +764,31 @@ ffs_mountfs(devvp, mp, td)
cred = td ? td->td_ucred : NOCRED;
ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
+ KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
dev = devvp->v_rdev;
- dev_ref(dev);
+ if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
+ (uintptr_t)mp) == 0) {
+ VOP_UNLOCK(devvp, 0);
+ return (EBUSY);
+ }
DROP_GIANT();
g_topology_lock();
error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
g_topology_unlock();
PICKUP_GIANT();
+ if (error != 0) {
+ atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
+ VOP_UNLOCK(devvp, 0);
+ return (error);
+ }
+ dev_ref(dev);
+ devvp->v_bufobj.bo_ops = &ffs_ops;
VOP_UNLOCK(devvp, 0);
- if (error)
- goto out;
- if (devvp->v_rdev->si_iosize_max != 0)
- mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
+ if (dev->si_iosize_max != 0)
+ mp->mnt_iosize_max = dev->si_iosize_max;
if (mp->mnt_iosize_max > MAXPHYS)
mp->mnt_iosize_max = MAXPHYS;
- devvp->v_bufobj.bo_ops = &ffs_ops;
-
fs = NULL;
sblockloc = 0;
/*
@@ -1049,8 +1057,6 @@ ffs_mountfs(devvp, mp, td)
ffs_flushfiles(mp, FORCECLOSE, td);
goto out;
}
- if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
- devvp->v_rdev->si_mountpt = mp;
if (fs->fs_snapinum[0] != 0)
ffs_snapshot_mount(mp);
fs->fs_fmod = 1;
@@ -1058,7 +1064,7 @@ ffs_mountfs(devvp, mp, td)
(void) ffs_sbupdate(ump, MNT_WAIT, 0);
}
/*
- * Initialize filesystem stat information in mount struct.
+ * Initialize filesystem state information in mount struct.
*/
MNT_ILOCK(mp);
mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
@@ -1100,6 +1106,7 @@ out:
free(ump, M_UFSMNT);
mp->mnt_data = NULL;
}
+ atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
dev_rel(dev);
return (error);
}
@@ -1285,8 +1292,7 @@ ffs_unmount(mp, mntflags)
g_vfs_close(ump->um_cp);
g_topology_unlock();
PICKUP_GIANT();
- if (ump->um_devvp->v_type == VCHR && ump->um_devvp->v_rdev != NULL)
- ump->um_devvp->v_rdev->si_mountpt = NULL;
+ atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
vrele(ump->um_devvp);
dev_rel(ump->um_dev);
mtx_destroy(UFS_MTX(ump));
diff --git a/sys/ufs/ufs/ufs_lookup.c b/sys/ufs/ufs/ufs_lookup.c
index 85ab35a..c563300 100644
--- a/sys/ufs/ufs/ufs_lookup.c
+++ b/sys/ufs/ufs/ufs_lookup.c
@@ -1131,9 +1131,9 @@ ufs_direnter(dvp, tvp, dirp, cnp, newdirbp, isrename)
if (tvp != NULL)
VOP_UNLOCK(tvp, 0);
error = UFS_TRUNCATE(dvp, (off_t)dp->i_endoff,
- IO_NORMAL | IO_SYNC, cr);
+ IO_NORMAL | (DOINGASYNC(dvp) ? 0 : IO_SYNC), cr);
if (error != 0)
- vprint("ufs_direnter: failted to truncate", dvp);
+ vprint("ufs_direnter: failed to truncate", dvp);
#ifdef UFS_DIRHASH
if (error == 0 && dp->i_dirhash != NULL)
ufsdirhash_dirtrunc(dp, dp->i_endoff);
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index 392a626..3d91a71 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -626,7 +626,8 @@ ufs_setattr(ap)
*/
return (0);
}
- if ((error = UFS_TRUNCATE(vp, vap->va_size, IO_NORMAL,
+ if ((error = UFS_TRUNCATE(vp, vap->va_size, IO_NORMAL |
+ ((vap->va_vaflags & VA_SYNC) != 0 ? IO_SYNC : 0),
cred)) != 0)
return (error);
}
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index de2dff2..7fc922e 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -732,8 +732,6 @@ faultin(p)
* This swapin algorithm attempts to swap-in processes only if there
* is enough space for them. Of course, if a process waits for a long
* time, it will be swapped in anyway.
- *
- * Giant is held on entry.
*/
void
swapper(void)
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index e9d1280..5ea59b3 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1673,11 +1673,11 @@ vm_object_qcollapse(vm_object_t object)
void
vm_object_collapse(vm_object_t object)
{
+ vm_object_t backing_object, new_backing_object;
+
VM_OBJECT_ASSERT_WLOCKED(object);
-
- while (TRUE) {
- vm_object_t backing_object;
+ while (TRUE) {
/*
* Verify that the conditions are right for collapse:
*
@@ -1703,14 +1703,13 @@ vm_object_collapse(vm_object_t object)
break;
}
- if (
- object->paging_in_progress != 0 ||
- backing_object->paging_in_progress != 0
- ) {
+ if (object->paging_in_progress != 0 ||
+ backing_object->paging_in_progress != 0) {
vm_object_qcollapse(object);
VM_OBJECT_WUNLOCK(backing_object);
break;
}
+
/*
* We know that we can either collapse the backing object (if
* the parent is the only reference to it) or (perhaps) have
@@ -1722,6 +1721,9 @@ vm_object_collapse(vm_object_t object)
* case.
*/
if (backing_object->ref_count == 1) {
+ vm_object_pip_add(object, 1);
+ vm_object_pip_add(backing_object, 1);
+
/*
* If there is exactly one reference to the backing
* object, we can collapse it into the parent.
@@ -1793,15 +1795,15 @@ vm_object_collapse(vm_object_t object)
KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
backing_object));
+ vm_object_pip_wakeup(backing_object);
backing_object->type = OBJT_DEAD;
backing_object->ref_count = 0;
VM_OBJECT_WUNLOCK(backing_object);
vm_object_destroy(backing_object);
+ vm_object_pip_wakeup(object);
object_collapses++;
} else {
- vm_object_t new_backing_object;
-
/*
* If we do not entirely shadow the backing object,
* there is nothing we can do so we give up.
@@ -2130,6 +2132,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
*/
if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
prev_object->cred)) {
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
prev_object->charge += ptoa(next_size);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 243b11f..512151b 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1838,8 +1838,10 @@ retry:
m < &m_ret[npages]; m++) {
if ((req & VM_ALLOC_WIRED) != 0)
m->wire_count = 0;
- if (m >= m_tmp)
+ if (m >= m_tmp) {
m->object = NULL;
+ m->oflags |= VPO_UNMANAGED;
+ }
vm_page_free(m);
}
return (NULL);
@@ -2580,7 +2582,8 @@ vm_page_cache(vm_page_t m)
cache_was_empty = vm_radix_is_empty(&object->cache);
if (vm_radix_insert(&object->cache, m)) {
mtx_unlock(&vm_page_queue_free_mtx);
- if (object->resident_page_count == 0)
+ if (object->type == OBJT_VNODE &&
+ object->resident_page_count == 0)
vdrop(object->handle);
m->object = NULL;
vm_page_free(m);
OpenPOWER on IntegriCloud