author    jimharris <jimharris@FreeBSD.org>  2012-01-31 00:12:51 +0000
committer jimharris <jimharris@FreeBSD.org>  2012-01-31 00:12:51 +0000
commit    e8263c4e27b8d101f2bdc08c6172a3ac3ffcebc0 (patch)
tree      bee289bfd388d943ca586efbfc3ad0e0710a39fc /sys
parent    829d20bae62c17797f4e0bcf261457818e9cbfd2 (diff)
parent    da7d786fd42cccabf9a0a984972ae8d78c1d61fa (diff)
Rebase user/jimharris/isci branch from head.
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/acpica/acpi_wakecode.S | 4
-rw-r--r--  sys/amd64/amd64/fpu.c | 38
-rw-r--r--  sys/amd64/amd64/minidump_machdep.c | 4
-rw-r--r--  sys/amd64/amd64/uma_machdep.c | 3
-rw-r--r--  sys/amd64/include/cpufunc.h | 38
-rw-r--r--  sys/amd64/include/signal.h | 5
-rw-r--r--  sys/cam/cam_ccb.h | 1
-rw-r--r--  sys/cam/cam_xpt.c | 11
-rw-r--r--  sys/cam/cam_xpt_internal.h | 2
-rw-r--r--  sys/cam/scsi/scsi_all.c | 8
-rw-r--r--  sys/cam/scsi/scsi_all.h | 22
-rw-r--r--  sys/cam/scsi/scsi_da.c | 125
-rw-r--r--  sys/cam/scsi/scsi_xpt.c | 34
-rw-r--r--  sys/cddl/compat/opensolaris/sys/kmem.h | 4
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c | 11
-rw-r--r--  sys/conf/files | 5
-rw-r--r--  sys/dev/ata/chipsets/ata-acerlabs.c | 4
-rw-r--r--  sys/dev/ata/chipsets/ata-siliconimage.c | 4
-rw-r--r--  sys/dev/ath/ath_dfs/null/dfs_null.c | 11
-rw-r--r--  sys/dev/ath/ath_hal/ar5210/ar5210_attach.c | 6
-rw-r--r--  sys/dev/ath/ath_hal/ar5211/ar5211_attach.c | 6
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212.h | 1
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212_attach.c | 7
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212_misc.c | 10
-rw-r--r--  sys/dev/ath/if_ath.c | 28
-rw-r--r--  sys/dev/ath/if_athdfs.h | 2
-rw-r--r--  sys/dev/ciss/ciss.c | 11
-rw-r--r--  sys/dev/e1000/README | 1
-rw-r--r--  sys/dev/fe/if_fe.c | 5
-rw-r--r--  sys/dev/firewire/sbp_targ.c | 393
-rw-r--r--  sys/dev/hwpmc/hwpmc_x86.c | 2
-rw-r--r--  sys/dev/iwn/if_iwn.c | 12
-rw-r--r--  sys/dev/ixgbe/ixgbe.c | 392
-rw-r--r--  sys/dev/ixgbe/ixgbe.h | 12
-rw-r--r--  sys/dev/ixgbe/ixgbe_82598.c | 256
-rwxr-xr-x  sys/dev/ixgbe/ixgbe_82598.h | 52
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.c | 1017
-rwxr-xr-x  sys/dev/ixgbe/ixgbe_82599.h | 65
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c | 293
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h | 88
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c | 1271
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.h | 44
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.c | 37
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.h | 95
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h | 13
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.c | 349
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.h | 126
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h | 4229
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c | 143
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.h | 110
-rwxr-xr-x  sys/dev/ixgbe/ixgbe_x540.c | 971
-rwxr-xr-x  sys/dev/ixgbe/ixgbe_x540.h | 65
-rw-r--r--  sys/dev/ixgbe/ixv.c | 136
-rw-r--r--  sys/dev/ixgbe/ixv.h | 18
-rw-r--r--  sys/dev/mps/mpi/mpi2.h | 62
-rw-r--r--  sys/dev/mps/mpi/mpi2_cnfg.h | 321
-rw-r--r--  sys/dev/mps/mpi/mpi2_hbd.h | 42
-rw-r--r--  sys/dev/mps/mpi/mpi2_history.txt | 149
-rw-r--r--  sys/dev/mps/mpi/mpi2_init.h | 61
-rw-r--r--  sys/dev/mps/mpi/mpi2_ioc.h | 162
-rw-r--r--  sys/dev/mps/mpi/mpi2_ra.h | 33
-rw-r--r--  sys/dev/mps/mpi/mpi2_raid.h | 37
-rw-r--r--  sys/dev/mps/mpi/mpi2_sas.h | 45
-rw-r--r--  sys/dev/mps/mpi/mpi2_targ.h | 39
-rw-r--r--  sys/dev/mps/mpi/mpi2_tool.h | 87
-rw-r--r--  sys/dev/mps/mpi/mpi2_type.h | 33
-rw-r--r--  sys/dev/mps/mps.c | 761
-rw-r--r--  sys/dev/mps/mps_config.c | 1393
-rw-r--r--  sys/dev/mps/mps_ioctl.h | 285
-rw-r--r--  sys/dev/mps/mps_mapping.c | 2268
-rw-r--r--  sys/dev/mps/mps_mapping.h | 71
-rw-r--r--  sys/dev/mps/mps_pci.c | 62
-rw-r--r--  sys/dev/mps/mps_sas.c | 2989
-rw-r--r--  sys/dev/mps/mps_sas.h | 161
-rw-r--r--  sys/dev/mps/mps_sas_lsi.c | 865
-rw-r--r--  sys/dev/mps/mps_table.c | 6
-rw-r--r--  sys/dev/mps/mps_user.c | 1495
-rw-r--r--  sys/dev/mps/mpsvar.h | 400
-rw-r--r--  sys/dev/netmap/ixgbe_netmap.h | 55
-rw-r--r--  sys/dev/netmap/netmap.c | 6
-rw-r--r--  sys/dev/netmap/netmap_kern.h | 7
-rw-r--r--  sys/dev/ofw/openfirm.c | 22
-rw-r--r--  sys/dev/pccbb/pccbb.c | 7
-rw-r--r--  sys/dev/pci/pcireg.h | 1
-rw-r--r--  sys/dev/sound/pci/csa.c | 5
-rw-r--r--  sys/dev/sound/pci/csareg.h | 20
-rw-r--r--  sys/dev/sound/pci/hda/hdaa.c | 61
-rw-r--r--  sys/dev/sound/pci/hda/hdac.c | 2
-rw-r--r--  sys/dev/sound/pci/hda/hdac.h | 73
-rw-r--r--  sys/dev/sound/pci/hda/hdacc.c | 74
-rw-r--r--  sys/dev/usb/usb_transfer.c | 1
-rw-r--r--  sys/dev/wi/if_wi.c | 2
-rw-r--r--  sys/dev/xen/blkback/blkback.c | 4
-rw-r--r--  sys/dev/xen/netback/netback.c | 3438
-rw-r--r--  sys/dev/xen/netback/netback_unit_tests.c | 2530
-rw-r--r--  sys/fs/nfsclient/nfs_clbio.c | 14
-rw-r--r--  sys/fs/nfsclient/nfs_clnode.c | 1
-rw-r--r--  sys/fs/nfsclient/nfs_clport.c | 1
-rw-r--r--  sys/fs/nfsclient/nfs_clvfsops.c | 17
-rw-r--r--  sys/geom/geom_bsd.c | 1
-rw-r--r--  sys/geom/geom_mbr.c | 1
-rw-r--r--  sys/geom/geom_pc98.c | 1
-rw-r--r--  sys/geom/mountver/g_mountver.c | 1
-rw-r--r--  sys/i386/i386/initcpu.c | 3
-rw-r--r--  sys/i386/include/signal.h | 7
-rw-r--r--  sys/kern/imgact_elf.c | 7
-rw-r--r--  sys/kern/kern_shutdown.c | 3
-rw-r--r--  sys/kern/subr_scanf.c | 21
-rw-r--r--  sys/kern/subr_syscall.c | 3
-rw-r--r--  sys/kern/uipc_mbuf.c | 5
-rw-r--r--  sys/kern/vfs_aio.c | 24
-rw-r--r--  sys/kern/vfs_vnops.c | 14
-rw-r--r--  sys/modules/ixgbe/Makefile | 2
-rw-r--r--  sys/modules/mps/Makefile | 5
-rw-r--r--  sys/net/flowtable.c | 4
-rw-r--r--  sys/net/if_llatbl.c | 1
-rw-r--r--  sys/net80211/ieee80211_hwmp.c | 21
-rw-r--r--  sys/netinet/ipfw/dn_sched_qfq.c | 2
-rw-r--r--  sys/netinet6/in6.h | 32
-rw-r--r--  sys/nfsclient/nfs_bio.c | 14
-rw-r--r--  sys/nfsclient/nfs_vfsops.c | 17
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c | 7
-rw-r--r--  sys/powerpc/booke/machdep.c | 5
-rw-r--r--  sys/sparc64/include/clock.h | 30
-rw-r--r--  sys/sparc64/include/cpu.h | 4
-rw-r--r--  sys/sparc64/include/ofw_machdep.h | 5
-rw-r--r--  sys/sparc64/include/vmparam.h | 2
-rw-r--r--  sys/sparc64/pci/schizo.c | 56
-rw-r--r--  sys/sparc64/pci/schizovar.h | 2
-rw-r--r--  sys/sparc64/sparc64/cache.c | 12
-rw-r--r--  sys/sparc64/sparc64/clock.c | 24
-rw-r--r--  sys/sparc64/sparc64/machdep.c | 49
-rw-r--r--  sys/sparc64/sparc64/ofw_machdep.c | 14
-rw-r--r--  sys/sparc64/sparc64/pmap.c | 37
-rw-r--r--  sys/sparc64/sparc64/support.S | 18
-rw-r--r--  sys/sys/elf_common.h | 1
-rw-r--r--  sys/sys/malloc.h | 1
-rw-r--r--  sys/sys/param.h | 2
-rw-r--r--  sys/sys/proc.h | 1
-rw-r--r--  sys/sys/systm.h | 3
-rw-r--r--  sys/vm/uma.h | 4
-rw-r--r--  sys/vm/uma_core.c | 3
-rw-r--r--  sys/vm/vm_contig.c | 2
-rw-r--r--  sys/vm/vm_kern.c | 2
-rw-r--r--  sys/vm/vm_page.c | 5
-rw-r--r--  sys/vm/vm_page.h | 2
-rw-r--r--  sys/xen/interface/io/netif.h | 8
147 files changed, 22584 insertions, 6672 deletions
diff --git a/sys/amd64/acpica/acpi_wakecode.S b/sys/amd64/acpica/acpi_wakecode.S
index 6e44e6c5..ed239c5 100644
--- a/sys/amd64/acpica/acpi_wakecode.S
+++ b/sys/amd64/acpica/acpi_wakecode.S
@@ -267,11 +267,11 @@ wakeup_ctx:
.quad 0
wakeup_pcb:
.quad 0
+wakeup_fpusave:
+ .quad 0
wakeup_gdt:
.word 0
.quad 0
-wakeup_fpusave:
- .quad 0
ALIGN_DATA
wakeup_efer:
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index af1fc5e..27c618f 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -78,6 +78,41 @@ __FBSDID("$FreeBSD$");
: : "n" (CR0_TS) : "ax")
#define stop_emulating() __asm __volatile("clts")
+static __inline void
+xrstor(char *addr, uint64_t mask)
+{
+ uint32_t low, hi;
+
+ low = mask;
+ hi = mask >> 32;
+ /* xrstor (%rdi) */
+ __asm __volatile(".byte 0x0f,0xae,0x2f" : :
+ "a" (low), "d" (hi), "D" (addr));
+}
+
+static __inline void
+xsave(char *addr, uint64_t mask)
+{
+ uint32_t low, hi;
+
+ low = mask;
+ hi = mask >> 32;
+ /* xsave (%rdi) */
+ __asm __volatile(".byte 0x0f,0xae,0x27" : :
+ "a" (low), "d" (hi), "D" (addr) : "memory");
+}
+
+static __inline void
+xsetbv(uint32_t reg, uint64_t val)
+{
+ uint32_t low, hi;
+
+ low = val;
+ hi = val >> 32;
+ __asm __volatile(".byte 0x0f,0x01,0xd1" : :
+ "c" (reg), "a" (low), "d" (hi));
+}
+
#else /* !(__GNUCLIKE_ASM && !lint) */
void fldcw(u_short cw);
@@ -90,6 +125,9 @@ void fxrstor(caddr_t addr);
void ldmxcsr(u_int csr);
void start_emulating(void);
void stop_emulating(void);
+void xrstor(char *addr, uint64_t mask);
+void xsave(char *addr, uint64_t mask);
+void xsetbv(uint32_t reg, uint64_t val);
#endif /* __GNUCLIKE_ASM && !lint */
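
The xsave/xrstor/xsetbv wrappers moved here from cpufunc.h split the 64-bit state-component mask into EDX:EAX, as the instructions require; the opcodes are hand-assembled because assemblers of the day did not yet know them. A minimal user-space sketch of the same mask split (dump_mask is a hypothetical helper, not part of this commit):

    /* Sketch: how a 64-bit state mask maps onto the EDX:EAX pair. */
    #include <stdint.h>
    #include <stdio.h>

    static void
    dump_mask(uint64_t mask)
    {
        uint32_t low = mask;        /* loaded into %eax */
        uint32_t hi = mask >> 32;   /* loaded into %edx */

        printf("eax=%#x edx=%#x\n", low, hi);
    }

    int
    main(void)
    {
        dump_mask(0x7);             /* XCR0 bits: x87 | SSE | AVX */
        return (0);
    }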
diff --git a/sys/amd64/amd64/minidump_machdep.c b/sys/amd64/amd64/minidump_machdep.c
index 577de07..057d81d 100644
--- a/sys/amd64/amd64/minidump_machdep.c
+++ b/sys/amd64/amd64/minidump_machdep.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/watchdog.h>
#endif
#include <vm/vm.h>
+#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/elf.h>
@@ -75,8 +76,11 @@ CTASSERT(sizeof(*vm_page_dump) == 8);
static int
is_dumpable(vm_paddr_t pa)
{
+ vm_page_t m;
int i;
+ if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
+ return ((m->flags & PG_NODUMP) == 0);
for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
return (1);
diff --git a/sys/amd64/amd64/uma_machdep.c b/sys/amd64/amd64/uma_machdep.c
index 3583975..dc9c307 100644
--- a/sys/amd64/amd64/uma_machdep.c
+++ b/sys/amd64/amd64/uma_machdep.c
@@ -65,7 +65,8 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
break;
}
pa = m->phys_addr;
- dump_add_page(pa);
+ if ((wait & M_NODUMP) == 0)
+ dump_add_page(pa);
va = (void *)PHYS_TO_DMAP(pa);
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
pagezero(va);
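
Together with the PG_NODUMP check added to is_dumpable() above, this gives kernel allocators a way to keep reconstructible pages out of minidumps. A hedged sketch of the intended malloc(9) call pattern (M_MYBUF is a made-up malloc type for illustration):

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>

    MALLOC_DEFINE(M_MYBUF, "mybuf", "example no-dump buffers");

    /*
     * Allocate a large buffer whose backing pages are skipped by
     * minidumps: M_NODUMP keeps dump_add_page() from being called.
     */
    static void *
    alloc_nodump(size_t size)
    {
        return (malloc(size, M_MYBUF, M_WAITOK | M_ZERO | M_NODUMP));
    }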
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index d112e66..c07e09b 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -669,41 +669,6 @@ intr_restore(register_t rflags)
write_rflags(rflags);
}
-static __inline void
-xsave(char *addr, uint64_t mask)
-{
- uint32_t low, hi;
-
- low = mask;
- hi = mask >> 32;
- /* xsave (%rdi) */
- __asm __volatile(".byte 0x0f,0xae,0x27" : :
- "a" (low), "d" (hi), "D" (addr) : "memory");
-}
-
-static __inline void
-xsetbv(uint32_t reg, uint64_t val)
-{
- uint32_t low, hi;
-
- low = val;
- hi = val >> 32;
- __asm __volatile(".byte 0x0f,0x01,0xd1" : :
- "c" (reg), "a" (low), "d" (hi));
-}
-
-static __inline void
-xrstor(char *addr, uint64_t mask)
-{
- uint32_t low, hi;
-
- low = mask;
- hi = mask >> 32;
- /* xrstor (%rdi) */
- __asm __volatile(".byte 0x0f,0xae,0x2f" : :
- "a" (low), "d" (hi), "D" (addr));
-}
-
#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
int breakpoint(void);
@@ -768,9 +733,6 @@ u_int rgs(void);
void wbinvd(void);
void write_rflags(u_int rf);
void wrmsr(u_int msr, uint64_t newval);
-void xsave(char *addr, uint64_t mask);
-void xsetbv(uint32_t reg, uint64_t val);
-void xrstor(char *addr, uint64_t mask);
#endif /* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */
diff --git a/sys/amd64/include/signal.h b/sys/amd64/include/signal.h
index 0374339..085d43a 100644
--- a/sys/amd64/include/signal.h
+++ b/sys/amd64/include/signal.h
@@ -99,7 +99,10 @@ struct sigcontext {
long sc_fsbase;
long sc_gsbase;
- long sc_spare[6];
+ long sc_xfpustate;
+ long sc_xfpustate_len;
+
+ long sc_spare[4];
};
#endif /* __BSD_VISIBLE */
diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h
index 0d51482..6eb3b50 100644
--- a/sys/cam/cam_ccb.h
+++ b/sys/cam/cam_ccb.h
@@ -1118,6 +1118,7 @@ struct ccb_dev_advinfo {
#define CDAI_TYPE_SCSI_DEVID 1
#define CDAI_TYPE_SERIAL_NUM 2
#define CDAI_TYPE_PHYS_PATH 3
+#define CDAI_TYPE_RCAPLONG 4
off_t bufsiz; /* IN: Size of external buffer */
#define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */
off_t provsiz; /* OUT: Size required/used */
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index 7b639a9..0ee1cc4 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -4588,6 +4588,17 @@ xpt_release_device(struct cam_ed *device)
cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
camq_fini(&device->drvq);
cam_ccbq_fini(&device->ccbq);
+ /*
+ * Free allocated memory. free(9) does nothing if the
+ * supplied pointer is NULL, so it is safe to call without
+ * checking.
+ */
+ free(device->supported_vpds, M_CAMXPT);
+ free(device->device_id, M_CAMXPT);
+ free(device->physpath, M_CAMXPT);
+ free(device->rcap_buf, M_CAMXPT);
+ free(device->serial_num, M_CAMXPT);
+
xpt_release_target(device->target);
free(device, M_CAMXPT);
} else
diff --git a/sys/cam/cam_xpt_internal.h b/sys/cam/cam_xpt_internal.h
index b6e8f66..8c62c4e 100644
--- a/sys/cam/cam_xpt_internal.h
+++ b/sys/cam/cam_xpt_internal.h
@@ -99,6 +99,8 @@ struct cam_ed {
uint8_t *device_id;
uint8_t physpath_len;
uint8_t *physpath; /* physical path string form */
+ uint32_t rcap_len;
+ uint8_t *rcap_buf;
struct ata_params ident_data;
u_int8_t inq_flags; /*
* Current settings for inquiry flags.
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index 93e5658..2622609 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -5325,8 +5325,8 @@ void
scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
uint8_t tag_action, uint64_t lba, int reladr, int pmi,
- struct scsi_read_capacity_data_long *rcap_buf,
- uint8_t sense_len, uint32_t timeout)
+ uint8_t *rcap_buf, int rcap_buf_len, uint8_t sense_len,
+ uint32_t timeout)
{
struct scsi_read_capacity_16 *scsi_cmd;
@@ -5337,7 +5337,7 @@ scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries,
/*flags*/CAM_DIR_IN,
tag_action,
/*data_ptr*/(u_int8_t *)rcap_buf,
- /*dxfer_len*/sizeof(*rcap_buf),
+ /*dxfer_len*/rcap_buf_len,
sense_len,
sizeof(*scsi_cmd),
timeout);
@@ -5346,7 +5346,7 @@ scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries,
scsi_cmd->opcode = SERVICE_ACTION_IN;
scsi_cmd->service_action = SRC16_SERVICE_ACTION;
scsi_u64to8b(lba, scsi_cmd->addr);
- scsi_ulto4b(sizeof(*rcap_buf), scsi_cmd->alloc_len);
+ scsi_ulto4b(rcap_buf_len, scsi_cmd->alloc_len);
if (pmi)
reladr |= SRC16_PMI;
if (reladr)
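
With the new prototype, callers choose the READ CAPACITY(16) allocation length themselves rather than being pinned to sizeof(struct scsi_read_capacity_data_long). A sketch of a fill-call under the new signature (csio and mydone are assumed to exist in the calling periph driver):

    struct scsi_read_capacity_data_long rcaplong;

    scsi_read_capacity_16(&csio,
                          /*retries*/ 4,
                          /*cbfcnp*/ mydone,
                          /*tag_action*/ MSG_SIMPLE_Q_TAG,
                          /*lba*/ 0,
                          /*reladr*/ 0,
                          /*pmi*/ 0,
                          /*rcap_buf*/ (uint8_t *)&rcaplong,
                          /*rcap_buf_len*/ sizeof(rcaplong),
                          /*sense_len*/ SSD_FULL_SIZE,
                          /*timeout*/ 60000);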
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index bc56418..ebee01c 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -1437,15 +1437,26 @@ struct scsi_read_capacity_data_long
uint8_t length[4];
#define SRC16_PROT_EN 0x01
#define SRC16_P_TYPE 0x0e
+#define SRC16_PTYPE_1 0x00
+#define SRC16_PTYPE_2 0x02
+#define SRC16_PTYPE_3 0x04
uint8_t prot;
#define SRC16_LBPPBE 0x0f
#define SRC16_PI_EXPONENT 0xf0
#define SRC16_PI_EXPONENT_SHIFT 4
uint8_t prot_lbppbe;
-#define SRC16_LALBA 0x3fff
-#define SRC16_LBPRZ 0x4000
-#define SRC16_LBPME 0x8000
+#define SRC16_LALBA 0x3f
+#define SRC16_LBPRZ 0x40
+#define SRC16_LBPME 0x80
+/*
+ * Alternate versions of these macros that are intended for use on a 16-bit
+ * version of the lalba_lbp field instead of the array of two 8-bit values.
+ */
+#define SRC16_LALBA_A 0x3fff
+#define SRC16_LBPRZ_A 0x4000
+#define SRC16_LBPME_A 0x8000
uint8_t lalba_lbp[2];
+ uint8_t reserved[16];
};
struct scsi_report_luns
@@ -2293,9 +2304,8 @@ void scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *,
union ccb *), uint8_t tag_action,
uint64_t lba, int reladr, int pmi,
- struct scsi_read_capacity_data_long
- *rcap_buf, uint8_t sense_len,
- uint32_t timeout);
+ uint8_t *rcap_buf, int rcap_buf_len,
+ uint8_t sense_len, uint32_t timeout);
void scsi_report_luns(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *,
diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c
index 756093c..b3bf5ff 100644
--- a/sys/cam/scsi/scsi_da.c
+++ b/sys/cam/scsi/scsi_da.c
@@ -160,6 +160,7 @@ struct da_softc {
struct callout sendordered_c;
uint64_t wwpn;
uint8_t unmap_buf[UNMAP_MAX_RANGES * 16 + 8];
+ struct scsi_read_capacity_data_long rcaplong;
};
struct da_quirk_entry {
@@ -830,7 +831,9 @@ static int daerror(union ccb *ccb, u_int32_t cam_flags,
static void daprevent(struct cam_periph *periph, int action);
static int dagetcapacity(struct cam_periph *periph);
static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
- uint64_t maxsector, u_int lbppbe, u_int lalba);
+ uint64_t maxsector,
+ struct scsi_read_capacity_data_long *rcaplong,
+ size_t rcap_size);
static timeout_t dasendorderedtag;
static void dashutdown(void *arg, int howto);
@@ -1948,7 +1951,8 @@ out:
/*lba*/ 0,
/*reladr*/ 0,
/*pmi*/ 0,
- rcaplong,
+ /*rcap_buf*/ (uint8_t *)rcaplong,
+ /*rcap_buf_len*/ sizeof(*rcaplong),
/*sense_len*/ SSD_FULL_SIZE,
/*timeout*/ 60000);
start_ccb->ccb_h.ccb_bp = NULL;
@@ -2227,10 +2231,15 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
announce_buf[0] = '\0';
cam_periph_invalidate(periph);
} else {
+ /*
+ * We pass rcaplong into dasetgeom(),
+ * because it will only use it if it is
+ * non-NULL.
+ */
dasetgeom(periph, block_size, maxsector,
- lbppbe, lalba & SRC16_LALBA);
- if ((lalba & SRC16_LBPME) &&
- softc->delete_method == DA_DELETE_NONE)
+ rcaplong, sizeof(*rcaplong));
+ if ((lalba & SRC16_LBPME_A)
+ && softc->delete_method == DA_DELETE_NONE)
softc->delete_method = DA_DELETE_UNMAP;
dp = &softc->params;
snprintf(announce_buf, sizeof(announce_buf),
@@ -2504,6 +2513,7 @@ dagetcapacity(struct cam_periph *periph)
lalba = 0;
error = 0;
rc16failed = 0;
+ rcaplong = NULL;
sense_flags = SF_RETRY_UA;
if (softc->flags & DA_FLAG_PACK_REMOVABLE)
sense_flags |= SF_NO_PRINT;
@@ -2521,39 +2531,47 @@ dagetcapacity(struct cam_periph *periph)
/* Try READ CAPACITY(16) first if we think it should work. */
if (softc->flags & DA_FLAG_CAN_RC16) {
scsi_read_capacity_16(&ccb->csio,
- /*retries*/ 4,
- /*cbfcnp*/ dadone,
- /*tag_action*/ MSG_SIMPLE_Q_TAG,
- /*lba*/ 0,
- /*reladr*/ 0,
- /*pmi*/ 0,
- rcaplong,
- /*sense_len*/ SSD_FULL_SIZE,
- /*timeout*/ 60000);
+ /*retries*/ 4,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*lba*/ 0,
+ /*reladr*/ 0,
+ /*pmi*/ 0,
+ /*rcap_buf*/ (uint8_t *)rcaplong,
+ /*rcap_buf_len*/ sizeof(*rcaplong),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ 60000);
ccb->ccb_h.ccb_bp = NULL;
error = cam_periph_runccb(ccb, daerror,
- /*cam_flags*/CAM_RETRY_SELTO,
- sense_flags,
- softc->disk->d_devstat);
+ /*cam_flags*/CAM_RETRY_SELTO,
+ sense_flags, softc->disk->d_devstat);
if (error == 0)
goto rc16ok;
/* If we got ILLEGAL REQUEST, do not prefer RC16 any more. */
- if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
- CAM_REQ_INVALID) {
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
softc->flags &= ~DA_FLAG_CAN_RC16;
} else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
- CAM_SCSI_STATUS_ERROR) &&
- (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND) &&
- (ccb->ccb_h.status & CAM_AUTOSNS_VALID) &&
- ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0) &&
- ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
+ CAM_SCSI_STATUS_ERROR)
+ && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
+ && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
+ && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
+ && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
int sense_key, error_code, asc, ascq;
- scsi_extract_sense(&ccb->csio.sense_data,
- &error_code, &sense_key, &asc, &ascq);
- if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
+ scsi_extract_sense_len(&ccb->csio.sense_data,
+ ccb->csio.sense_len -
+ ccb->csio.sense_resid,
+ &error_code, &sense_key,
+ &asc, &ascq, /*show_errors*/1);
+ /*
+ * If we don't have enough sense to get the sense
+ * key, or if it's an ILLEGAL REQUEST, turn off
+ * READ CAPACITY (16).
+ */
+ if ((sense_key == -1)
+ || (sense_key == SSD_KEY_ILLEGAL_REQUEST))
softc->flags &= ~DA_FLAG_CAN_RC16;
}
rc16failed = 1;
@@ -2590,7 +2608,8 @@ dagetcapacity(struct cam_periph *periph)
/*lba*/ 0,
/*reladr*/ 0,
/*pmi*/ 0,
- rcaplong,
+ /*rcap_buf*/ (uint8_t *)rcaplong,
+ /*rcap_buf_len*/ sizeof(*rcaplong),
/*sense_len*/ SSD_FULL_SIZE,
/*timeout*/ 60000);
ccb->ccb_h.ccb_bp = NULL;
@@ -2617,9 +2636,9 @@ done:
error = EINVAL;
} else {
dasetgeom(periph, block_len, maxsector,
- lbppbe, lalba & SRC16_LALBA);
- if ((lalba & SRC16_LBPME) &&
- softc->delete_method == DA_DELETE_NONE)
+ rcaplong, sizeof(*rcaplong));
+ if ((lalba & SRC16_LBPME)
+ && softc->delete_method == DA_DELETE_NONE)
softc->delete_method = DA_DELETE_UNMAP;
}
}
@@ -2633,17 +2652,27 @@ done:
static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
- u_int lbppbe, u_int lalba)
+ struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
{
struct ccb_calc_geometry ccg;
struct da_softc *softc;
struct disk_params *dp;
+ u_int lbppbe, lalba;
softc = (struct da_softc *)periph->softc;
dp = &softc->params;
dp->secsize = block_len;
dp->sectors = maxsector + 1;
+ if (rcaplong != NULL) {
+ lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
+ lalba = scsi_2btoul(rcaplong->lalba_lbp);
+ lalba &= SRC16_LALBA_A;
+ } else {
+ lbppbe = 0;
+ lalba = 0;
+ }
+
if (lbppbe > 0) {
dp->stripesize = block_len << lbppbe;
dp->stripeoffset = (dp->stripesize - block_len * lalba) %
@@ -2688,6 +2717,38 @@ dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
dp->secs_per_track = ccg.secs_per_track;
dp->cylinders = ccg.cylinders;
}
+
+ /*
+ * If the user supplied a read capacity buffer, and if it is
+ * different than the previous buffer, update the data in the EDT.
+ * If it's the same, we don't bother. This avoids sending an
+ * update every time someone opens this device.
+ */
+ if ((rcaplong != NULL)
+ && (bcmp(rcaplong, &softc->rcaplong,
+ min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
+ struct ccb_dev_advinfo cdai;
+
+ xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
+ cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
+ cdai.buftype = CDAI_TYPE_RCAPLONG;
+ cdai.flags |= CDAI_FLAG_STORE;
+ cdai.bufsiz = rcap_len;
+ cdai.buf = (uint8_t *)rcaplong;
+ xpt_action((union ccb *)&cdai);
+ if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
+ if (cdai.ccb_h.status != CAM_REQ_CMP) {
+ xpt_print(periph->path, "%s: failed to set read "
+ "capacity advinfo\n", __func__);
+ /* Use cam_error_print() to decode the status */
+ cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
+ CAM_EPF_ALL);
+ } else {
+ bcopy(rcaplong, &softc->rcaplong,
+ min(sizeof(softc->rcaplong), rcap_len));
+ }
+ }
}
static void
diff --git a/sys/cam/scsi/scsi_xpt.c b/sys/cam/scsi/scsi_xpt.c
index b177fca..7ff4f7e 100644
--- a/sys/cam/scsi/scsi_xpt.c
+++ b/sys/cam/scsi/scsi_xpt.c
@@ -2468,8 +2468,10 @@ scsi_dev_advinfo(union ccb *start_ccb)
break;
case CDAI_TYPE_PHYS_PATH:
if (cdai->flags & CDAI_FLAG_STORE) {
- if (device->physpath != NULL)
+ if (device->physpath != NULL) {
free(device->physpath, M_CAMXPT);
+ device->physpath = NULL;
+ }
device->physpath_len = cdai->bufsiz;
/* Clear existing buffer if zero length */
if (cdai->bufsiz == 0)
@@ -2490,6 +2492,36 @@ scsi_dev_advinfo(union ccb *start_ccb)
memcpy(cdai->buf, device->physpath, amt);
}
break;
+ case CDAI_TYPE_RCAPLONG:
+ if (cdai->flags & CDAI_FLAG_STORE) {
+ if (device->rcap_buf != NULL) {
+ free(device->rcap_buf, M_CAMXPT);
+ device->rcap_buf = NULL;
+ }
+
+ device->rcap_len = cdai->bufsiz;
+ /* Clear existing buffer if zero length */
+ if (cdai->bufsiz == 0)
+ break;
+
+ device->rcap_buf = malloc(cdai->bufsiz, M_CAMXPT,
+ M_NOWAIT);
+ if (device->rcap_buf == NULL) {
+ start_ccb->ccb_h.status = CAM_REQ_ABORTED;
+ return;
+ }
+
+ memcpy(device->rcap_buf, cdai->buf, cdai->bufsiz);
+ } else {
+ cdai->provsiz = device->rcap_len;
+ if (device->rcap_len == 0)
+ break;
+ amt = device->rcap_len;
+ if (cdai->provsiz > cdai->bufsiz)
+ amt = cdai->bufsiz;
+ memcpy(cdai->buf, device->rcap_buf, amt);
+ }
+ break;
default:
return;
}
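
The retrieve side of CDAI_TYPE_RCAPLONG is symmetric with the store path above. A hedged sketch of pulling the cached data back out of the EDT (assumes a valid path; device-queue freeze handling omitted):

    struct ccb_dev_advinfo cdai;
    struct scsi_read_capacity_data_long rcap;

    bzero(&cdai, sizeof(cdai));
    xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
    cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
    cdai.buftype = CDAI_TYPE_RCAPLONG;
    cdai.flags = 0;                 /* retrieve, not CDAI_FLAG_STORE */
    cdai.bufsiz = sizeof(rcap);
    cdai.buf = (uint8_t *)&rcap;
    xpt_action((union ccb *)&cdai);
    /* On return, cdai.provsiz is the length the EDT actually holds. */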
diff --git a/sys/cddl/compat/opensolaris/sys/kmem.h b/sys/cddl/compat/opensolaris/sys/kmem.h
index 6be2735..428badf 100644
--- a/sys/cddl/compat/opensolaris/sys/kmem.h
+++ b/sys/cddl/compat/opensolaris/sys/kmem.h
@@ -45,7 +45,9 @@ MALLOC_DECLARE(M_SOLARIS);
#define KM_SLEEP M_WAITOK
#define KM_PUSHPAGE M_WAITOK
#define KM_NOSLEEP M_NOWAIT
-#define KMC_NODEBUG 0
+#define KM_ZERO M_ZERO
+#define KM_NODEBUG M_NODUMP
+#define KMC_NODEBUG UMA_ZONE_NODUMP
#define KMC_NOTOUCH 0
typedef struct kmem_cache {
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
index 3eeeb58..694302e 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
@@ -42,6 +42,10 @@ static int zio_use_uma = 0;
TUNABLE_INT("vfs.zfs.zio.use_uma", &zio_use_uma);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0,
"Use uma(9) for ZIO allocations");
+static int zio_exclude_metadata = 0;
+TUNABLE_INT("vfs.zfs.zio.exclude_metadata", &zio_exclude_metadata);
+SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
+ "Exclude metadata buffers from dumps as well");
/*
* ==========================================================================
@@ -148,7 +152,7 @@ zio_init(void)
(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL,
- cflags | KMC_NOTOUCH);
+ cflags | KMC_NOTOUCH | KMC_NODEBUG);
}
}
@@ -217,13 +221,14 @@ void *
zio_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
+ int flags = zio_exclude_metadata ? KM_NODEBUG : 0;
ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
if (zio_use_uma)
return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
else
- return (kmem_alloc(size, KM_SLEEP));
+ return (kmem_alloc(size, KM_SLEEP|flags));
}
/*
@@ -242,7 +247,7 @@ zio_data_buf_alloc(size_t size)
if (zio_use_uma)
return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
else
- return (kmem_alloc(size, KM_SLEEP));
+ return (kmem_alloc(size, KM_SLEEP | KM_NODEBUG));
}
void
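
Data buffers now always carry KM_NODEBUG (mapped to M_NODUMP by the kmem.h change above); metadata buffers join them only when the new loader tunable is set, for example:

    # /boot/loader.conf: also exclude ZFS metadata buffers from dumps
    vfs.zfs.zio.exclude_metadata=1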
diff --git a/sys/conf/files b/sys/conf/files
index 85c87b0..b777ccd 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1395,6 +1395,8 @@ dev/ixgbe/ixgbe_82598.c optional ixgbe inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_82599.c optional ixgbe inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
+dev/ixgbe/ixgbe_x540.c optional ixgbe inet \
+ compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/jme/if_jme.c optional jme pci
dev/joy/joy.c optional joy
dev/joy/joy_isa.c optional joy isa
@@ -1469,8 +1471,11 @@ dev/mmc/mmcbus_if.m standard
dev/mmc/mmcsd.c optional mmcsd
dev/mn/if_mn.c optional mn pci
dev/mps/mps.c optional mps
+dev/mps/mps_config.c optional mps
+dev/mps/mps_mapping.c optional mps
dev/mps/mps_pci.c optional mps pci
dev/mps/mps_sas.c optional mps
+dev/mps/mps_sas_lsi.c optional mps
dev/mps/mps_table.c optional mps
dev/mps/mps_user.c optional mps
dev/mpt/mpt.c optional mpt
diff --git a/sys/dev/ata/chipsets/ata-acerlabs.c b/sys/dev/ata/chipsets/ata-acerlabs.c
index dfcc3cf..8281d0f 100644
--- a/sys/dev/ata/chipsets/ata-acerlabs.c
+++ b/sys/dev/ata/chipsets/ata-acerlabs.c
@@ -213,6 +213,10 @@ ata_ali_ch_attach(device_t dev)
if (ch->dma.max_iosize > 256 * 512)
ch->dma.max_iosize = 256 * 512;
}
+#ifdef ATA_CAM
+ if (ctlr->chip->cfg2 & ALI_NEW)
+ ch->flags |= ATA_NO_ATAPI_DMA;
+#endif
return 0;
}
diff --git a/sys/dev/ata/chipsets/ata-siliconimage.c b/sys/dev/ata/chipsets/ata-siliconimage.c
index 4ec9a5b..ce170a1 100644
--- a/sys/dev/ata/chipsets/ata-siliconimage.c
+++ b/sys/dev/ata/chipsets/ata-siliconimage.c
@@ -240,6 +240,10 @@ ata_cmd_ch_attach(device_t dev)
if (ctlr->chip->cfg2 & SII_INTR)
ch->hw.status = ata_cmd_status;
+#ifdef ATA_CAM
+ ch->flags |= ATA_NO_ATAPI_DMA;
+#endif
+
return 0;
}
diff --git a/sys/dev/ath/ath_dfs/null/dfs_null.c b/sys/dev/ath/ath_dfs/null/dfs_null.c
index 75574b4..f10b010 100644
--- a/sys/dev/ath/ath_dfs/null/dfs_null.c
+++ b/sys/dev/ath/ath_dfs/null/dfs_null.c
@@ -95,12 +95,19 @@ ath_dfs_detach(struct ath_softc *sc)
/*
* Enable radar check
*/
-void
+int
ath_dfs_radar_enable(struct ath_softc *sc, struct ieee80211_channel *chan)
{
/* Check if the current channel is radar-enabled */
if (! IEEE80211_IS_CHAN_DFS(chan))
- return;
+ return (0);
+
+ /*
+ * Enabling the radar parameters and setting sc->sc_dodfs = 1
+ * would occur here.
+ */
+
+ return (1);
}
/*
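
Returning int from ath_dfs_radar_enable() lets callers learn whether radar detection was actually enabled for the channel. A sketch of plausible caller usage (the if_ath.c call sites are not part of this hunk):

    /* Sketch only: react to whether DFS is active on this channel. */
    if (ath_dfs_radar_enable(sc, sc->sc_curchan)) {
        /* DFS channel: expect and process radar PHY errors. */
    } else {
        /* Non-DFS channel: no radar processing required. */
    }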
diff --git a/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c b/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c
index c624e7a..6f9010a 100644
--- a/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c
+++ b/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c
@@ -359,6 +359,12 @@ ar5210FillCapabilityInfo(struct ath_hal *ah)
pCap->halChanHalfRate = AH_FALSE;
pCap->halChanQuarterRate = AH_FALSE;
+ /*
+ * RSSI uses the combined field; some 11n NICs may use
+ * the control chain RSSI.
+ */
+ pCap->halUseCombinedRadarRssi = AH_TRUE;
+
if (ath_hal_eepromGetFlag(ah, AR_EEP_RFKILL)) {
/*
* Setup initial rfsilent settings based on the EEPROM
diff --git a/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c b/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c
index e9def44..c7de792 100644
--- a/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c
+++ b/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c
@@ -494,6 +494,12 @@ ar5211FillCapabilityInfo(struct ath_hal *ah)
pCap->halChanHalfRate = AH_FALSE;
pCap->halChanQuarterRate = AH_FALSE;
+ /*
+ * RSSI uses the combined field; some 11n NICs may use
+ * the control chain RSSI.
+ */
+ pCap->halUseCombinedRadarRssi = AH_TRUE;
+
if (ath_hal_eepromGetFlag(ah, AR_EEP_RFKILL) &&
ath_hal_eepromGet(ah, AR_EEP_RFSILENT, &ahpriv->ah_rfsilent) == HAL_OK) {
/* NB: enabled by default */
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212.h b/sys/dev/ath/ath_hal/ar5212/ar5212.h
index ec91193..606f615 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212.h
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212.h
@@ -628,5 +628,6 @@ extern HAL_BOOL ar5212ProcessRadarEvent(struct ath_hal *ah,
struct ath_rx_status *rxs, uint64_t fulltsf, const char *buf,
HAL_DFS_EVENT *event);
extern HAL_BOOL ar5212IsFastClockEnabled(struct ath_hal *ah);
+extern uint32_t ar5212Get11nExtBusy(struct ath_hal *ah);
#endif /* _ATH_AR5212_H_ */
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c b/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
index b2a630c..15bdd60 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
@@ -134,6 +134,7 @@ static const struct ath_hal_private ar5212hal = {{
.ah_getDfsThresh = ar5212GetDfsThresh,
.ah_procRadarEvent = ar5212ProcessRadarEvent,
.ah_isFastClockEnabled = ar5212IsFastClockEnabled,
+ .ah_get11nExtBusy = ar5212Get11nExtBusy,
/* Key Cache Functions */
.ah_getKeyCacheSize = ar5212GetKeyCacheSize,
@@ -839,6 +840,12 @@ ar5212FillCapabilityInfo(struct ath_hal *ah)
pCap->halChanHalfRate = AH_TRUE;
pCap->halChanQuarterRate = AH_TRUE;
+ /*
+ * RSSI uses the combined field; some 11n NICs may use
+ * the control chain RSSI.
+ */
+ pCap->halUseCombinedRadarRssi = AH_TRUE;
+
if (ath_hal_eepromGetFlag(ah, AR_EEP_RFKILL) &&
ath_hal_eepromGet(ah, AR_EEP_RFSILENT, &ahpriv->ah_rfsilent) == HAL_OK) {
/* NB: enabled by default */
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212_misc.c b/sys/dev/ath/ath_hal/ar5212/ar5212_misc.c
index 7eceec3..9cbf320 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212_misc.c
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212_misc.c
@@ -1233,3 +1233,13 @@ ar5212IsFastClockEnabled(struct ath_hal *ah)
{
return AH_FALSE;
}
+
+/*
+ * Return what percentage of the extension channel is busy.
+ * This is always disabled for AR5212 series NICs.
+ */
+uint32_t
+ar5212Get11nExtBusy(struct ath_hal *ah)
+{
+ return 0;
+}
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 48fb917..193e49e 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -2695,6 +2695,8 @@ ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
int error;
bf = avp->av_bcbuf;
+ DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n",
+ __func__, bf->bf_m, bf->bf_node);
if (bf->bf_m != NULL) {
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
@@ -3152,6 +3154,8 @@ static void
ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
{
+ DPRINTF(sc, ATH_DEBUG_NODE, "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
+ __func__, bf, bf->bf_m, bf->bf_node);
if (bf->bf_m != NULL) {
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
@@ -3173,6 +3177,9 @@ ath_beacon_free(struct ath_softc *sc)
struct ath_buf *bf;
TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
+ DPRINTF(sc, ATH_DEBUG_NODE,
+ "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
+ __func__, bf, bf->bf_m, bf->bf_node);
if (bf->bf_m != NULL) {
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
@@ -3976,7 +3983,7 @@ ath_rx_proc(struct ath_softc *sc, int resched)
int len, type, ngood;
HAL_STATUS status;
int16_t nf;
- u_int64_t tsf;
+ u_int64_t tsf, rstamp;
int npkts = 0;
/* XXX we must not hold the ATH_LOCK here */
@@ -4047,6 +4054,12 @@ ath_rx_proc(struct ath_softc *sc, int resched)
TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
npkts++;
+ /*
+ * Calculate the correct 64 bit TSF given
+ * the TSF64 register value and rs_tstamp.
+ */
+ rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
+
/* These aren't specifically errors */
#ifdef AH_SUPPORT_AR5416
if (rs->rs_flags & HAL_RX_GI)
@@ -4078,7 +4091,7 @@ ath_rx_proc(struct ath_softc *sc, int resched)
bf->bf_dmamap,
BUS_DMASYNC_POSTREAD);
/* Now pass it to the radar processing code */
- ath_dfs_process_phy_err(sc, mtod(m, char *), tsf, rs);
+ ath_dfs_process_phy_err(sc, mtod(m, char *), rstamp, rs);
}
/* Be suitably paranoid about receiving phy errors out of the stats array bounds */
@@ -4142,7 +4155,7 @@ rx_error:
len = rs->rs_datalen;
m->m_pkthdr.len = m->m_len = len;
bf->bf_m = NULL;
- ath_rx_tap(ifp, m, rs, tsf, nf);
+ ath_rx_tap(ifp, m, rs, rstamp, nf);
ieee80211_radiotap_rx_all(ic, m);
m_freem(m);
}
@@ -4239,7 +4252,7 @@ rx_accept:
* noise setting is filled in above.
*/
if (ieee80211_radiotap_active(ic))
- ath_rx_tap(ifp, m, rs, tsf, nf);
+ ath_rx_tap(ifp, m, rs, rstamp, nf);
/*
* From this point on we assume the frame is at least
@@ -6679,7 +6692,14 @@ ath_dfs_tasklet(void *p, int npending)
*/
if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
/* DFS event found, initiate channel change */
+ /*
+ * XXX doesn't currently tell us whether the event
+ * XXX was found in the primary or extension
+ * XXX channel!
+ */
+ IEEE80211_LOCK(ic);
ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
+ IEEE80211_UNLOCK(ic);
}
}
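
ath_extend_tsf() reconciles the truncated per-frame rs_tstamp with a full TSF64 read taken afterwards. The general technique, sketched below for an N-bit snapshot, splices the low bits in and backs off one period if the result would lie in the future (an illustrative reimplementation, not the driver's exact code):

    #include <stdint.h>

    /*
     * Extend an nbits-wide hardware timestamp against a 64-bit TSF
     * read taken after the frame, assuming the frame is less than
     * 2^nbits ticks old.
     */
    static uint64_t
    extend_tsf(uint32_t rstamp, uint64_t tsf, int nbits)
    {
        uint64_t mask = (1ULL << nbits) - 1;
        uint64_t result = (tsf & ~mask) | ((uint64_t)rstamp & mask);

        if (result > tsf)           /* low bits wrapped after the frame */
            result -= mask + 1;
        return (result);
    }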
diff --git a/sys/dev/ath/if_athdfs.h b/sys/dev/ath/if_athdfs.h
index a3f27f9..215b8ca 100644
--- a/sys/dev/ath/if_athdfs.h
+++ b/sys/dev/ath/if_athdfs.h
@@ -33,7 +33,7 @@
extern int ath_dfs_attach(struct ath_softc *sc);
extern int ath_dfs_detach(struct ath_softc *sc);
-extern void ath_dfs_radar_enable(struct ath_softc *,
+extern int ath_dfs_radar_enable(struct ath_softc *,
struct ieee80211_channel *chan);
extern void ath_dfs_process_phy_err(struct ath_softc *sc, const char *buf,
uint64_t tsf, struct ath_rx_status *rxstat);
diff --git a/sys/dev/ciss/ciss.c b/sys/dev/ciss/ciss.c
index 994daa4..37af516 100644
--- a/sys/dev/ciss/ciss.c
+++ b/sys/dev/ciss/ciss.c
@@ -329,7 +329,13 @@ static struct
{ 0x103C, 0x3249, CISS_BOARD_SA5, "HP Smart Array P812" },
{ 0x103C, 0x324A, CISS_BOARD_SA5, "HP Smart Array P712m" },
{ 0x103C, 0x324B, CISS_BOARD_SA5, "HP Smart Array" },
- { 0x103C, 0x3351, CISS_BOARD_SA5, "HP Smart Array P420" },
+ { 0x103C, 0x3350, CISS_BOARD_SA5, "HP Smart Array P222" },
+ { 0x103C, 0x3351, CISS_BOARD_SA5, "HP Smart Array P420" },
+ { 0x103C, 0x3352, CISS_BOARD_SA5, "HP Smart Array P421" },
+ { 0x103C, 0x3353, CISS_BOARD_SA5, "HP Smart Array P822" },
+ { 0x103C, 0x3354, CISS_BOARD_SA5, "HP Smart Array P420i" },
+ { 0x103C, 0x3355, CISS_BOARD_SA5, "HP Smart Array P220i" },
+ { 0x103C, 0x3356, CISS_BOARD_SA5, "HP Smart Array P721m" },
{ 0, 0, 0, NULL }
};
@@ -4536,7 +4542,8 @@ ciss_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thre
pis->bus = pci_get_bus(sc->ciss_dev);
pis->dev_fn = pci_get_slot(sc->ciss_dev);
- pis->board_id = pci_get_devid(sc->ciss_dev);
+ pis->board_id = (pci_get_subvendor(sc->ciss_dev) << 16) |
+ pci_get_subdevice(sc->ciss_dev);
break;
}
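
The ioctl now reports the PCI subsystem IDs, which is also how the expanded board table above distinguishes the new Smart Array models. The packing puts the subvendor in the high 16 bits and the subdevice in the low 16; for example:

    uint16_t subvendor = 0x103C;    /* HP */
    uint16_t subdevice = 0x3351;    /* Smart Array P420 */
    uint32_t board_id;

    board_id = ((uint32_t)subvendor << 16) | subdevice;
    /* board_id == 0x103c3351 */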
diff --git a/sys/dev/e1000/README b/sys/dev/e1000/README
index f0816f0..88879e1 100644
--- a/sys/dev/e1000/README
+++ b/sys/dev/e1000/README
@@ -354,6 +354,7 @@ Known Limitations
include:
Planex FXG-08TE
I-O Data ETG-SH8
+ Netgear GS105v3
The driver can be compiled with the following changes:
diff --git a/sys/dev/fe/if_fe.c b/sys/dev/fe/if_fe.c
index 9fe1f2c..7bfca2a 100644
--- a/sys/dev/fe/if_fe.c
+++ b/sys/dev/fe/if_fe.c
@@ -2255,6 +2255,7 @@ fe_medchange (struct ifnet *ifp)
static void
fe_medstat (struct ifnet *ifp, struct ifmediareq *ifmr)
{
- (void)ifp;
- (void)ifmr;
+ struct fe_softc *sc = ifp->if_softc;
+
+ ifmr->ifm_active = sc->media.ifm_media;
}
diff --git a/sys/dev/firewire/sbp_targ.c b/sys/dev/firewire/sbp_targ.c
index 9be31bc4..3a74389 100644
--- a/sys/dev/firewire/sbp_targ.c
+++ b/sys/dev/firewire/sbp_targ.c
@@ -62,6 +62,7 @@
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
#define SBP_TARG_RECV_LEN 8
#define MAX_INITIATORS 8
@@ -186,6 +187,21 @@ struct morb4 {
#endif
};
+
+/*
+ * Unrestricted page table format:
+ * the segment length and the high 16 bits of the
+ * base address are in the first 32-bit word; the
+ * low 32 bits of the base address are in the second.
+ */
+struct unrestricted_page_table_fmt {
+ uint16_t segment_len;
+ uint16_t segment_base_high;
+ uint32_t segment_base_low;
+};
+
+
struct orb_info {
struct sbp_targ_softc *sc;
struct fw_device *fwdev;
@@ -208,7 +224,10 @@ struct orb_info {
struct corb4 orb4;
STAILQ_ENTRY(orb_info) link;
uint32_t orb[8];
- uint32_t *page_table;
+ struct unrestricted_page_table_fmt *page_table;
+ struct unrestricted_page_table_fmt *cur_pte;
+ struct unrestricted_page_table_fmt *last_pte;
+ uint32_t last_block_read;
struct sbp_status status;
};
@@ -219,6 +238,7 @@ static char *orb_fun_name[] = {
static void sbp_targ_recv(struct fw_xfer *);
static void sbp_targ_fetch_orb(struct sbp_targ_softc *, struct fw_device *,
uint16_t, uint32_t, struct sbp_targ_login *, int);
+static void sbp_targ_xfer_pt(struct orb_info *);
static void sbp_targ_abort(struct sbp_targ_softc *, struct orb_info *);
static void
@@ -252,13 +272,19 @@ sbp_targ_dealloc_login(struct sbp_targ_login *login)
}
for (orbi = STAILQ_FIRST(&login->orbs); orbi != NULL; orbi = next) {
next = STAILQ_NEXT(orbi, link);
+ if (debug)
+ printf("%s: free orbi %p\n", __func__, orbi);
free(orbi, M_SBP_TARG);
+ orbi = NULL;
}
callout_stop(&login->hold_callout);
STAILQ_REMOVE(&login->lstate->logins, login, sbp_targ_login, link);
login->lstate->sc->logins[login->id] = NULL;
+ if (debug)
+ printf("%s: free login %p\n", __func__, login);
free((void *)login, M_SBP_TARG);
+ login = NULL;
}
static void
@@ -361,20 +387,26 @@ sbp_targ_find_devs(struct sbp_targ_softc *sc, union ccb *ccb,
if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD &&
ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
*lstate = sc->black_hole;
+ if (debug)
+ printf("setting black hole for this target id(%d)\n", ccb->ccb_h.target_id);
return (CAM_REQ_CMP);
}
- if (ccb->ccb_h.target_id != 0)
- return (CAM_TID_INVALID);
-
lun = ccb->ccb_h.target_lun;
if (lun >= MAX_LUN)
return (CAM_LUN_INVALID);
*lstate = sc->lstate[lun];
- if (notfound_failure != 0 && *lstate == NULL)
+ if (notfound_failure != 0 && *lstate == NULL) {
+ if (debug)
+ printf("%s: lstate for lun is invalid, target(%d), lun(%d)\n",
+ __func__, ccb->ccb_h.target_id, lun);
return (CAM_PATH_INVALID);
+ } else
+ if (debug)
+ printf("%s: setting lstate for tgt(%d) lun(%d)\n",
+ __func__,ccb->ccb_h.target_id, lun);
return (CAM_REQ_CMP);
}
@@ -411,11 +443,18 @@ sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
printf("Couldn't allocate lstate\n");
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
return;
- }
- if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD)
+ } else {
+ if (debug)
+ printf("%s: malloc'd lstate %p\n",__func__, lstate);
+ }
+ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD) {
sc->black_hole = lstate;
- else
+ if (debug)
+ printf("Blackhole set due to target id == %d\n",
+ ccb->ccb_h.target_id);
+ } else
sc->lstate[ccb->ccb_h.target_lun] = lstate;
+
memset(lstate, 0, sizeof(*lstate));
lstate->sc = sc;
status = xpt_create_path(&lstate->path, /*periph*/NULL,
@@ -424,6 +463,7 @@ sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
xpt_path_lun_id(ccb->ccb_h.path));
if (status != CAM_REQ_CMP) {
free(lstate, M_SBP_TARG);
+ lstate = NULL;
xpt_print_path(ccb->ccb_h.path);
printf("Couldn't allocate path\n");
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
@@ -443,6 +483,7 @@ sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
if (lstate == NULL) {
ccb->ccb_h.status = CAM_LUN_INVALID;
+ printf("Invalid lstate for this target\n");
return;
}
ccb->ccb_h.status = CAM_REQ_CMP;
@@ -458,6 +499,7 @@ sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
}
if (ccb->ccb_h.status != CAM_REQ_CMP) {
+ printf("status != CAM_REQ_CMP\n");
return;
}
@@ -475,7 +517,10 @@ sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
sc->black_hole = NULL;
else
sc->lstate[ccb->ccb_h.target_lun] = NULL;
+ if (debug)
+ printf("%s: free lstate %p\n", __func__, lstate);
free(lstate, M_SBP_TARG);
+ lstate = NULL;
/* bus reset */
sc->fd.fc->ibr(sc->fd.fc);
@@ -538,7 +583,7 @@ sbp_targ_get_orb_info(struct sbp_targ_lstate *lstate,
if (orbi->orb_lo == tag_id)
goto found;
printf("%s: orb not found tag_id=0x%08x init_id=%d\n",
- __func__, tag_id, init_id);
+ __func__, tag_id, init_id);
return (NULL);
found:
return (orbi);
@@ -559,12 +604,13 @@ sbp_targ_abort(struct sbp_targ_softc *sc, struct orb_info *orbi)
xpt_done(orbi->ccb);
orbi->ccb = NULL;
}
-#if 0
if (orbi->state <= ORBI_STATUS_ATIO) {
sbp_targ_remove_orb_info_locked(orbi->login, orbi);
+ if (debug)
+ printf("%s: free orbi %p\n", __func__, orbi);
free(orbi, M_SBP_TARG);
+ orbi = NULL;
} else
-#endif
orbi->state = ORBI_STATUS_ABORTED;
}
}
@@ -576,12 +622,21 @@ sbp_targ_free_orbi(struct fw_xfer *xfer)
{
struct orb_info *orbi;
- orbi = (struct orb_info *)xfer->sc;
if (xfer->resp != 0) {
/* XXX */
printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
}
+ orbi = (struct orb_info *)xfer->sc;
+ if ( orbi->page_table != NULL ) {
+ if (debug)
+ printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
+ free(orbi->page_table, M_SBP_TARG);
+ orbi->page_table = NULL;
+ }
+ if (debug)
+ printf("%s: free orbi %p\n", __func__, orbi);
free(orbi, M_SBP_TARG);
+ orbi = NULL;
fw_xfer_free(xfer);
}
@@ -595,7 +650,7 @@ sbp_targ_status_FIFO(struct orb_info *orbi,
sbp_targ_remove_orb_info(orbi->login, orbi);
xfer = fwmem_write_block(orbi->fwdev, (void *)orbi,
- /*spd*/2, fifo_hi, fifo_lo,
+ /*spd*/FWSPD_S400, fifo_hi, fifo_lo,
sizeof(uint32_t) * (orbi->status.len + 1), (char *)&orbi->status,
sbp_targ_free_orbi);
@@ -605,6 +660,10 @@ sbp_targ_status_FIFO(struct orb_info *orbi,
}
}
+/*
+ * Generate the appropriate CAM status for the
+ * target.
+ */
static void
sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
{
@@ -621,6 +680,8 @@ sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
sbp_status->status = 0; /* XXX */
sbp_status->dead = 0; /* XXX */
+ ccb->ccb_h.status = CAM_REQ_CMP;
+
switch (ccb->csio.scsi_status) {
case SCSI_STATUS_OK:
if (debug)
@@ -628,8 +689,15 @@ sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
sbp_status->len = 1;
break;
case SCSI_STATUS_CHECK_COND:
+ if (debug)
+ printf("%s: STATUS SCSI_STATUS_CHECK_COND\n", __func__);
+ goto process_scsi_status;
case SCSI_STATUS_BUSY:
+ if (debug)
+ printf("%s: STATUS SCSI_STATUS_BUSY\n", __func__);
+ goto process_scsi_status;
case SCSI_STATUS_CMD_TERMINATED:
+process_scsi_status:
{
struct sbp_cmd_status *sbp_cmd_status;
struct scsi_sense_data *sense;
@@ -640,9 +708,6 @@ sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
int64_t sinfo;
int sense_len;
- if (debug)
- printf("%s: STATUS %d\n", __func__,
- ccb->csio.scsi_status);
sbp_cmd_status = (struct sbp_cmd_status *)&sbp_status->data[0];
sbp_cmd_status->status = ccb->csio.scsi_status;
sense = &ccb->csio.sense_data;
@@ -734,6 +799,7 @@ sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
if (scsi_get_sks(sense, sense_len, sks) == 0) {
bcopy(sks, &sbp_cmd_status->s_keydep[0], sizeof(sks));
sbp_status->len = 5;
+ ccb->ccb_h.status |= CAM_SENT_SENSE;
}
break;
@@ -743,13 +809,20 @@ sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
sbp_status->status);
}
- if (orbi->page_table != NULL)
- free(orbi->page_table, M_SBP_TARG);
sbp_targ_status_FIFO(orbi,
orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
}
+/*
+ * Invoked as a callback handler from fwmem_read/write_block
+ *
+ * Process read/write of initiator address space
+ * completion and pass status onto the backend target.
+ * If this is a partial read/write for a CCB, we
+ * decrement the orbi's refcount to note that this
+ * portion of the read/write has completed.
+ */
static void
sbp_targ_cam_done(struct fw_xfer *xfer)
{
@@ -758,7 +831,7 @@ sbp_targ_cam_done(struct fw_xfer *xfer)
orbi = (struct orb_info *)xfer->sc;
- if (debug > 1)
+ if (debug)
printf("%s: resp=%d refcount=%d\n", __func__,
xfer->resp, orbi->refcount);
@@ -779,13 +852,26 @@ sbp_targ_cam_done(struct fw_xfer *xfer)
if (debug)
printf("%s: orbi aborted\n", __func__);
sbp_targ_remove_orb_info(orbi->login, orbi);
- if (orbi->page_table != NULL)
+ if (orbi->page_table != NULL) {
+ if (debug)
+ printf("%s: free orbi->page_table %p\n",
+ __func__, orbi->page_table);
free(orbi->page_table, M_SBP_TARG);
+ }
+ if (debug)
+ printf("%s: free orbi %p\n", __func__, orbi);
free(orbi, M_SBP_TARG);
- } else if (orbi->status.resp == 0) {
- if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0)
+ orbi = NULL;
+ } else if (orbi->status.resp == ORBI_STATUS_NONE) {
+ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
+ if (debug)
+ printf("%s: CAM_SEND_STATUS set %0x\n", __func__, ccb->ccb_h.flags);
sbp_targ_send_status(orbi, ccb);
- ccb->ccb_h.status = CAM_REQ_CMP;
+ } else {
+ if (debug)
+ printf("%s: CAM_SEND_STATUS not set %0x\n", __func__, ccb->ccb_h.flags);
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ }
SBP_LOCK(orbi->sc);
xpt_done(ccb);
SBP_UNLOCK(orbi->sc);
@@ -855,6 +941,13 @@ sbp_targ_abort_ccb(struct sbp_targ_softc *sc, union ccb *ccb)
return (CAM_PATH_INVALID);
}
+/*
+ * Directly execute a read or write to the initiator
+ * address space and set hand (sbp_targ_cam_done) to
+ * process the completion from the SIM to the target.
+ * Set orbi->refcount to indicate that a read/write
+ * is in flight to/from the initiator.
+ */
static void
sbp_targ_xfer_buf(struct orb_info *orbi, u_int offset,
uint16_t dst_hi, uint32_t dst_lo, u_int size,
@@ -874,16 +967,21 @@ sbp_targ_xfer_buf(struct orb_info *orbi, u_int offset,
len = MIN(size, 2048 /* XXX */);
size -= len;
orbi->refcount ++;
- if (ccb_dir == CAM_DIR_OUT)
+ if (ccb_dir == CAM_DIR_OUT) {
+ if (debug)
+ printf("%s: CAM_DIR_OUT --> read block in?\n",__func__);
xfer = fwmem_read_block(orbi->fwdev,
- (void *)orbi, /*spd*/2,
+ (void *)orbi, /*spd*/FWSPD_S400,
dst_hi, dst_lo + off, len,
ptr + off, hand);
- else
+ } else {
+ if (debug)
+ printf("%s: CAM_DIR_IN --> write block out?\n",__func__);
xfer = fwmem_write_block(orbi->fwdev,
- (void *)orbi, /*spd*/2,
+ (void *)orbi, /*spd*/FWSPD_S400,
dst_hi, dst_lo + off, len,
ptr + off, hand);
+ }
if (xfer == NULL) {
printf("%s: xfer == NULL", __func__);
/* XXX what should we do?? */
@@ -897,18 +995,22 @@ static void
sbp_targ_pt_done(struct fw_xfer *xfer)
{
struct orb_info *orbi;
- union ccb *ccb;
- u_int i, offset, res, len;
- uint32_t t1, t2, *p;
+ struct unrestricted_page_table_fmt *pt;
+ uint32_t i;
orbi = (struct orb_info *)xfer->sc;
- ccb = orbi->ccb;
+
if (orbi->state == ORBI_STATUS_ABORTED) {
if (debug)
printf("%s: orbi aborted\n", __func__);
sbp_targ_remove_orb_info(orbi->login, orbi);
+ if (debug) {
+ printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
+ printf("%s: free orbi %p\n", __func__, orbi);
+ }
free(orbi->page_table, M_SBP_TARG);
free(orbi, M_SBP_TARG);
+ orbi = NULL;
fw_xfer_free(xfer);
return;
}
@@ -920,60 +1022,158 @@ sbp_targ_pt_done(struct fw_xfer *xfer)
orbi->status.len = 1;
sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
+ if (debug)
+ printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
+
sbp_targ_status_FIFO(orbi,
orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
free(orbi->page_table, M_SBP_TARG);
+ orbi->page_table = NULL;
fw_xfer_free(xfer);
return;
}
- res = ccb->csio.dxfer_len;
- offset = 0;
- if (debug)
- printf("%s: dxfer_len=%d\n", __func__, res);
- orbi->refcount ++;
- for (p = orbi->page_table, i = orbi->orb4.data_size; i > 0; i --) {
- t1 = ntohl(*p++);
- t2 = ntohl(*p++);
- if (debug > 1)
- printf("page_table: %04x:%08x %d\n",
- t1 & 0xffff, t2, t1>>16);
- len = MIN(t1 >> 16, res);
- res -= len;
- sbp_targ_xfer_buf(orbi, offset, t1 & 0xffff, t2, len,
- sbp_targ_cam_done);
- offset += len;
- if (res == 0)
- break;
+ orbi->refcount++;
+/*
+ * Set endianness here so we don't have
+ * to deal with it later.
+ */
+ for (i = 0, pt = orbi->page_table; i < orbi->orb4.data_size; i++, pt++) {
+ pt->segment_len = ntohs(pt->segment_len);
+ if (debug)
+ printf("%s:segment_len = %u\n", __func__,pt->segment_len);
+ pt->segment_base_high = ntohs(pt->segment_base_high);
+ pt->segment_base_low = ntohl(pt->segment_base_low);
}
- orbi->refcount --;
+
+ sbp_targ_xfer_pt(orbi);
+
+ orbi->refcount--;
if (orbi->refcount == 0)
printf("%s: refcount == 0\n", __func__);
- if (res !=0)
- /* XXX handle res != 0 case */
- printf("%s: page table is too small(%d)\n", __func__, res);
fw_xfer_free(xfer);
return;
}
+static void sbp_targ_xfer_pt(struct orb_info *orbi)
+{
+ union ccb *ccb;
+ uint32_t res, offset, len;
+
+ ccb = orbi->ccb;
+ if (debug)
+ printf("%s: dxfer_len=%d\n", __func__, ccb->csio.dxfer_len);
+ res = ccb->csio.dxfer_len;
+ /*
+ * If the page table required multiple CTIOs to
+ * complete, then cur_pte is non-NULL and we need
+ * to start from the last position. If this is the
+ * first pass over a page table, we just start at
+ * the beginning of the table.
+ *
+ * Parse the unrestricted page table and figure out where we need
+ * to shove the data from this read request.
+ */
+ for (offset = 0, len = 0; (res != 0) && (orbi->cur_pte < orbi->last_pte); offset += len) {
+ len = MIN(orbi->cur_pte->segment_len, res);
+ res -= len;
+ if (debug)
+ printf("%s:page_table: %04x:%08x segment_len(%u) res(%u) len(%u)\n",
+ __func__, orbi->cur_pte->segment_base_high,
+ orbi->cur_pte->segment_base_low,
+ orbi->cur_pte->segment_len,
+ res, len);
+ sbp_targ_xfer_buf(orbi, offset,
+ orbi->cur_pte->segment_base_high,
+ orbi->cur_pte->segment_base_low,
+ len, sbp_targ_cam_done);
+ /*
+ * If we have only written partially to
+ * this page table, then we need to save
+ * our position for the next CTIO. If we
+ * have completed the page table, then we
+ * are safe to move on to the next entry.
+ */
+ if (len == orbi->cur_pte->segment_len) {
+ orbi->cur_pte++;
+ } else {
+ uint32_t saved_base_low;
+
+ /* Handle transfers that cross a 4GB boundary. */
+ saved_base_low = orbi->cur_pte->segment_base_low;
+ orbi->cur_pte->segment_base_low += len;
+ if (orbi->cur_pte->segment_base_low < saved_base_low)
+ orbi->cur_pte->segment_base_high++;
+
+ orbi->cur_pte->segment_len -= len;
+ }
+ }
+ if (debug) {
+ printf("%s: base_low(%08x) page_table_off(%p) last_block(%u)\n",
+ __func__, orbi->cur_pte->segment_base_low,
+ orbi->cur_pte, orbi->last_block_read);
+ }
+ if (res != 0)
+ printf("Warning - short pt encountered. "
+ "Could not transfer all data.\n");
+ return;
+}
+
+/*
+ * Create page table in local memory
+ * and transfer it from the initiator
+ * in order to know where we are supposed
+ * to put the data.
+ */
+
static void
sbp_targ_fetch_pt(struct orb_info *orbi)
{
struct fw_xfer *xfer;
- if (debug)
- printf("%s: page_table_size=%d\n",
- __func__, orbi->orb4.data_size);
- orbi->page_table = malloc(orbi->orb4.data_size*8, M_SBP_TARG, M_NOWAIT);
- if (orbi->page_table == NULL)
- goto error;
- xfer = fwmem_read_block(orbi->fwdev, (void *)orbi, /*spd*/2,
- orbi->data_hi, orbi->data_lo, orbi->orb4.data_size*8,
- (void *)orbi->page_table, sbp_targ_pt_done);
- if (xfer != NULL)
+ /*
+ * Pull in page table from initiator
+ * and setup for data from our
+ * backend device.
+ */
+ if (orbi->page_table == NULL) {
+ orbi->page_table = malloc(orbi->orb4.data_size*
+ sizeof(struct unrestricted_page_table_fmt),
+ M_SBP_TARG, M_NOWAIT|M_ZERO);
+ if (orbi->page_table == NULL)
+ goto error;
+ orbi->cur_pte = orbi->page_table;
+ orbi->last_pte = orbi->page_table + orbi->orb4.data_size;
+ orbi->last_block_read = orbi->orb4.data_size;
+ if (debug && orbi->page_table != NULL)
+ printf("%s: malloc'd orbi->page_table(%p), orb4.data_size(%u)\n",
+ __func__, orbi->page_table, orbi->orb4.data_size);
+
+ xfer = fwmem_read_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400,
+ orbi->data_hi, orbi->data_lo, orbi->orb4.data_size*
+ sizeof(struct unrestricted_page_table_fmt),
+ (void *)orbi->page_table, sbp_targ_pt_done);
+
+ if (xfer != NULL)
+ return;
+ } else {
+ /*
+ * This is a CTIO for a page table we have
+ * already malloc'd, so just directly invoke
+ * the xfer function on the orbi.
+ */
+ sbp_targ_xfer_pt(orbi);
return;
+ }
error:
orbi->ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ if (debug)
+ printf("%s: free orbi->page_table %p due to xfer == NULL\n", __func__, orbi->page_table);
+ if (orbi->page_table != NULL) {
+ free(orbi->page_table, M_SBP_TARG);
+ orbi->page_table = NULL;
+ }
xpt_done(orbi->ccb);
return;
}
@@ -1016,6 +1216,8 @@ sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
if (debug)
printf("%s: ctio aborted\n", __func__);
sbp_targ_remove_orb_info_locked(orbi->login, orbi);
+ if (debug)
+ printf("%s: free orbi %p\n", __func__, orbi);
free(orbi, M_SBP_TARG);
ccb->ccb_h.status = CAM_REQ_ABORTED;
xpt_done(ccb);
@@ -1051,17 +1253,16 @@ sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
}
/* Sanity check */
- if (ccb_dir != CAM_DIR_NONE &&
- orbi->orb4.data_size != ccb->csio.dxfer_len)
- printf("%s: data_size(%d) != dxfer_len(%d)\n",
- __func__, orbi->orb4.data_size,
- ccb->csio.dxfer_len);
-
- if (ccb_dir != CAM_DIR_NONE)
+ if (ccb_dir != CAM_DIR_NONE) {
sbp_targ_xfer_buf(orbi, 0, orbi->data_hi,
orbi->data_lo,
MIN(orbi->orb4.data_size, ccb->csio.dxfer_len),
sbp_targ_cam_done);
+ if ( orbi->orb4.data_size > ccb->csio.dxfer_len ) {
+ orbi->data_lo += ccb->csio.dxfer_len;
+ orbi->orb4.data_size -= ccb->csio.dxfer_len;
+ }
+ }
if (ccb_dir == CAM_DIR_NONE) {
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
@@ -1125,7 +1326,8 @@ sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
cpi->target_sprt = PIT_PROCESSOR
| PIT_DISCONNECT
| PIT_TERM_IO;
- cpi->hba_misc = PIM_NOBUSRESET | PIM_NO_6_BYTE;
+ cpi->transport = XPORT_SPI; /* FIXME add XPORT_FW type to cam */
+ cpi->hba_misc = PIM_NOBUSRESET;
cpi->hba_eng_cnt = 0;
cpi->max_target = 7; /* XXX */
cpi->max_lun = MAX_LUN - 1;
@@ -1163,10 +1365,42 @@ sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
xpt_done(ccb);
break;
}
+#ifdef CAM_NEW_TRAN_CODE
+ case XPT_SET_TRAN_SETTINGS:
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ break;
+ case XPT_GET_TRAN_SETTINGS:
+ {
+ struct ccb_trans_settings *cts = &ccb->cts;
+ struct ccb_trans_settings_scsi *scsi =
+ &cts->proto_specific.scsi;
+ struct ccb_trans_settings_spi *spi =
+ &cts->xport_specific.spi;
+
+ cts->protocol = PROTO_SCSI;
+ cts->protocol_version = SCSI_REV_2;
+ cts->transport = XPORT_FW; /* should have a FireWire */
+ cts->transport_version = 2;
+ spi->valid = CTS_SPI_VALID_DISC;
+ spi->flags = CTS_SPI_FLAGS_DISC_ENB;
+ scsi->valid = CTS_SCSI_VALID_TQ;
+ scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
+#if 0
+ printf("%s:%d:%d XPT_GET_TRAN_SETTINGS:\n",
+ device_get_nameunit(sc->fd.dev),
+ ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
+#endif
+ cts->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+#endif
+
default:
- printf("%s: unknown function %d\n",
+ printf("%s: unknown function 0x%x\n",
__func__, ccb->ccb_h.func_code);
- ccb->ccb_h.status = CAM_REQ_INVALID;
+ ccb->ccb_h.status = CAM_PROVIDE_FAIL;
xpt_done(ccb);
break;
}
@@ -1245,7 +1479,7 @@ sbp_targ_cmd_handler(struct fw_xfer *xfer)
atio->ccb_h.target_id = 0; /* XXX */
atio->ccb_h.target_lun = orbi->login->lstate->lun;
atio->sense_len = 0;
- atio->tag_action = 1; /* XXX */
+ atio->tag_action = MSG_SIMPLE_TASK;
atio->tag_id = orbi->orb_lo;
atio->init_id = orbi->login->id;
@@ -1429,7 +1663,7 @@ sbp_targ_mgm_handler(struct fw_xfer *xfer)
login->loginres.recon_hold = htons(login->hold_sec);
STAILQ_INSERT_TAIL(&lstate->logins, login, link);
- fwmem_write_block(orbi->fwdev, NULL, /*spd*/2, orb[2], orb[3],
+ fwmem_write_block(orbi->fwdev, NULL, /*spd*/FWSPD_S400, orb[2], orb[3],
sizeof(struct sbp_login_res), (void *)&login->loginres,
fw_asy_callback_free);
/* XXX return status after loginres is successfully written */
@@ -1515,10 +1749,11 @@ sbp_targ_fetch_orb(struct sbp_targ_softc *sc, struct fw_device *fwdev,
orbi->orb_lo = orb_lo;
orbi->status.orb_hi = htons(orb_hi);
orbi->status.orb_lo = htonl(orb_lo);
+ orbi->page_table = NULL;
switch (mode) {
case FETCH_MGM:
- fwmem_read_block(fwdev, (void *)orbi, /*spd*/2, orb_hi, orb_lo,
+ fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
sizeof(uint32_t) * 8, &orbi->orb[0],
sbp_targ_mgm_handler);
break;
@@ -1545,14 +1780,14 @@ sbp_targ_fetch_orb(struct sbp_targ_softc *sc, struct fw_device *fwdev,
SLIST_REMOVE_HEAD(&login->lstate->accept_tios, sim_links.sle);
STAILQ_INSERT_TAIL(&login->orbs, orbi, link);
SBP_UNLOCK(sc);
- fwmem_read_block(fwdev, (void *)orbi, /*spd*/2, orb_hi, orb_lo,
+ fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
sizeof(uint32_t) * 8, &orbi->orb[0],
sbp_targ_cmd_handler);
break;
case FETCH_POINTER:
orbi->state = ORBI_STATUS_POINTER;
login->flags |= F_LINK_ACTIVE;
- fwmem_read_block(fwdev, (void *)orbi, /*spd*/2, orb_hi, orb_lo,
+ fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
sizeof(uint32_t) * 2, &orbi->orb[0],
sbp_targ_pointer_handler);
break;
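Throughout this file the bare literal 2 passed as the speed argument becomes FWSPD_S400. IEEE 1394 speed codes are consecutive small integers, so the named constant keeps the same value while documenting that these transfers run at S400. An illustrative layout (the authoritative definition lives in the firewire headers, not here):

    /* Illustrative only: the conventional IEEE 1394 speed-code values. */
    enum fw_speed_sketch {
        SKETCH_S100 = 0,    /* 100 Mbit/s */
        SKETCH_S200 = 1,    /* 200 Mbit/s */
        SKETCH_S400 = 2     /* 400 Mbit/s; the value the literal 2 meant */
    };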
@@ -1709,7 +1944,7 @@ done:
if (rtcode != 0)
printf("%s: rtcode = %d\n", __func__, rtcode);
sfp = &xfer->send.hdr;
- xfer->send.spd = 2; /* XXX */
+ xfer->send.spd = FWSPD_S400;
xfer->hand = sbp_targ_resp_callback;
sfp->mode.wres.dst = fp->mode.wreqb.src;
sfp->mode.wres.tlrt = fp->mode.wreqb.tlrt;
diff --git a/sys/dev/hwpmc/hwpmc_x86.c b/sys/dev/hwpmc/hwpmc_x86.c
index 8c98983..72ed518 100644
--- a/sys/dev/hwpmc/hwpmc_x86.c
+++ b/sys/dev/hwpmc/hwpmc_x86.c
@@ -250,7 +250,7 @@ pmc_md_initialize()
return (NULL);
/* disallow sampling if we do not have an LAPIC */
- if (!lapic_enable_pmc())
+ if (md != NULL && !lapic_enable_pmc())
for (i = 1; i < md->pmd_nclass; i++)
md->pmd_classdep[i].pcd_caps &= ~PMC_CAP_INTERRUPT;
diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c
index 3786456..423d71d 100644
--- a/sys/dev/iwn/if_iwn.c
+++ b/sys/dev/iwn/if_iwn.c
@@ -2813,11 +2813,13 @@ iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
bitmap |= 1ULL << bit;
}
tap = sc->qid2tap[qid];
- tid = WME_AC_TO_TID(tap->txa_ac);
- wn = (void *)tap->txa_ni;
- wn->agg[tid].bitmap = bitmap;
- wn->agg[tid].startidx = start;
- wn->agg[tid].nframes = nframes;
+ if (tap != NULL) {
+ tid = WME_AC_TO_TID(tap->txa_ac);
+ wn = (void *)tap->txa_ni;
+ wn->agg[tid].bitmap = bitmap;
+ wn->agg[tid].startidx = start;
+ wn->agg[tid].nframes = nframes;
+ }
seqno = le32toh(*(status + nframes)) & 0xfff;
for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c
index b4f11bd..bf7e3aa 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/ixgbe.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2011, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "2.3.11";
+char ixgbe_driver_version[] = "2.4.5";
/*********************************************************************
* PCI Device ID Table
@@ -81,6 +81,8 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -153,6 +155,7 @@ static void ixgbe_refresh_mbufs(struct rx_ring *, int);
static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
+static int ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS);
static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
struct ixgbe_dma_alloc *, int);
static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
@@ -232,7 +235,7 @@ MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
static int ixgbe_enable_aim = TRUE;
TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
-static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
+static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
/* How many packets rxeof tries to clean at a time */
@@ -415,19 +418,29 @@ ixgbe_attach(device_t dev)
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
+ OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "advertise_gig", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixgbe_set_advertise, "I", "1G Link");
-
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
&ixgbe_enable_aim, 1, "Interrupt Moderation");
+ /*
+ ** Allow a kind of speed control by forcing the autoneg
+	** advertised speed list to a single value; this
+	** supports 1G on 82599 devices and 100Mb on X540.
+ */
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
+ adapter, 0, ixgbe_set_advertise, "I", "Link Speed");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "ts", CTLTYPE_INT | CTLFLAG_RW, adapter,
+ 0, ixgbe_set_thermal_test, "I", "Thermal Test");
+
/* Set up the timer callout */
callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
@@ -515,9 +528,10 @@ ixgbe_attach(device_t dev)
/* Get Hardware Flow Control setting */
hw->fc.requested_mode = ixgbe_fc_full;
+ adapter->fc = hw->fc.requested_mode;
hw->fc.pause_time = IXGBE_FC_PAUSE;
hw->fc.low_water = IXGBE_FC_LO;
- hw->fc.high_water = IXGBE_FC_HI;
+ hw->fc.high_water[0] = IXGBE_FC_HI;
hw->fc.send_xon = TRUE;
error = ixgbe_init_hw(hw);
@@ -724,16 +738,20 @@ ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
return;
while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) {
+ txr->queue_status |= IXGBE_QUEUE_DEPLETED;
+ break;
+ }
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
if (ixgbe_xmit(txr, &m_head)) {
- if (m_head == NULL)
- break;
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+ if (m_head != NULL)
+ IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+ if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
+ txr->queue_status |= IXGBE_QUEUE_DEPLETED;
break;
}
/* Send a copy of the frame to the BPF listener */
@@ -782,11 +800,14 @@ ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
/* Which queue to use */
if ((m->m_flags & M_FLOWID) != 0)
i = m->m_pkthdr.flowid % adapter->num_queues;
+ else
+ i = curcpu % adapter->num_queues;
txr = &adapter->tx_rings[i];
que = &adapter->queues[i];
- if (IXGBE_TX_TRYLOCK(txr)) {
+ if (((txr->queue_status & IXGBE_QUEUE_DEPLETED) == 0) &&
+ IXGBE_TX_TRYLOCK(txr)) {
err = ixgbe_mq_start_locked(ifp, txr, m);
IXGBE_TX_UNLOCK(txr);
} else {
@@ -804,8 +825,9 @@ ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
struct mbuf *next;
int enqueued, err = 0;
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING || adapter->link_active == 0) {
+ if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
+ (txr->queue_status == IXGBE_QUEUE_DEPLETED) ||
+ adapter->link_active == 0) {
if (m != NULL)
err = drbr_enqueue(ifp, txr->br, m);
return (err);
@@ -837,7 +859,7 @@ ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
ixgbe_txeof(txr);
if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ txr->queue_status |= IXGBE_QUEUE_DEPLETED;
break;
}
next = drbr_dequeue(ifp, txr->br);
@@ -845,10 +867,13 @@ ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
if (enqueued > 0) {
/* Set watchdog on */
- txr->queue_status = IXGBE_QUEUE_WORKING;
+ txr->queue_status |= IXGBE_QUEUE_WORKING;
txr->watchdog_time = ticks;
}
+ if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
+ ixgbe_txeof(txr);
+
return (err);
}
@@ -916,8 +941,8 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
arp_ifinit(ifp, ifa);
} else
error = ether_ioctl(ifp, command, data);
- break;
#endif
+ break;
case SIOCSIFMTU:
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
@@ -1087,10 +1112,14 @@ ixgbe_init_locked(struct adapter *adapter)
/* Enable Fan Failure Interrupt */
gpie |= IXGBE_SDP1_GPIEN;
- /* Add for Thermal detection */
+ /* Add for Module detection */
if (hw->mac.type == ixgbe_mac_82599EB)
gpie |= IXGBE_SDP2_GPIEN;
+ /* Thermal Failure Detection */
+ if (hw->mac.type == ixgbe_mac_X540)
+ gpie |= IXGBE_SDP0_GPIEN;
+
if (adapter->msix > 1) {
/* Enable Enhanced MSIX mode */
gpie |= IXGBE_GPIE_MSIX_MODE;
@@ -1196,8 +1225,12 @@ ixgbe_init_locked(struct adapter *adapter)
#ifdef IXGBE_FDIR
/* Init Flow director */
- if (hw->mac.type != ixgbe_mac_82598EB)
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ u32 hdrm = 64 << fdir_pballoc;
+
+ hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
+ }
#endif
/*
@@ -1325,7 +1358,7 @@ ixgbe_handle_que(void *context, int pending)
ixgbe_start_locked(txr, ifp);
#endif
IXGBE_TX_UNLOCK(txr);
- if (more) {
+ if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
taskqueue_enqueue(que->tq, &que->que_task);
return;
}
@@ -1405,6 +1438,7 @@ ixgbe_msix_que(void *arg)
bool more_tx, more_rx;
u32 newitr = 0;
+ ixgbe_disable_queue(adapter, que->msix);
++que->irqs;
more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
@@ -1539,6 +1573,15 @@ ixgbe_msix_link(void *arg)
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
}
+ /* Check for over temp condition */
+ if ((hw->mac.type == ixgbe_mac_X540) &&
+ (reg_eicr & IXGBE_EICR_GPI_SDP0)) {
+ device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
+ "PHY IS SHUT DOWN!!\n");
+ device_printf(adapter->dev, "System shutdown required\n");
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+ }
+
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
return;
}
@@ -1571,6 +1614,9 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_100_FULL:
+ ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
+ break;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
break;
@@ -1606,7 +1652,9 @@ ixgbe_media_change(struct ifnet * ifp)
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_AUTO:
adapter->hw.phy.autoneg_advertised =
- IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_10GB_FULL;
break;
default:
device_printf(adapter->dev, "Only auto media type\n");
@@ -1878,7 +1926,7 @@ ixgbe_set_multi(struct adapter *adapter)
update_ptr = mta;
ixgbe_update_mc_addr_list(&adapter->hw,
- update_ptr, mcnt, ixgbe_mc_array_itr);
+ update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
return;
}
@@ -1912,11 +1960,15 @@ ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
static void
ixgbe_local_timer(void *arg)
{
- struct adapter *adapter = arg;
+ struct adapter *adapter = arg;
device_t dev = adapter->dev;
- struct tx_ring *txr = adapter->tx_rings;
+ struct ifnet *ifp = adapter->ifp;
+ struct ix_queue *que = adapter->queues;
+ struct tx_ring *txr = adapter->tx_rings;
+ int hung, busy, paused;
mtx_assert(&adapter->core_mtx, MA_OWNED);
+ hung = busy = paused = 0;
/* Check for pluggable optics */
if (adapter->sfp_probe)
@@ -1931,21 +1983,38 @@ ixgbe_local_timer(void *arg)
* then don't do the watchdog check
*/
if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
- goto out;
+ paused = 1;
/*
- ** Check status on the TX queues for a hang
- */
- for (int i = 0; i < adapter->num_queues; i++, txr++)
- if (txr->queue_status == IXGBE_QUEUE_HUNG)
- goto hung;
+	** Check the status of the TX queues
+ ** - central locked handling of OACTIVE
+ ** - watchdog only if all queues show hung
+ */
+ for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
+ if ((txr->queue_status & IXGBE_QUEUE_HUNG) &&
+ (paused == 0))
+ ++hung;
+ if (txr->queue_status & IXGBE_QUEUE_DEPLETED)
+ ++busy;
+ if ((txr->queue_status & IXGBE_QUEUE_IDLE) == 0)
+ taskqueue_enqueue(que->tq, &que->que_task);
+ }
+	/* Only truly watchdog if all queues show hung */
+ if (hung == adapter->num_queues)
+ goto watchdog;
+ /* Only turn off the stack flow when ALL are depleted */
+ if (busy == adapter->num_queues)
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ else if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) &&
+ (busy < adapter->num_queues))
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
out:
ixgbe_rearm_queues(adapter, adapter->que_mask);
callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
return;
-hung:
+watchdog:
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
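The rewritten timer no longer resets the adapter the moment one ring reports HUNG: it counts HUNG and DEPLETED bits across all queues and acts only on unanimity, so one stalled ring cannot trigger a full reset or stall the stack while the others still have room. A condensed sketch of the roll-up, assuming the same flag values as the header change later in this commit:

    #define QUEUE_HUNG     4
    #define QUEUE_DEPLETED 8

    static void
    scan_queues(const unsigned *status, int nq, int *all_hung, int *all_busy)
    {
        int hung = 0, busy = 0;

        for (int i = 0; i < nq; i++) {
            if (status[i] & QUEUE_HUNG)
                hung++;
            if (status[i] & QUEUE_DEPLETED)
                busy++;
        }
        *all_hung = (hung == nq);   /* watchdog fires only then */
        *all_busy = (busy == nq);   /* IFF_DRV_OACTIVE only then */
    }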
@@ -2015,9 +2084,11 @@ ixgbe_stop(void *arg)
INIT_DEBUGOUT("ixgbe_stop: begin\n");
ixgbe_disable_intr(adapter);
+ callout_stop(&adapter->timer);
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+	/* Let the stack know... */
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
ixgbe_reset_hw(hw);
hw->adapter_stopped = FALSE;
@@ -2025,7 +2096,6 @@ ixgbe_stop(void *arg)
/* Turn off the laser */
if (hw->phy.multispeed_fiber)
ixgbe_disable_tx_laser(hw);
- callout_stop(&adapter->timer);
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
@@ -2079,35 +2149,41 @@ ixgbe_setup_optics(struct adapter *adapter)
int layer;
layer = ixgbe_get_supported_physical_layer(hw);
- switch (layer) {
- case IXGBE_PHYSICAL_LAYER_10GBASE_T:
- adapter->optics = IFM_10G_T;
- break;
- case IXGBE_PHYSICAL_LAYER_1000BASE_T:
- adapter->optics = IFM_1000_T;
- break;
- case IXGBE_PHYSICAL_LAYER_10GBASE_LR:
- case IXGBE_PHYSICAL_LAYER_10GBASE_LRM:
- adapter->optics = IFM_10G_LR;
- break;
- case IXGBE_PHYSICAL_LAYER_10GBASE_SR:
- adapter->optics = IFM_10G_SR;
- break;
- case IXGBE_PHYSICAL_LAYER_10GBASE_KX4:
- case IXGBE_PHYSICAL_LAYER_10GBASE_CX4:
- adapter->optics = IFM_10G_CX4;
- break;
- case IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU:
- adapter->optics = IFM_10G_TWINAX;
- break;
- case IXGBE_PHYSICAL_LAYER_1000BASE_KX:
- case IXGBE_PHYSICAL_LAYER_10GBASE_KR:
- case IXGBE_PHYSICAL_LAYER_10GBASE_XAUI:
- case IXGBE_PHYSICAL_LAYER_UNKNOWN:
- default:
- adapter->optics = IFM_ETHER | IFM_AUTO;
- break;
+
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
+ adapter->optics = IFM_10G_T;
+ return;
+ }
+
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
+ adapter->optics = IFM_1000_T;
+ return;
}
+
+ if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
+ IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
+ adapter->optics = IFM_10G_LR;
+ return;
+ }
+
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
+ adapter->optics = IFM_10G_SR;
+ return;
+ }
+
+ if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
+ adapter->optics = IFM_10G_TWINAX;
+ return;
+ }
+
+ if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+ IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
+ adapter->optics = IFM_10G_CX4;
+ return;
+ }
+
+ /* If we get here just set the default */
+ adapter->optics = IFM_ETHER | IFM_AUTO;
return;
}
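The switch in ixgbe_setup_optics() gives way to a chain of bit tests because the supported-physical-layer word can carry several bits at once; an equality switch would miss combined values, while the if-chain takes the first match, so its ordering encodes media priority. A reduced sketch with hypothetical constants:

    #define LAYER_10G_T  0x01   /* hypothetical bit values */
    #define LAYER_1000_T 0x02

    static int
    pick_media(unsigned layer_mask)
    {
        if (layer_mask & LAYER_10G_T)   /* highest priority first */
            return (1);                 /* e.g. IFM_10G_T */
        if (layer_mask & LAYER_1000_T)
            return (2);                 /* e.g. IFM_1000_T */
        return (0);                     /* fall back to auto */
    }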
@@ -2975,6 +3051,7 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
default:
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
break;
@@ -2985,6 +3062,7 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
default:
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
break;
@@ -3385,22 +3463,41 @@ ixgbe_txeof(struct tx_ring *txr)
#ifdef DEV_NETMAP
if (ifp->if_capenable & IFCAP_NETMAP) {
struct netmap_adapter *na = NA(ifp);
+ struct netmap_kring *kring = &na->tx_rings[txr->me];
+ tx_desc = (struct ixgbe_legacy_tx_desc *)txr->tx_base;
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
/*
* In netmap mode, all the work is done in the context
* of the client thread. Interrupt handlers only wake up
* clients, which may be sleeping on individual rings
* or on a global resource for all rings.
+ * To implement tx interrupt mitigation, we wake up the client
+ * thread roughly every half ring, even if the NIC interrupts
+ * more frequently. This is implemented as follows:
+ * - ixgbe_txsync() sets kring->nr_kflags with the index of
+ * the slot that should wake up the thread (nkr_num_slots
+ * means the user thread should not be woken up);
+ * - the driver ignores tx interrupts unless netmap_mitigate=0
+ * or the slot has the DD bit set.
+ *
* When the driver has separate locks, we need to
* release and re-acquire txlock to avoid deadlocks.
* XXX see if we can find a better way.
*/
- selwakeuppri(&na->tx_rings[txr->me].si, PI_NET);
- IXGBE_TX_UNLOCK(txr);
- IXGBE_CORE_LOCK(adapter);
- selwakeuppri(&na->tx_rings[na->num_queues + 1].si, PI_NET);
- IXGBE_CORE_UNLOCK(adapter);
- IXGBE_TX_LOCK(txr);
+ if (!netmap_mitigate ||
+ (kring->nr_kflags < kring->nkr_num_slots &&
+ tx_desc[kring->nr_kflags].upper.fields.status & IXGBE_TXD_STAT_DD)) {
+ kring->nr_kflags = kring->nkr_num_slots;
+ selwakeuppri(&na->tx_rings[txr->me].si, PI_NET);
+ IXGBE_TX_UNLOCK(txr);
+ IXGBE_CORE_LOCK(adapter);
+ selwakeuppri(&na->tx_rings[na->num_queues + 1].si, PI_NET);
+ IXGBE_CORE_UNLOCK(adapter);
+ IXGBE_TX_LOCK(txr);
+ }
return FALSE;
}
#endif /* DEV_NETMAP */
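The netmap path above wakes the client only about once per half ring: ixgbe_txsync() records in kring->nr_kflags the slot whose completion should trigger the next wakeup (nkr_num_slots doubles as "no wakeup requested"), and the interrupt path stays quiet until that slot's DD (descriptor done) bit is set, unless netmap_mitigate is off. A simplified sketch of the gate, with types reduced to essentials:

    #include <stdbool.h>
    #include <stdint.h>

    struct kring_view {
        uint32_t nr_kflags;      /* wakeup slot, or the sentinel */
        uint32_t nkr_num_slots;  /* ring size, used as sentinel */
    };

    static bool
    should_wake(const struct kring_view *k, const uint8_t *slot_dd,
        int mitigate)
    {
        if (!mitigate)
            return (true);                   /* mitigation disabled */
        if (k->nr_kflags >= k->nkr_num_slots)
            return (false);                  /* no wakeup requested */
        return (slot_dd[k->nr_kflags] != 0); /* marked slot done? */
    }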
@@ -3493,18 +3590,13 @@ ixgbe_txeof(struct tx_ring *txr)
if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
txr->queue_status = IXGBE_QUEUE_HUNG;
- /*
- * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
- * it is OK to send packets. If there are no pending descriptors,
- * clear the timeout. Otherwise, if some descriptors have been freed,
- * restart the timeout.
- */
- if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- if (txr->tx_avail == adapter->num_tx_desc) {
- txr->queue_status = IXGBE_QUEUE_IDLE;
- return FALSE;
- }
+	/* Once a minimum number of descriptors are free, clear the depleted bit. */
+ if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD)
+ txr->queue_status &= ~IXGBE_QUEUE_DEPLETED;
+
+ if (txr->tx_avail == adapter->num_tx_desc) {
+ txr->queue_status = IXGBE_QUEUE_IDLE;
+ return (FALSE);
}
return TRUE;
@@ -3904,6 +3996,7 @@ skip_head:
rxr->rx_split_packets = 0;
rxr->rx_bytes = 0;
rxr->discard = FALSE;
+ rxr->vtag_strip = FALSE;
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -3911,7 +4004,7 @@ skip_head:
/*
** Now set up the LRO interface:
** 82598 uses software LRO, the
- ** 82599 uses a hardware assist.
+ ** 82599 and X540 use a hardware assist.
*/
if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
(ifp->if_capenable & IFCAP_RXCSUM) &&
@@ -3928,21 +4021,6 @@ skip_head:
lro->ifp = adapter->ifp;
}
-#ifdef DEV_NETMAP1 /* XXX experimental CRC strip */
- {
- struct ixgbe_hw *hw = &adapter->hw;
- u32 rdrxctl;
-
- rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
- if (slot)
- rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
- else
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
- }
-#endif /* DEV_NETMAP1 */
IXGBE_RX_UNLOCK(rxr);
return (0);
@@ -4022,15 +4100,10 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
hlreg |= IXGBE_HLREG0_JUMBOEN;
else
hlreg &= ~IXGBE_HLREG0_JUMBOEN;
-#ifdef DEV_NETMAP1 /* XXX experimental CRCSTRIP */
- if (ifp->if_capenable & IFCAP_NETMAP)
- hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
- else
- hlreg |= IXGBE_HLREG0_RXCRCSTRP;
-#endif /* DEV_NETMAP1 */
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
- bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ bufsz = (adapter->rx_mbuf_sz +
+ BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
for (int i = 0; i < adapter->num_queues; i++, rxr++) {
u64 rdba = rxr->rxdma.dma_paddr;
@@ -4297,11 +4370,14 @@ ixgbe_rxeof(struct ix_queue *que, int count)
#ifdef DEV_NETMAP
if (ifp->if_capenable & IFCAP_NETMAP) {
/*
- * Same as the txeof routine, only wakeup clients
- * and make sure there are no deadlocks.
+		 * Same as the txeof routine: only wake up clients on interrupt.
+ * NKR_PENDINTR in nr_kflags is used to implement interrupt
+ * mitigation (ixgbe_rxsync() will not look for new packets
+ * unless NKR_PENDINTR is set).
*/
struct netmap_adapter *na = NA(ifp);
+ na->rx_rings[rxr->me].nr_kflags |= NKR_PENDINTR;
selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET);
IXGBE_RX_UNLOCK(rxr);
IXGBE_CORE_LOCK(adapter);
@@ -4313,7 +4389,8 @@ ixgbe_rxeof(struct ix_queue *que, int count)
for (i = rxr->next_to_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
u32 rsc, ptype;
- u16 hlen, plen, hdr, vtag;
+ u16 hlen, plen, hdr;
+ u16 vtag = 0;
bool eop;
/* Sync the ring. */
@@ -4341,9 +4418,12 @@ ixgbe_rxeof(struct ix_queue *que, int count)
ptype = le32toh(cur->wb.lower.lo_dword.data) &
IXGBE_RXDADV_PKTTYPE_MASK;
hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
- vtag = le16toh(cur->wb.upper.vlan);
eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+ /* Process vlan info */
+ if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
+ vtag = le16toh(cur->wb.upper.vlan);
+
/* Make sure bad packets are discarded */
if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
(rxr->discard)) {
@@ -4444,8 +4524,8 @@ ixgbe_rxeof(struct ix_queue *que, int count)
} else {
/* Singlet, prepare to send */
sendmp = mh;
- if ((adapter->num_vlans) &&
- (staterr & IXGBE_RXD_STAT_VP)) {
+ /* If hardware handled vtag */
+ if (vtag) {
sendmp->m_pkthdr.ether_vtag = vtag;
sendmp->m_flags |= M_VLANTAG;
}
@@ -4654,6 +4734,7 @@ ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
struct ifnet *ifp = adapter->ifp;
struct ixgbe_hw *hw = &adapter->hw;
+ struct rx_ring *rxr;
u32 ctrl;
@@ -4685,13 +4766,17 @@ ixgbe_setup_vlan_hw_support(struct adapter *adapter)
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- /* On 82599 the VLAN enable is per/queue in RXDCTL */
- if (hw->mac.type != ixgbe_mac_82598EB)
- for (int i = 0; i < adapter->num_queues; i++) {
+ /* Setup the queues for vlans */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ rxr = &adapter->rx_rings[i];
+ /* On 82599 the VLAN enable is per/queue in RXDCTL */
+ if (hw->mac.type != ixgbe_mac_82598EB) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
- ctrl |= IXGBE_RXDCTL_VME;
+ ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
}
+ rxr->vtag_strip = TRUE;
+ }
}
static void
@@ -4707,6 +4792,7 @@ ixgbe_enable_intr(struct adapter *adapter)
mask |= IXGBE_EIMS_GPI_SDP1;
else {
mask |= IXGBE_EIMS_ECC;
+ mask |= IXGBE_EIMS_GPI_SDP0;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
#ifdef IXGBE_FDIR
@@ -4804,6 +4890,7 @@ ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
if (type == -1) { /* MISC IVAR */
index = (entry & 1) * 8;
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
@@ -4830,7 +4917,7 @@ ixgbe_configure_ivars(struct adapter *adapter)
u32 newitr;
if (ixgbe_max_interrupt_rate > 0)
- newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
+ newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
else
newitr = 0;
@@ -5193,12 +5280,21 @@ ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
usec = ((reg & 0x0FF8) >> 3);
if (usec > 0)
- rate = 1000000 / usec;
+ rate = 500000 / usec;
else
rate = 0;
error = sysctl_handle_int(oidp, &rate, 0, req);
if (error || !req->newptr)
return error;
+ reg &= ~0xfff; /* default, no limitation */
+ ixgbe_max_interrupt_rate = 0;
+ if (rate > 0 && rate < 500000) {
+ if (rate < 1000)
+ rate = 1000;
+ ixgbe_max_interrupt_rate = rate;
+		reg |= ((4000000 / rate) & 0xff8);
+ }
+ IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
return 0;
}
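The handler round-trips the EITR interval field (bits 3-11, hence the 0x0FF8 mask): writes store 4000000/rate, reads report 500000/usec after shifting right by 3, and the two agree because 500000 * 8 = 4000000. Masking quantizes the interval to multiples of 8 counts, so the readback is only approximate; worked numbers for 8000 interrupts/s:

    static unsigned
    eitr_round_trip(unsigned rate)                  /* e.g. 8000 */
    {
        unsigned field = (4000000 / rate) & 0x0FF8; /* 500 -> 496 */
        unsigned usec  = field >> 3;                /* 62 */
        return (500000 / usec);                     /* 8064, ~1% high */
    }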
@@ -5252,10 +5348,13 @@ ixgbe_add_hw_stats(struct adapter *adapter)
queue_list = SYSCTL_CHILDREN(queue_node);
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
- CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i],
+ CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
sizeof(&adapter->queues[i]),
ixgbe_sysctl_interrupt_rate_handler, "IU",
"Interrupt Rate");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(adapter->queues[i].irqs),
+ "irqs on this queue");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
ixgbe_sysctl_tdh_handler, "IU",
@@ -5479,24 +5578,23 @@ ixgbe_add_hw_stats(struct adapter *adapter)
static int
ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
- int error;
- int last = ixgbe_flow_control;
- struct adapter *adapter;
+ int error, last;
+ struct adapter *adapter = (struct adapter *) arg1;
- error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
- if (error)
+ last = adapter->fc;
+ error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
+ if ((error) || (req->newptr == NULL))
return (error);
/* Don't bother if it's not changed */
- if (ixgbe_flow_control == last)
+ if (adapter->fc == last)
return (0);
- adapter = (struct adapter *) arg1;
- switch (ixgbe_flow_control) {
+ switch (adapter->fc) {
case ixgbe_fc_rx_pause:
case ixgbe_fc_tx_pause:
case ixgbe_fc_full:
- adapter->hw.fc.requested_mode = ixgbe_flow_control;
+ adapter->hw.fc.requested_mode = adapter->fc;
break;
case ixgbe_fc_none:
default:
@@ -5521,16 +5619,19 @@ ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
** Control link advertise speed:
** 0 - normal
** 1 - advertise only 1G
+** 2 - advertise 100Mb
*/
static int
ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
{
int error = 0;
struct adapter *adapter;
+ device_t dev;
struct ixgbe_hw *hw;
ixgbe_link_speed speed, last;
adapter = (struct adapter *) arg1;
+ dev = adapter->dev;
hw = &adapter->hw;
last = hw->phy.autoneg_advertised;
@@ -5543,8 +5644,15 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
(hw->phy.multispeed_fiber)))
return (error);
+ if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
+ device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
+ return (error);
+ }
+
if (adapter->advertise == 1)
speed = IXGBE_LINK_SPEED_1GB_FULL;
+ else if (adapter->advertise == 2)
+ speed = IXGBE_LINK_SPEED_100_FULL;
else
speed = IXGBE_LINK_SPEED_1GB_FULL |
IXGBE_LINK_SPEED_10GB_FULL;
@@ -5557,3 +5665,31 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
return (error);
}
+
+/*
+** Thermal Shutdown Trigger
+** - cause a Thermal Overtemp IRQ
+*/
+static int
+ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS)
+{
+ int error, fire = 0;
+ struct adapter *adapter = (struct adapter *) arg1;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+
+ if (hw->mac.type != ixgbe_mac_X540)
+ return (0);
+
+ error = sysctl_handle_int(oidp, &fire, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (fire) {
+ u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
+ reg |= IXGBE_EICR_TS;
+ IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
+ }
+
+ return (0);
+}
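ixgbe_set_thermal_test() is the usual write-only trigger shape for a sysctl handler: fire starts at 0 so reads always report 0, req->newptr == NULL distinguishes a read from a write, and only a nonzero write performs the action (here, setting IXGBE_EICR_TS in EICS to simulate the overtemp interrupt). The same skeleton, with a hypothetical do_trigger() standing in for the register write:

    static int
    trigger_sysctl(SYSCTL_HANDLER_ARGS)
    {
        int error, fire = 0;

        error = sysctl_handle_int(oidp, &fire, 0, req);
        if (error != 0 || req->newptr == NULL)
            return (error);     /* plain read, or copyin failed */
        if (fire)
            do_trigger(arg1);   /* hypothetical one-shot action */
        return (0);
    }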
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index d0314b9..bffde72 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2011, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -184,9 +184,11 @@
#define IXGBE_RX_HDR 128
#define IXGBE_VFTA_SIZE 128
#define IXGBE_BR_SIZE 4096
-#define IXGBE_QUEUE_IDLE 0
-#define IXGBE_QUEUE_WORKING 1
-#define IXGBE_QUEUE_HUNG 2
+#define IXGBE_QUEUE_MIN_FREE 32
+#define IXGBE_QUEUE_IDLE 1
+#define IXGBE_QUEUE_WORKING 2
+#define IXGBE_QUEUE_HUNG 4
+#define IXGBE_QUEUE_DEPLETED 8
/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
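The queue_status values change from an enumeration (0/1/2, replaced wholesale on assignment) to distinct power-of-two bits, so a queue can be WORKING and DEPLETED at the same time and each state can be set, cleared, and tested independently; the driver hunks earlier in this commit use exactly these idioms, sketched here against the definitions just introduced:

    /* Sketch of the idioms enabled by the bit-flag queue_status. */
    struct txq_sketch { unsigned queue_status; };

    static void
    queue_flags_demo(struct txq_sketch *txr)
    {
        txr->queue_status |= IXGBE_QUEUE_DEPLETED;  /* set one bit */
        txr->queue_status &= ~IXGBE_QUEUE_DEPLETED; /* clear it */
        if (txr->queue_status & IXGBE_QUEUE_HUNG)   /* test one bit */
            ;                                       /* react here */
        txr->queue_status = IXGBE_QUEUE_IDLE;       /* '=' resets all */
    }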
@@ -323,6 +325,7 @@ struct rx_ring {
bool hdr_split;
bool hw_rsc;
bool discard;
+ bool vtag_strip;
u32 next_to_refresh;
u32 next_to_check;
char mtx_name[16];
@@ -387,6 +390,7 @@ struct adapter {
/* Info about the interface */
u32 optics;
+ u32 fc; /* local flow ctrl setting */
int advertise; /* link speeds */
bool link_active;
u16 max_frame_size;
diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c
index 607a8e7..ab41c7b 100644
--- a/sys/dev/ixgbe/ixgbe_82598.c
+++ b/sys/dev/ixgbe/ixgbe_82598.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,46 +33,33 @@
/*$FreeBSD$*/
#include "ixgbe_type.h"
+#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
-s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
+ ixgbe_link_speed *speed,
+ bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
-s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete);
+ bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *link_up,
- bool link_up_wait_to_complete);
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
-void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
-s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
-s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
-void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
-void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -131,7 +118,7 @@ u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
if (hw->mac.msix_vectors_from_pcie) {
msix_count = IXGBE_READ_PCIE_WORD(hw,
- IXGBE_PCIE_MSIX_82598_CAPS);
+ IXGBE_PCIE_MSIX_82598_CAPS);
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW, so increment to give
@@ -168,7 +155,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->ops.reset_hw = &ixgbe_reset_hw_82598;
mac->ops.get_media_type = &ixgbe_get_media_type_82598;
mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_82598;
+ &ixgbe_get_supported_physical_layer_82598;
mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
@@ -177,18 +164,19 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+ mac->ops.set_vlvf = NULL;
mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
/* Flow Control */
mac->ops.fc_enable = &ixgbe_fc_enable_82598;
- mac->mcft_size = 128;
- mac->vft_size = 128;
- mac->num_rar_entries = 16;
- mac->rx_pb_size = 512;
- mac->max_tx_queues = 32;
- mac->max_rx_queues = 64;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+ mac->mcft_size = 128;
+ mac->vft_size = 128;
+ mac->num_rar_entries = 16;
+ mac->rx_pb_size = 512;
+ mac->max_tx_queues = 32;
+ mac->max_rx_queues = 64;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
/* SFP+ Module */
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
@@ -197,8 +185,11 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->ops.check_link = &ixgbe_check_mac_link_82598;
mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
mac->ops.flap_tx_laser = NULL;
- mac->ops.get_link_capabilities =
- &ixgbe_get_link_capabilities_82598;
+ mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = NULL;
return ret_val;
}
@@ -228,7 +219,7 @@ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
+ &ixgbe_get_copper_link_capabilities_generic;
}
switch (hw->phy.type) {
@@ -236,11 +227,7 @@ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
- break;
- case ixgbe_phy_aq:
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_generic;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -256,8 +243,8 @@ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
- &list_offset,
- &data_offset);
+ &list_offset,
+ &data_offset);
if (ret_val != IXGBE_SUCCESS) {
ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -301,7 +288,7 @@ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@@ -321,8 +308,8 @@ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = IXGBE_SUCCESS;
u32 autoc = 0;
@@ -389,7 +376,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_cu_unknown:
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
default:
@@ -440,7 +426,6 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
- u32 rx_pba_size;
u32 link_speed = 0;
bool link_up;
@@ -532,16 +517,13 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
- rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
-
- reg = (rx_pba_size - hw->fc.low_water) << 6;
+ reg = hw->fc.low_water << 6;
if (hw->fc.send_xon)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
- reg = (rx_pba_size - hw->fc.high_water) << 6;
+ reg = hw->fc.high_water[packetbuf_num] << 6;
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
@@ -569,7 +551,7 @@ out:
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -627,7 +609,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
for (timeout = 0;
timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
(an_reg & IXGBE_MII_AUTONEG_LINK_UP))
@@ -654,8 +636,8 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
* Reads the links register to determine if link is up and the current speed
**/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *link_up,
- bool link_up_wait_to_complete)
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
{
u32 links_reg;
u32 i;
@@ -673,7 +655,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
- &adapt_comp_reg);
+ &adapt_comp_reg);
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if ((link_reg & 1) &&
@@ -685,11 +667,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
}
msec_delay(100);
hw->phy.ops.read_reg(hw, 0xC79F,
- IXGBE_TWINAX_DEV,
- &link_reg);
+ IXGBE_TWINAX_DEV,
+ &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C,
- IXGBE_TWINAX_DEV,
- &adapt_comp_reg);
+ IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
}
} else {
if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -730,11 +712,6 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
(ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
*link_up = FALSE;
- /* if link is down, zero out the current_mode */
- if (*link_up == FALSE) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = FALSE;
- }
out:
return IXGBE_SUCCESS;
}
@@ -749,14 +726,14 @@ out:
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
{
- s32 status = IXGBE_SUCCESS;
+ s32 status = IXGBE_SUCCESS;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
- u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- u32 autoc = curr_autoc;
- u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc = curr_autoc;
+ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
DEBUGFUNC("ixgbe_setup_mac_link_82598");
@@ -769,7 +746,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
/* Set KX4/KX support according to speed requested */
else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -786,7 +763,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* stored values
*/
status = ixgbe_start_mac_link_82598(hw,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
}
return status;
@@ -803,9 +780,9 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
{
s32 status;
@@ -813,7 +790,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -841,7 +818,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_reset_hw_82598");
/* Call adapter stop to disable tx/rx and clear interrupts */
- hw->mac.ops.stop_adapter(hw);
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
/*
* Power up the Atlas Tx lanes if they are currently powered down.
@@ -852,28 +831,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
/* Enable Tx Atlas so packets can be transmitted again */
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- analog_val);
+ analog_val);
}
/* Reset PHY */
@@ -884,26 +863,19 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
phy_status = hw->phy.ops.init(hw);
if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
goto reset_hw_out;
- else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto no_phy_reset;
+ if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto mac_reset_top;
hw->phy.ops.reset(hw);
}
-no_phy_reset:
- /*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
- * access and verify no pending requests before reset
- */
- ixgbe_disable_pcie_master(hw);
-
mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
*/
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
/* Poll for reset bit to self-clear indicating reset is complete */
@@ -918,21 +890,18 @@ mac_reset_top:
DEBUGOUT("Reset polling failed to complete.\n");
}
+ msec_delay(50);
+
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete. We use 1usec since that is
- * what is needed for ixgbe_disable_pcie_master(). The second reset
- * then clears out any effects of those events.
+ * for any pending HW events to complete.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- usec_delay(1);
goto mac_reset_top;
}
- msec_delay(50);
-
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
@@ -1003,7 +972,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
- UNREFERENCED_PARAMETER(vmdq);
+ UNREFERENCED_1PARAMETER(vmdq);
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
@@ -1030,7 +999,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on)
{
u32 regindex;
u32 bitindex;
@@ -1089,7 +1058,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
for (offset = 0; offset < hw->mac.vft_size; offset++)
IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
- 0);
+ 0);
return IXGBE_SUCCESS;
}
@@ -1109,7 +1078,7 @@ s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
DEBUGFUNC("ixgbe_read_analog_reg8_82598");
IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
- IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
IXGBE_WRITE_FLUSH(hw);
usec_delay(10);
atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1149,7 +1118,7 @@ s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
* Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+ u8 *eeprom_data)
{
s32 status = IXGBE_SUCCESS;
u16 sfp_addr = 0;
@@ -1168,16 +1137,16 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- sfp_addr);
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ sfp_addr);
/* Poll status */
for (i = 0; i < 100; i++) {
hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &sfp_stat);
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &sfp_stat);
sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
break;
@@ -1192,7 +1161,7 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
/* Read data */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
*eeprom_data = (u8)(sfp_data >> 8);
} else {
@@ -1226,7 +1195,6 @@ u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
* physical layer because 10GBase-T PHYs use LMS = KX4/KX */
switch (hw->phy.type) {
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
@@ -1365,8 +1333,50 @@ void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
}
+
+/**
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy)
+{
+ u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+ u8 i = 0;
+ UNREFERENCED_1PARAMETER(headroom);
+
+ if (!num_pb)
+ return;
+
+ /* Setup Rx packet buffer sizes */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* Setup the first four at 80KB */
+ rxpktsize = IXGBE_RXPBSIZE_80KB;
+ for (; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Setup the last four at 48KB...don't re-init i */
+ rxpktsize = IXGBE_RXPBSIZE_48KB;
+ /* Fall Through */
+ case PBA_STRATEGY_EQUAL:
+ default:
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ }
+
+ /* Setup Tx packet buffer sizes */
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+
+ return;
+}
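With rx_pb_size = 512 (KB) on the 82598 and eight packet buffers, both strategies in ixgbe_set_rxpba_82598() account for the full buffer: weighted gives the first four TCs 80KB each and the last four 48KB, while equal leaves all eight at the 64KB default. A quick arithmetic check, assuming IXGBE_MAX_PACKET_BUFFERS is 8 on this MAC:

    #include <assert.h>

    int
    main(void)
    {
        assert(4 * 80 + 4 * 48 == 512); /* PBA_STRATEGY_WEIGHTED */
        assert(8 * 64 == 512);          /* PBA_STRATEGY_EQUAL */
        return (0);
    }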
diff --git a/sys/dev/ixgbe/ixgbe_82598.h b/sys/dev/ixgbe/ixgbe_82598.h
new file mode 100755
index 0000000..01ddef5
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_82598.h
@@ -0,0 +1,52 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2012, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_82598_H_
+#define _IXGBE_82598_H_
+
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+#endif /* _IXGBE_82598_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c
index 0c27e23..a5fa15e 100644
--- a/sys/dev/ixgbe/ixgbe_82599.c
+++ b/sys/dev/ixgbe/ixgbe_82599.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,47 +33,20 @@
/*$FreeBSD$*/
#include "ixgbe_type.h"
+#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
-void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
-void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data);
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
@@ -84,9 +57,9 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
/* enable the laser control functions for SFP+ fiber */
if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
mac->ops.disable_tx_laser =
- &ixgbe_disable_tx_laser_multispeed_fiber;
+ &ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
- &ixgbe_enable_tx_laser_multispeed_fiber;
+ &ixgbe_enable_tx_laser_multispeed_fiber;
mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
} else {
@@ -141,7 +114,7 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
+ &ixgbe_get_copper_link_capabilities_generic;
}
/* Set necessary function pointers based on phy type */
@@ -150,11 +123,7 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
- break;
- case ixgbe_phy_aq:
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_generic;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
@@ -178,12 +147,13 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset = NULL;
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != IXGBE_SUCCESS)
goto setup_sfp_out;
/* PHY config will finish before releasing the semaphore */
- ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != IXGBE_SUCCESS) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto setup_sfp_out;
@@ -197,14 +167,14 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
}
/* Release the semaphore */
- ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Delay obtaining semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
/* Now restart DSP by setting Restart_AN and clearing LMS */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
- IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
- IXGBE_AUTOC_AN_RESTART));
+ IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+ IXGBE_AUTOC_AN_RESTART));
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
@@ -221,8 +191,8 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
/* Restart DSP by setting Restart_AN and return to SFI mode */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
- IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
- IXGBE_AUTOC_AN_RESTART));
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+ IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@@ -241,6 +211,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
s32 ret_val;
DEBUGFUNC("ixgbe_init_ops_82599");
@@ -257,11 +228,13 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
mac->ops.get_media_type = &ixgbe_get_media_type_82599;
mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_82599;
+ &ixgbe_get_supported_physical_layer_82599;
+ mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
- mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
+ mac->ops.start_hw = &ixgbe_start_hw_82599;
mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
@@ -274,6 +247,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
@@ -282,19 +256,31 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
/* Link */
mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
- mac->ops.check_link = &ixgbe_check_mac_link_generic;
+ mac->ops.check_link = &ixgbe_check_mac_link_generic;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
ixgbe_init_mac_link_ops_82599(hw);
- mac->mcft_size = 128;
- mac->vft_size = 128;
- mac->num_rar_entries = 128;
- mac->rx_pb_size = 512;
- mac->max_tx_queues = 128;
- mac->max_rx_queues = 128;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+ mac->mcft_size = 128;
+ mac->vft_size = 128;
+ mac->num_rar_entries = 128;
+ mac->rx_pb_size = 512;
+ mac->max_tx_queues = 128;
+ mac->max_rx_queues = 128;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+ IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+ /* EEPROM */
+ eeprom->ops.read = &ixgbe_read_eeprom_82599;
+ eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
return ret_val;
}
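For reference, a minimal sketch of how consumers dispatch through the ops
table populated above (names taken from this patch; error handling elided,
and the call assumes ixgbe_init_ops_82599() has already run):

	ixgbe_link_speed speed;
	bool link_up;

	/* resolves to ixgbe_check_mac_link_generic on 82599 parts */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);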
@@ -307,8 +293,8 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *negotiation)
+ ixgbe_link_speed *speed,
+ bool *negotiation)
{
s32 status = IXGBE_SUCCESS;
u32 autoc = 0;
@@ -391,7 +377,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
+ IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = TRUE;
}
@@ -415,7 +401,6 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_cu_unknown:
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
default:
@@ -426,6 +411,7 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_KX4:
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
@@ -433,6 +419,8 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
@@ -458,7 +446,7 @@ out:
* Restarts the link. Performs autonegotiation if needed.
**/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -572,8 +560,8 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* Set the link speed in the AUTOC register and restarts link.
**/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -618,9 +606,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
msec_delay(40);
status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_10GB_FULL,
- autoneg,
- autoneg_wait_to_complete);
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
return status;
@@ -638,7 +626,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* If we have link, just jump out */
status = ixgbe_check_link(hw, &link_speed,
- &link_up, FALSE);
+ &link_up, FALSE);
if (status != IXGBE_SUCCESS)
return status;
@@ -698,7 +686,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
*/
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed, autoneg, autoneg_wait_to_complete);
+ highest_link_speed, autoneg, autoneg_wait_to_complete);
out:
/* Set autoneg_advertised value based on input link speed */
@@ -723,8 +711,8 @@ out:
* Implements the Intel SmartSpeed algorithm.
**/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -836,8 +824,8 @@ out:
* Set the link speed in the AUTOC register and restarts link.
**/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -885,8 +873,8 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
- link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
/* Switch from 1G SFI to 10G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
(pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -894,7 +882,7 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
}
} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
/* Switch from 10G SFI to 1G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
(pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -950,9 +938,9 @@ out:
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
{
s32 status;
@@ -960,7 +948,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -977,16 +965,20 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
**/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_SUCCESS;
- u32 ctrl;
- u32 i;
- u32 autoc;
- u32 autoc2;
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl, i, autoc, autoc2;
+ bool link_up = FALSE;
DEBUGFUNC("ixgbe_reset_hw_82599");
/* Call adapter stop to disable tx/rx and clear interrupts */
- hw->mac.ops.stop_adapter(hw);
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
/* PHY ops must be identified and initialized prior to reset */
@@ -1009,48 +1001,49 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
hw->phy.ops.reset(hw);
- /*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
- * access and verify no pending requests before reset
- */
- ixgbe_disable_pcie_master(hw);
-
mac_reset_top:
/*
- * Issue global reset to the MAC. This needs to be a SW reset.
- * If link reset is used, it might reset the MAC when mng is using it
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
*/
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+ ctrl = IXGBE_CTRL_LNK_RST;
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
usec_delay(1);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- if (!(ctrl & IXGBE_CTRL_RST))
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
break;
}
- if (ctrl & IXGBE_CTRL_RST) {
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
status = IXGBE_ERR_RESET_FAILED;
DEBUGOUT("Reset polling failed to complete.\n");
}
+ msec_delay(50);
+
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete. We use 1usec since that is
- * what is needed for ixgbe_disable_pcie_master(). The second reset
- * then clears out any effects of those events.
+ * for any pending HW events to complete.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- usec_delay(1);
goto mac_reset_top;
}
- msec_delay(50);
-
/*
* Store the original AUTOC/AUTOC2 values if they have not been
* stored off yet. Otherwise restore the stored original
@@ -1071,7 +1064,7 @@ mac_reset_top:
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
autoc2 |= (hw->mac.orig_autoc2 &
- IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_AUTOC2_UPPER_MASK);
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
}
}
@@ -1093,7 +1086,7 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
@@ -1101,7 +1094,7 @@ mac_reset_top:
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
+ &hw->mac.wwpn_prefix);
reset_hw_out:
return status;
@@ -1131,7 +1124,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
DEBUGOUT("Flow Director previous command isn't complete, "
- "aborting table re-initialization. \n");
+ "aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -1145,12 +1138,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
* - write 0 to bit 8 of FDIRCMD register
*/
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
- IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- ~IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
/*
* Clear FDIR Hash register to clear any leftover hashes
@@ -1165,7 +1158,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
/* Poll init-done after we write FDIRCTRL register */
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
usec_delay(10);
}
@@ -1185,63 +1178,15 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
/**
- * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
* @hw: pointer to hardware structure
- * @pballoc: which mode to allocate filters with
+ * @fdirctrl: value to write to flow director control register
**/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
+static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
- u32 fdirctrl = 0;
- u32 pbsize;
int i;
- DEBUGFUNC("ixgbe_init_fdir_signature_82599");
-
- /*
- * Before enabling Flow Director, the Rx Packet Buffer size
- * must be reduced. The new value is the current size minus
- * flow director memory usage size.
- */
- pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
- (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
- /*
- * The defaults in the HW for RX PB 1-7 are not zero and so should be
- * intialized to zero for non DCB mode otherwise actual total RX PB
- * would be bigger than programmed and filter space would run into
- * the PB 0 region.
- */
- for (i = 1; i < 8; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
- /* Send interrupt when 64 filters are left */
- fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
- /* Set the maximum length per hash bucket to 0xA filters */
- fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
-
- switch (pballoc) {
- case IXGBE_FDIR_PBALLOC_64K:
- /* 8k - 1 signature filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
- break;
- case IXGBE_FDIR_PBALLOC_128K:
- /* 16k - 1 signature filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
- break;
- case IXGBE_FDIR_PBALLOC_256K:
- /* 32k - 1 signature filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
- break;
- default:
- /* bad value */
- return IXGBE_ERR_CONFIG;
- };
-
- /* Move the flexible bytes to use the ethertype - shift 6 words */
- fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
-
+ DEBUGFUNC("ixgbe_fdir_enable_82599");
/* Prime the keys for hashing */
IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
@@ -1264,201 +1209,71 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
msec_delay(1);
}
- if (i >= IXGBE_FDIR_INIT_DONE_POLL)
- DEBUGOUT("Flow Director Signature poll time exceeded!\n");
- return IXGBE_SUCCESS;
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ DEBUGOUT("Flow Director poll time exceeded!\n");
}
/**
- * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
* @hw: pointer to hardware structure
- * @pballoc: which mode to allocate filters with
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
- u32 fdirctrl = 0;
- u32 pbsize;
- int i;
-
- DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
-
- /*
- * Before enabling Flow Director, the Rx Packet Buffer size
- * must be reduced. The new value is the current size minus
- * flow director memory usage size.
- */
- pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
- (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
- /*
- * The defaults in the HW for RX PB 1-7 are not zero and so should be
- * intialized to zero for non DCB mode otherwise actual total RX PB
- * would be bigger than programmed and filter space would run into
- * the PB 0 region.
- */
- for (i = 1; i < 8; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
- /* Send interrupt when 64 filters are left */
- fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
- /* Initialize the drop queue to Rx queue 127 */
- fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
-
- switch (pballoc) {
- case IXGBE_FDIR_PBALLOC_64K:
- /* 2k - 1 perfect filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
- break;
- case IXGBE_FDIR_PBALLOC_128K:
- /* 4k - 1 perfect filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
- break;
- case IXGBE_FDIR_PBALLOC_256K:
- /* 8k - 1 perfect filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
- break;
- default:
- /* bad value */
- return IXGBE_ERR_CONFIG;
- };
-
- /* Turn perfect match filtering on */
- fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
- fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
-
- /* Move the flexible bytes to use the ethertype - shift 6 words */
- fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
-
- /* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,IXGBE_ATR_SIGNATURE_HASH_KEY);
+ DEBUGFUNC("ixgbe_init_fdir_signature_82599");
/*
- * Poll init-done after we write the register. Estimated times:
- * 10G: PBALLOC = 11b, timing is 60us
- * 1G: PBALLOC = 11b, timing is 600us
- * 100M: PBALLOC = 11b, timing is 6ms
- *
- * Multiple these timings by 4 if under full Rx load
- *
- * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
- * 1 msec per poll time. If we're at line rate and drop to 100M, then
- * this might not finish in our poll time, but we can live with that
- * for now.
+ * Continue setup of fdirctrl register bits:
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
*/
+ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
- /* Set the maximum length per hash bucket to 0xA filters */
- fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
- IXGBE_WRITE_FLUSH(hw);
- for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
- if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
- break;
- msec_delay(1);
- }
- if (i >= IXGBE_FDIR_INIT_DONE_POLL)
- DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
return IXGBE_SUCCESS;
}
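A minimal usage sketch of the new calling convention, assuming (per the
comment above) that the caller seeds fdirctrl with only the PBALLOC
selection and this routine ORs in the remaining fields:

	/* 64K allocation gives 8k - 1 signature filters */
	u32 fdirctrl = IXGBE_FDIRCTRL_PBALLOC_64K;

	if (ixgbe_init_fdir_signature_82599(hw, fdirctrl) != IXGBE_SUCCESS)
		DEBUGOUT("Flow Director signature init failed\n");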
/**
- * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
- * @stream: input bitstream to compute the hash on
- * @key: 32-bit hash key
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
**/
-u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
- u32 key)
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
- /*
- * The algorithm is as follows:
- * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
- * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
- * and A[n] x B[n] is bitwise AND between same length strings
- *
- * K[n] is 16 bits, defined as:
- * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
- * for n modulo 32 < 15, K[n] =
- * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
- *
- * S[n] is 16 bits, defined as:
- * for n >= 15, S[n] = S[n:n - 15]
- * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
- *
- * To simplify for programming, the algorithm is implemented
- * in software this way:
- *
- * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
- *
- * for (i = 0; i < 352; i+=32)
- * hi_hash_dword[31:0] ^= Stream[(i+31):i];
- *
- * lo_hash_dword[15:0] ^= Stream[15:0];
- * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
- * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
- *
- * hi_hash_dword[31:0] ^= Stream[351:320];
- *
- * if(key[0])
- * hash[15:0] ^= Stream[15:0];
- *
- * for (i = 0; i < 16; i++) {
- * if (key[i])
- * hash[15:0] ^= lo_hash_dword[(i+15):i];
- * if (key[i + 16])
- * hash[15:0] ^= hi_hash_dword[(i+15):i];
- * }
- *
- */
- __be32 common_hash_dword = 0;
- u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 hash_result = 0;
- u8 i;
-
- /* record the flow_vm_vlan bits as they are a key part to the hash */
- flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
-
- /* generate common hash dword */
- for (i = 10; i; i -= 2)
- common_hash_dword ^= atr_input->dword_stream[i] ^
- atr_input->dword_stream[i - 1];
-
- hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
-
- /* low dword is word swapped version of common */
- lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-
- /* apply flow ID/VM pool/VLAN ID bits to hash words */
- hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
-
- /* Process bits 0 and 16 */
- if (key & 0x0001) hash_result ^= lo_hash_dword;
- if (key & 0x00010000) hash_result ^= hi_hash_dword;
+ DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
/*
- * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
- * delay this because bit 0 of the stream should not be processed
- * so we do not add the vlan until after bit 0 was processed
+ * Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
*/
- lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-
+ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+ IXGBE_FDIRCTRL_REPORT_STATUS |
+ (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+ (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
- /* process the remaining 30 bits in the key 2 bits at a time */
- for (i = 15; i; i-- ) {
- if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
- if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
- }
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
- return hash_result & IXGBE_ATR_HASH_MASK;
+ return IXGBE_SUCCESS;
}
/*
@@ -1495,8 +1310,8 @@ do { \
* defines, and computing two keys at once since the hashed dword stream
* will be the same for both keys.
**/
-static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common)
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common)
{
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
@@ -1554,13 +1369,14 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
/**
* ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
* @hw: pointer to hardware structure
- * @stream: input bitstream
+ * @input: unique input dword
+ * @common: compressed common input dword
* @queue: queue index to direct traffic to
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue)
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
{
u64 fdirhashcmd;
u32 fdircmd;
@@ -1586,7 +1402,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
@@ -1603,6 +1419,101 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
return IXGBE_SUCCESS;
}
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ * @atr_input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input, resulting in a cleaned up atr_input data stream.
+ * Second, it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask)
+{
+
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+
+ /* Apply masks to input data */
+ input->dword_stream[0] &= input_mask->dword_stream[0];
+ input->dword_stream[1] &= input_mask->dword_stream[1];
+ input->dword_stream[2] &= input_mask->dword_stream[2];
+ input->dword_stream[3] &= input_mask->dword_stream[3];
+ input->dword_stream[4] &= input_mask->dword_stream[4];
+ input->dword_stream[5] &= input_mask->dword_stream[5];
+ input->dword_stream[6] &= input_mask->dword_stream[6];
+ input->dword_stream[7] &= input_mask->dword_stream[7];
+ input->dword_stream[8] &= input_mask->dword_stream[8];
+ input->dword_stream[9] &= input_mask->dword_stream[9];
+ input->dword_stream[10] &= input_mask->dword_stream[10];
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
+
+ /* generate common hash dword */
+ hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
+ input->dword_stream[2] ^
+ input->dword_stream[3] ^
+ input->dword_stream[4] ^
+ input->dword_stream[5] ^
+ input->dword_stream[6] ^
+ input->dword_stream[7] ^
+ input->dword_stream[8] ^
+ input->dword_stream[9] ^
+ input->dword_stream[10]);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the vlan until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+
+ /*
+ * Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
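The unrolled iterations above are equivalent to the loop below, shown as a
hypothetical standalone model (bkt_hash_fold is illustrative only, using the
driver's u32 typedef); the driver special-cases iteration 0 so it runs
before the VLAN bits are folded into the low dword:

	/*
	 * Sketch: bit n of the low key half selects lo >> n,
	 * bit n of the high half selects hi >> n.
	 */
	static u32
	bkt_hash_fold(u32 key, u32 lo, u32 hi)
	{
		u32 hash = 0;
		int n;

		for (n = 0; n < 16; n++) {
			if (key & (0x01u << n))
				hash ^= lo >> n;
			if (key & (0x01u << (n + 16)))
				hash ^= hi >> n;
		}
		return (hash & 0x1FFF);	/* 13-bit bucket, 8K max */
	}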
/**
* ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
* @input_mask: mask to be bit swapped
@@ -1612,11 +1523,11 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* generate a correctly swapped value we need to bit swap the mask and that
* is what is accomplished by this function.
**/
-static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
- u32 mask = IXGBE_NTOHS(input_masks->dst_port_mask);
+ u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
- mask |= IXGBE_NTOHS(input_masks->src_port_mask);
+ mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
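The mask-and-shift sequence above is the classic in-register bit reversal;
carried through all five steps it reverses a full 32-bit word, as in this
illustrative sketch (u32 per the driver's typedefs):

	static u32
	bit_reverse_32(u32 v)
	{
		v = ((v & 0x55555555) << 1) | ((v & 0xAAAAAAAA) >> 1); /* bits */
		v = ((v & 0x33333333) << 2) | ((v & 0xCCCCCCCC) >> 2); /* pairs */
		v = ((v & 0x0F0F0F0F) << 4) | ((v & 0xF0F0F0F0) >> 4); /* nibbles */
		v = ((v & 0x00FF00FF) << 8) | ((v & 0xFF00FF00) >> 8); /* bytes */
		v = (v << 16) | (v >> 16);                             /* halves */
		return (v);
	}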
@@ -1638,55 +1549,16 @@ static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
#define IXGBE_STORE_AS_BE16(_value) \
- (((u16)(_value) >> 8) | ((u16)(_value) << 8))
-
+ IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
-/**
- * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
- * @hw: pointer to hardware structure
- * @input: input bitstream
- * @input_masks: masks for the input bitstream
- * @soft_id: software index for the filters
- * @queue: queue index to direct traffic to
- *
- * Note that the caller to this function must lock before calling, since the
- * hardware writes must be protected from one another.
- **/
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- struct ixgbe_atr_input_masks *input_masks,
- u16 soft_id, u8 queue)
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask)
{
- u32 fdirhash;
- u32 fdircmd;
- u32 fdirport, fdirtcpm;
- u32 fdirvlan;
- /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
- u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
- IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = IXGBE_FDIRM_DIPv6;
+ u32 fdirtcpm;
- DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
-
- /*
- * Check flow_type formatting, and bail out before we touch the hardware
- * if there's a configuration issue
- */
- switch (input->formatted.flow_type) {
- case IXGBE_ATR_FLOW_TYPE_IPV4:
- /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
- fdirm |= IXGBE_FDIRM_L4P;
- case IXGBE_ATR_FLOW_TYPE_SCTPV4:
- if (input_masks->dst_port_mask || input_masks->src_port_mask) {
- DEBUGOUT(" Error on src/dst port mask\n");
- return IXGBE_ERR_CONFIG;
- }
- case IXGBE_ATR_FLOW_TYPE_TCPV4:
- case IXGBE_ATR_FLOW_TYPE_UDPV4:
- break;
- default:
- DEBUGOUT(" Error on flow type input\n");
- return IXGBE_ERR_CONFIG;
- }
+ DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
/*
* Program the relevant mask registers. If src/dst_port or src/dst_addr
@@ -1698,41 +1570,71 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
* point in time.
*/
- /* Program FDIRM */
- switch (IXGBE_NTOHS(input_masks->vlan_id_mask) & 0xEFFF) {
- case 0xEFFF:
- /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
- fdirm &= ~IXGBE_FDIRM_VLANID;
- case 0xE000:
- /* Unmask VLAN prio - bit 1 */
- fdirm &= ~IXGBE_FDIRM_VLANP;
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ DEBUGOUT(" bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_POOL;
+ case 0x7F:
break;
- case 0x0FFF:
- /* Unmask VLAN ID - bit 0 */
- fdirm &= ~IXGBE_FDIRM_VLANID;
+ default:
+ DEBUGOUT(" Error on vm pool mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_L4TYPE_MASK:
break;
+ default:
+ DEBUGOUT(" Error on flow type mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
case 0x0000:
- /* do nothing, vlans already masked */
+ /* mask VLAN ID, fall through to mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0x0FFF:
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ break;
+ case 0xE000:
+ /* mask VLAN ID only, fall through */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0xEFFF:
+ /* no VLAN fields masked */
break;
default:
DEBUGOUT(" Error on VLAN mask\n");
return IXGBE_ERR_CONFIG;
}
- if (input_masks->flex_mask & 0xFFFF) {
- if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
- DEBUGOUT(" Error on flexible byte mask\n");
- return IXGBE_ERR_CONFIG;
- }
- /* Unmask Flex Bytes - bit 4 */
- fdirm &= ~IXGBE_FDIRM_FLEX;
+ switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes, fall through */
+ fdirm |= IXGBE_FDIRM_FLEX;
+ case 0xFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
}
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
- fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
/* write both the same so that UDP and TCP use the same mask */
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
@@ -1740,24 +1642,34 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* store source and destination IP masks (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
- ~input_masks->src_ip_mask[0]);
+ ~input_mask->formatted.src_ip[0]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
- ~input_masks->dst_ip_mask[0]);
+ ~input_mask->formatted.dst_ip[0]);
- /* Apply masks to input data */
- input->formatted.vlan_id &= input_masks->vlan_id_mask;
- input->formatted.flex_bytes &= input_masks->flex_mask;
- input->formatted.src_port &= input_masks->src_port_mask;
- input->formatted.dst_port &= input_masks->dst_port_mask;
- input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
- input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+ return IXGBE_SUCCESS;
+}
- /* record vlan (little-endian) and flex_bytes(big-endian) */
- fdirvlan =
- IXGBE_STORE_AS_BE16(IXGBE_NTOHS(input->formatted.flex_bytes));
- fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
- fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+
+ DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
+
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+ input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.src_ip[2]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
/* record source and destination port (little-endian) */
fdirport = IXGBE_NTOHS(input->formatted.dst_port);
@@ -1765,29 +1677,140 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
fdirport |= IXGBE_NTOHS(input->formatted.src_port);
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
- /* record the first 32 bits of the destination address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+ /* record vlan (little-endian) and flex_bytes(big-endian) */
+ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
- /* record the source address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ if (queue == IXGBE_FDIR_DROP_QUEUE)
+ fdircmd |= IXGBE_FDIRCMD_DROP;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
- /* we only want the bucket hash so drop the upper 16 bits */
- fdirhash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
- fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
return IXGBE_SUCCESS;
}
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash;
+ u32 fdircmd = 0;
+ u32 retry_count;
+ s32 err = IXGBE_SUCCESS;
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ for (retry_count = 10; retry_count; retry_count--) {
+ /* allow 10us for query to process */
+ usec_delay(10);
+ /* verify query completed successfully */
+ fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ }
+
+ if (!retry_count)
+ err = IXGBE_ERR_FDIR_REINIT_FAILED;
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+
+ return err;
+}
+
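A removal sketch, assuming the caller presents the same masked input and
soft_id that were used at add time, so the recomputed bucket hash reproduces
the FDIRHASH value that was programmed:

	/* input/mask/soft_id must match the original add */
	ixgbe_atr_compute_perfect_hash_82599(&input, &mask);
	err = ixgbe_fdir_erase_perfect_filter_82599(hw, &input, soft_id);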
+/**
+ * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
+ * @hw: pointer to hardware structure
+ * @input: input bitstream
+ * @input_mask: mask for the input bitstream
+ * @soft_id: software index for the filters
+ * @queue: queue index to direct traffic to
+ *
+ * Note that the caller to this function must lock before calling, since the
+ * hardware writes must be protected from one another.
+ **/
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask,
+ u16 soft_id, u8 queue)
+{
+ s32 err = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
+
+ /*
+ * Check flow_type formatting, and bail out before we touch the hardware
+ * if there's a configuration issue
+ */
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
+ if (input->formatted.dst_port || input->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ break;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ if (input->formatted.dst_port || input->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+ break;
+ default:
+ DEBUGOUT(" Error on flow type input\n");
+ return err;
+ }
+
+ /* program input mask into the HW */
+ err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
+ if (err)
+ return err;
+
+ /* apply mask and compute/store hash */
+ ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
+
+ /* program filters to filter memory */
+ return ixgbe_fdir_write_perfect_filter_82599(hw, input,
+ soft_id, queue);
+}
+
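A hypothetical caller sketch (field names per union ixgbe_atr_input; port
values in the input stream are big-endian, and the caller must hold a lock
as noted above): steer TCP/IPv4 traffic with destination port 80 to Rx
queue 3:

	union ixgbe_atr_input input, mask;
	s32 err;

	memset(&input, 0, sizeof(input));
	memset(&mask, 0, sizeof(mask));
	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	input.formatted.dst_port = htons(80);
	mask.formatted.dst_port = 0xFFFF;	/* match the full dest port */

	err = ixgbe_fdir_add_perfect_filter_82599(hw, &input, &mask,
	    1 /* soft_id */, 3 /* queue */);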
/**
* ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
* @hw: pointer to hardware structure
@@ -1803,7 +1826,7 @@ s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
DEBUGFUNC("ixgbe_read_analog_reg8_82599");
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
- (reg << 8));
+ (reg << 8));
IXGBE_WRITE_FLUSH(hw);
usec_delay(10);
core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
@@ -1835,18 +1858,18 @@ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
}
/**
- * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
+ * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* Starts the hardware using the generic start_hw function
* and the generation-specific start_hw function.
* Then performs revision-specific operations, if any.
**/
-s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
- DEBUGFUNC("ixgbe_start_hw_rev_1__82599");
+ DEBUGFUNC("ixgbe_start_hw_82599");
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val != IXGBE_SUCCESS)
@@ -1886,7 +1909,7 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
goto out;
else
- status = ixgbe_identify_sfp_module_generic(hw);
+ status = ixgbe_identify_module_generic(hw);
}
/* Set PHY type none if no PHY detected */
@@ -1927,7 +1950,6 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
@@ -2033,9 +2055,6 @@ out:
**/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
-#define IXGBE_MAX_SECRX_POLL 30
- int i;
- int secrxreg;
DEBUGFUNC("ixgbe_enable_rx_dma_82599");
@@ -2045,28 +2064,12 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* the Rx DMA unit. Therefore, make sure the security engine is
* completely disabled prior to enabling the Rx unit.
*/
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
- if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
- break;
- else
- /* Use interrupt-safe sleep just in case */
- usec_delay(10);
- }
- /* For informational purposes only */
- if (i >= IXGBE_MAX_SECRX_POLL)
- DEBUGOUT("Rx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
+ hw->mac.ops.disable_sec_rx_path(hw);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- IXGBE_WRITE_FLUSH(hw);
+
+ hw->mac.ops.enable_sec_rx_path(hw);
return IXGBE_SUCCESS;
}
@@ -2103,16 +2106,15 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
/* get the offset to the Pass Through Patch Configuration block */
hw->eeprom.ops.read(hw, (fw_offset +
- IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
- &fw_ptp_cfg_offset);
+ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+ &fw_ptp_cfg_offset);
if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
goto fw_version_out;
/* get the firmware version */
hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
- IXGBE_FW_PATCH_VERSION_4),
- &fw_version);
+ IXGBE_FW_PATCH_VERSION_4), &fw_version);
if (fw_version > 0x5)
status = IXGBE_SUCCESS;
@@ -2145,8 +2147,8 @@ bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
/* get the offset to the LESM Parameters block */
status = hw->eeprom.ops.read(hw, (fw_offset +
- IXGBE_FW_LESM_PARAMETERS_PTR),
- &fw_lesm_param_offset);
+ IXGBE_FW_LESM_PARAMETERS_PTR),
+ &fw_lesm_param_offset);
if ((status != IXGBE_SUCCESS) ||
(fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
@@ -2154,8 +2156,8 @@ bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
/* get the lesm state word */
status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
- IXGBE_FW_LESM_STATE_1),
- &fw_lesm_state);
+ IXGBE_FW_LESM_STATE_1),
+ &fw_lesm_state);
if ((status == IXGBE_SUCCESS) &&
(fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
@@ -2165,4 +2167,69 @@ out:
return lesm_enabled;
}
+/**
+ * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Retrieves 16 bit word(s) read from EEPROM
+ **/
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD otherwise use bit bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
+ data);
+ else
+ ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
+ words,
+ data);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_eeprom_82599 - Read EEPROM word using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM
+ **/
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_read_eeprom_82599");
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD otherwise use bit bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+
+ return ret_val;
+}
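A short usage sketch, on the assumption that once ixgbe_init_ops_82599()
installs these handlers, callers read through the generic ops table and get
EERD automatically whenever the address fits in 14 bits, bit-bang otherwise
(the offset below is hypothetical):

	u16 word;

	if (hw->eeprom.ops.read(hw, 0x10 /* hypothetical offset */, &word) ==
	    IXGBE_SUCCESS)
		DEBUGOUT1("EEPROM word = 0x%04x\n", word);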
diff --git a/sys/dev/ixgbe/ixgbe_82599.h b/sys/dev/ixgbe/ixgbe_82599.h
new file mode 100755
index 0000000..dca39b7
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_82599.h
@@ -0,0 +1,65 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2012, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_82599_H_
+#define _IXGBE_82599_H_
+
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+#endif /* _IXGBE_82599_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index 4b632d2..b8b94b8 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,10 +35,6 @@
#include "ixgbe_api.h"
#include "ixgbe_common.h"
-extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
-
/**
* ixgbe_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
@@ -70,8 +66,12 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
status = ixgbe_init_ops_82599(hw);
break;
case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
status = ixgbe_init_ops_vf(hw);
break;
+ case ixgbe_mac_X540:
+ status = ixgbe_init_ops_X540(hw);
+ break;
default:
status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
@@ -113,9 +113,12 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_XAUI_LOM:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
@@ -123,6 +126,12 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_VF:
hw->mac.type = ixgbe_mac_82599_vf;
break;
+ case IXGBE_DEV_ID_X540_VF:
+ hw->mac.type = ixgbe_mac_X540_vf;
+ break;
+ case IXGBE_DEV_ID_X540T:
+ hw->mac.type = ixgbe_mac_X540;
+ break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
@@ -132,7 +141,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
}
DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
- hw->mac.type, ret_val);
+ hw->mac.type, ret_val);
return ret_val;
}
@@ -145,7 +154,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
s32 ixgbe_init_hw(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -158,7 +167,7 @@ s32 ixgbe_init_hw(struct ixgbe_hw *hw)
s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -174,7 +183,7 @@ s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
s32 ixgbe_start_hw(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -201,7 +210,7 @@ void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw)
s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -213,7 +222,7 @@ s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
- ixgbe_media_type_unknown);
+ ixgbe_media_type_unknown);
}
/**
@@ -229,7 +238,7 @@ enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
{
return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
- (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
+ (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -243,7 +252,7 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
- (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -256,7 +265,7 @@ s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
- (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -269,7 +278,7 @@ s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
{
return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
- (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
+ (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -282,11 +291,11 @@ s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
* block to check the support for the alternative WWNN/WWPN prefix support.
**/
s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+ u16 *wwpn_prefix)
{
return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
- (hw, wwnn_prefix, wwpn_prefix),
- IXGBE_NOT_IMPLEMENTED);
+ (hw, wwnn_prefix, wwpn_prefix),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -299,8 +308,8 @@ s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
{
return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
- (hw, bs),
- IXGBE_NOT_IMPLEMENTED);
+ (hw, bs),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -312,7 +321,7 @@ s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -349,7 +358,7 @@ u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -366,19 +375,6 @@ s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
}
/**
- * ixgbe_read_pba_length - Reads part number string length from EEPROM
- * @hw: pointer to hardware structure
- * @pba_num_size: part number string buffer length
- *
- * Reads the part number length from the EEPROM.
- * Returns expected buffer size in pba_num_size.
- **/
-s32 ixgbe_read_pba_length(struct ixgbe_hw *hw, u32 *pba_num_size)
-{
- return ixgbe_read_pba_length_generic(hw, pba_num_size);
-}
-
-/**
* ixgbe_read_pba_num - Reads part number from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number from the EEPROM
@@ -402,7 +398,7 @@ s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
if (hw->phy.type == ixgbe_phy_unknown) {
status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
return status;
@@ -423,7 +419,7 @@ s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
if (status == IXGBE_SUCCESS) {
status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
return status;
}
@@ -438,8 +434,8 @@ s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
s32 status = IXGBE_SUCCESS;
status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
- (hw, firmware_version),
- IXGBE_NOT_IMPLEMENTED);
+ (hw, firmware_version),
+ IXGBE_NOT_IMPLEMENTED);
return status;
}
@@ -452,13 +448,13 @@ s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
* Reads a value from a specified PHY register
**/
s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
+ u16 *phy_data)
{
if (hw->phy.id == 0)
ixgbe_identify_phy(hw);
return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
- device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -470,13 +466,13 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* Writes a value to specified PHY register
**/
s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 phy_data)
+ u16 phy_data)
{
if (hw->phy.id == 0)
ixgbe_identify_phy(hw);
return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
- device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -488,7 +484,7 @@ s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -499,10 +495,10 @@ s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
* the PHY.
**/
s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up)
+ bool *link_up)
{
return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
- link_up), IXGBE_NOT_IMPLEMENTED);
+ link_up), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -514,12 +510,12 @@ s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* Sets the auto advertised capabilities
**/
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
+ bool autoneg,
+ bool autoneg_wait_to_complete)
{
return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
- autoneg, autoneg_wait_to_complete),
- IXGBE_NOT_IMPLEMENTED);
+ autoneg, autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -529,11 +525,11 @@ s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* Reads the links register to determine if link is up and the current speed
**/
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete)
+ bool *link_up, bool link_up_wait_to_complete)
{
return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
- link_up, link_up_wait_to_complete),
- IXGBE_NOT_IMPLEMENTED);
+ link_up, link_up_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -584,12 +580,12 @@ void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
* Performs autonegotiation if needed.
**/
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
+ bool autoneg,
+ bool autoneg_wait_to_complete)
{
return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
- autoneg, autoneg_wait_to_complete),
- IXGBE_NOT_IMPLEMENTED);
+ autoneg, autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -599,10 +595,10 @@ s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* Determines the link capabilities of the current configuration.
**/
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *autoneg)
+ bool *autoneg)
{
return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
- speed, autoneg), IXGBE_NOT_IMPLEMENTED);
+ speed, autoneg), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -615,7 +611,7 @@ s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
{
return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -628,7 +624,7 @@ s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
{
return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -641,7 +637,7 @@ s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
{
return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -653,7 +649,7 @@ s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
{
return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -666,7 +662,7 @@ s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
@@ -683,7 +679,26 @@ s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
{
return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes 16 bit word(s) to EEPROM. If ixgbe_update_eeprom_checksum is not
+ * called after this function, the EEPROM will most likely contain an
+ * invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
}
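The checksum caveat in the comment above is easy to trip over; a minimal caller sketch (the function name, offset and data values are illustrative placeholders, not code from this driver):

static s32 example_eeprom_update(struct ixgbe_hw *hw)
{
	u16 buf[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
	s32 status;

	/* Burst write; the EEPROM checksum word is now stale. */
	status = ixgbe_write_eeprom_buffer(hw, 0x100, 4, buf);
	if (status != IXGBE_SUCCESS)
		return status;

	/* Recompute and store the checksum word. */
	return ixgbe_update_eeprom_checksum(hw);
}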
/**
@@ -697,7 +712,24 @@ s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of words
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM
+ **/
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -710,7 +742,7 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
{
return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
- (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+ (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -720,7 +752,7 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -735,7 +767,7 @@ s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
- (hw, addr, vmdq),
+ (hw, addr, vmdq),
IXGBE_NOT_IMPLEMENTED);
}
@@ -750,10 +782,10 @@ s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
* Puts an ethernet address into a receive address register.
**/
s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr)
+ u32 enable_addr)
{
return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
- enable_addr), IXGBE_NOT_IMPLEMENTED);
+ enable_addr), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -766,7 +798,7 @@ s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
{
return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -778,7 +810,7 @@ s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -790,7 +822,7 @@ s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -804,7 +836,7 @@ s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -828,11 +860,11 @@ u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
* first secondary addresses, and falls back to promiscuous mode as needed.
**/
s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr func)
+ u32 addr_count, ixgbe_mc_addr_itr func)
{
return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
- addr_list, addr_count, func),
- IXGBE_NOT_IMPLEMENTED);
+ addr_list, addr_count, func),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -848,11 +880,12 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
* multicast table.
**/
s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr func)
+ u32 mc_addr_count, ixgbe_mc_addr_itr func,
+ bool clear)
{
return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
- mc_addr_list, mc_addr_count, func),
- IXGBE_NOT_IMPLEMENTED);
+ mc_addr_list, mc_addr_count, func, clear),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -864,7 +897,7 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -876,7 +909,7 @@ s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -888,7 +921,7 @@ s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -903,7 +936,25 @@ s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
- vlan_on), IXGBE_NOT_IMPLEMENTED);
+ vlan_on), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vlvf - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ * should be changed
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool *vfta_changed)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
+ vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
}
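A hedged sketch of how a caller might pair the two filter updates; the check of vfta_changed is an assumption about the intended usage, and vlan/vind are placeholders:

	bool vfta_changed = TRUE;

	/* Program the pool filter first... */
	if (ixgbe_set_vlvf(hw, vlan, vind, TRUE, &vfta_changed) ==
	    IXGBE_SUCCESS && vfta_changed)
		/* ...and only touch the VFTA when asked to. */
		(void)ixgbe_set_vfta(hw, vlan, vind, TRUE);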
/**
@@ -916,9 +967,25 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
{
return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw, packetbuf_num),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
+ * @hw: pointer to hardware structure
+ * @maj: driver major number to be sent to firmware
+ * @min: driver minor number to be sent to firmware
+ * @build: driver build number to be sent to firmware
+ * @ver: driver version number to be sent to firmware
+ **/
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 ver)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
+ build, ver), IXGBE_NOT_IMPLEMENTED);
}
+
/**
* ixgbe_read_analog_reg8 - Reads 8 bit analog register
* @hw: pointer to hardware structure
@@ -930,7 +997,7 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
- val), IXGBE_NOT_IMPLEMENTED);
+ val), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -944,7 +1011,7 @@ s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
{
return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
- val), IXGBE_NOT_IMPLEMENTED);
+ val), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -957,7 +1024,7 @@ s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
- IXGBE_NOT_IMPLEMENTED);
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -969,10 +1036,10 @@ s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
- u8 *data)
+ u8 *data)
{
return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
- dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+ dev_addr, data), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -985,10 +1052,10 @@ s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
* at a specified device address.
**/
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
- u8 data)
+ u8 data)
{
return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
- dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+ dev_addr, data), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -1000,11 +1067,11 @@ s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
- u8 byte_offset, u8 eeprom_data)
+ u8 byte_offset, u8 eeprom_data)
{
return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
- (hw, byte_offset, eeprom_data),
- IXGBE_NOT_IMPLEMENTED);
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -1018,8 +1085,8 @@ s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
{
return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
- (hw, byte_offset, eeprom_data),
- IXGBE_NOT_IMPLEMENTED);
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -1031,7 +1098,7 @@ s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
- (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
+ (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
}
/**
@@ -1044,7 +1111,31 @@ u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
{
return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
- (hw, regval), IXGBE_NOT_IMPLEMENTED);
+ (hw, regval), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_sec_rx_path - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path.
+ **/
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_sec_rx_path - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
}
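These two helpers are meant to bracket register updates that require the Rx security block to be quiesced; a sketch of the intended pattern, with the reconfiguration step left as a placeholder:

	s32 status;

	status = ixgbe_disable_sec_rx_path(hw);
	if (status == IXGBE_SUCCESS) {
		/* ... reprogram Rx security/filter registers here ... */
		status = ixgbe_enable_sec_rx_path(hw);
	}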
/**
@@ -1058,7 +1149,7 @@ s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
{
return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
- (hw, mask), IXGBE_NOT_IMPLEMENTED);
+ (hw, mask), IXGBE_NOT_IMPLEMENTED);
}
/**
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index 6460f2a..7c612e5 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -39,6 +39,11 @@
s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
s32 ixgbe_init_hw(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
@@ -53,32 +58,31 @@ u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
-s32 ixgbe_read_pba_length(struct ixgbe_hw *hw, u32 *pba_num_size);
s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data);
+ u16 *phy_data);
s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 phy_data);
+ u16 phy_data);
s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up);
+ ixgbe_link_speed *speed,
+ bool *link_up);
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg, bool autoneg_wait_to_complete);
+ bool autoneg, bool autoneg_wait_to_complete);
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete);
+ bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *autoneg);
+ bool *autoneg);
s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
@@ -86,57 +90,78 @@ s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+
s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
+ u32 enable_addr);
s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr func);
+ u32 addr_count, ixgbe_mc_addr_itr func);
s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr func);
+ u32 mc_addr_count, ixgbe_mc_addr_itr func,
+ bool clear);
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
-
+ u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num);
-
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 ver);
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
- u8 queue);
+ u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- struct ixgbe_atr_input_masks *masks,
- u16 soft_id,
- u8 queue);
-u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *input, u32 key);
+ union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask,
+ u16 soft_id,
+ u8 queue);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
- u8 *data);
+ u8 *data);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
- u8 data);
+ u8 data);
s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
@@ -144,8 +169,7 @@ s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix);
+ u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
-
#endif /* _IXGBE_API_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index da7d95c..304b6d1 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count);
+ u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
@@ -50,15 +50,20 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset);
+ u16 *san_mac_offset);
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
-
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset);
/**
* ixgbe_init_ops_generic - Inits function ptrs
@@ -77,13 +82,18 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
/* EEPROM */
eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
- if (eec & (1 << 8))
+ if (eec & IXGBE_EEC_PRES) {
eeprom->ops.read = &ixgbe_read_eerd_generic;
- else
+ eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
+ } else {
eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+ eeprom->ops.read_buffer =
+ &ixgbe_read_eeprom_buffer_bit_bang_generic;
+ }
eeprom->ops.write = &ixgbe_write_eeprom_generic;
+ eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
eeprom->ops.validate_checksum =
- &ixgbe_validate_eeprom_checksum_generic;
+ &ixgbe_validate_eeprom_checksum_generic;
eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
@@ -121,6 +131,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.disable_mc = &ixgbe_disable_mc_generic;
mac->ops.clear_vfta = NULL;
mac->ops.set_vfta = NULL;
+ mac->ops.set_vlvf = NULL;
mac->ops.init_uta_tables = NULL;
/* Flow Control */
@@ -207,7 +218,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@@ -302,8 +313,9 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
IXGBE_READ_REG(hw, IXGBE_GORCH);
IXGBE_READ_REG(hw, IXGBE_GOTCL);
IXGBE_READ_REG(hw, IXGBE_GOTCH);
- for (i = 0; i < 8; i++)
- IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_RNBC(i));
IXGBE_READ_REG(hw, IXGBE_RUC);
IXGBE_READ_REG(hw, IXGBE_RFC);
IXGBE_READ_REG(hw, IXGBE_ROC);
@@ -338,6 +350,19 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
+ if (hw->mac.type == ixgbe_mac_X540) {
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ }
+
return IXGBE_SUCCESS;
}
@@ -350,7 +375,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
* Reads the part number string from the EEPROM.
**/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+ u32 pba_num_size)
{
s32 ret_val;
u16 data;
@@ -453,66 +478,6 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
}
/**
- * ixgbe_read_pba_length_generic - Reads part number length from EEPROM
- * @hw: pointer to hardware structure
- * @pba_num_size: part number string buffer length
- *
- * Reads the part number length from the EEPROM.
- * Returns expected buffer size in pba_num_size
- **/
-s32 ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, u32 *pba_num_size)
-{
- s32 ret_val;
- u16 data;
- u16 pba_ptr;
- u16 length;
-
- DEBUGFUNC("ixgbe_read_pba_length_generic");
-
- if (pba_num_size == NULL) {
- DEBUGOUT("PBA buffer size was null\n");
- return IXGBE_ERR_INVALID_ARGUMENT;
- }
-
- ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- /* if data is not ptr guard the PBA must be in legacy format */
- if (data != IXGBE_PBANUM_PTR_GUARD) {
- *pba_num_size = 11;
- return IXGBE_SUCCESS;
- }
-
- ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- if (length == 0xFFFF || length == 0) {
- DEBUGOUT("NVM PBA number section invalid length\n");
- return IXGBE_ERR_PBA_SECTION;
- }
-
- /*
- * Convert from length in u16 values to u8 chars, add 1 for NULL,
- * and subtract 2 because length field is included in length.
- */
- *pba_num_size = ((u32)length * 2) - 1;
-
- return IXGBE_SUCCESS;
-}
-
-/**
* ixgbe_read_pba_num_generic - Reads part number from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number from the EEPROM
@@ -663,7 +628,6 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
**/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
- u32 number_of_queues;
u32 reg_val;
u16 i;
@@ -676,35 +640,35 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
hw->adapter_stopped = TRUE;
/* Disable the receive unit */
- reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- reg_val &= ~(IXGBE_RXCTRL_RXEN);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
- IXGBE_WRITE_FLUSH(hw);
- msec_delay(2);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
- /* Clear interrupt mask to stop from interrupts being generated */
+ /* Clear interrupt mask to stop interrupts from being generated */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
- /* Clear any pending interrupts */
+ /* Clear any pending interrupts, flush previous writes */
IXGBE_READ_REG(hw, IXGBE_EICR);
/* Disable the transmit unit. Each queue must be disabled. */
- number_of_queues = hw->mac.max_tx_queues;
- for (i = 0; i < number_of_queues; i++) {
- reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
- if (reg_val & IXGBE_TXDCTL_ENABLE) {
- reg_val &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
- }
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ reg_val |= IXGBE_RXDCTL_SWFLSH;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
}
+	/* flush all queue disables */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(2);
+
/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verifying that there are no pending requests
*/
- ixgbe_disable_pcie_master(hw);
-
- return IXGBE_SUCCESS;
+ return ixgbe_disable_pcie_master(hw);
}
/**
@@ -767,6 +731,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
/* Set default semaphore delay to 10ms which is a well
* tested value */
eeprom->semaphore_delay = 10;
+ /* Clear EEPROM page size, it will be initialized as needed */
+ eeprom->word_page_size = 0;
/*
* Check for EEPROM present first.
@@ -781,9 +747,9 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* change if a future EEPROM is not SPI.
*/
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -791,36 +757,90 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
else
eeprom->address_bits = 8;
DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
- "%d\n", eeprom->type, eeprom->word_size,
- eeprom->address_bits);
+ "%d\n", eeprom->type, eeprom->word_size,
+ eeprom->address_bits);
}
return IXGBE_SUCCESS;
}
/**
- * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
* @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be written to
- * @data: 16 bit word to be written to the EEPROM
+ * @offset: offset within the EEPROM to write
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to write to EEPROM
*
- * If ixgbe_eeprom_update_checksum is not called after this function, the
- * EEPROM will most likely contain an invalid checksum.
+ * Writes 16 bit word(s) to EEPROM through bit-bang method
**/
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
{
- s32 status;
- u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+ s32 status = IXGBE_SUCCESS;
+ u16 i, count;
- DEBUGFUNC("ixgbe_write_eeprom_generic");
+ DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
hw->eeprom.ops.init_params(hw);
- if (offset >= hw->eeprom.word_size) {
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
+ /*
+	 * The EEPROM page size cannot be queried from the chip. We do lazy
+	 * initialization; it is only worth doing when writing a large buffer.
+ */
+ if ((hw->eeprom.word_page_size == 0) &&
+ (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+ ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+ /*
+	 * We cannot hold the synchronization semaphores for too long without
+	 * starving other entities, but it is still more efficient to write
+	 * in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+out:
+ return status;
+}
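The conditional expression computing count above is just a min(); an equivalent sketch, assuming IXGBE_EEPROM_RD_BUFFER_MAX_COUNT is the per-burst word limit:

static u16 example_burst_count(u16 words, u16 i)
{
	u16 remaining = words - i;

	/* min(IXGBE_EEPROM_RD_BUFFER_MAX_COUNT, remaining) */
	return (remaining > IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) ?
	    IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : remaining;
}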
+
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * If ixgbe_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word;
+ u16 page_size;
+ u16 i;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+ DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
+
/* Prepare the EEPROM for writing */
status = ixgbe_acquire_eeprom(hw);
@@ -832,56 +852,71 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
}
if (status == IXGBE_SUCCESS) {
- ixgbe_standby_eeprom(hw);
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
- /* Send the WRITE ENABLE command (8 bit opcode ) */
- ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
- IXGBE_EEPROM_OPCODE_BITS);
-
- ixgbe_standby_eeprom(hw);
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ ixgbe_shift_out_eeprom_bits(hw,
+ IXGBE_EEPROM_WREN_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
- /*
- * Some SPI eeproms use the 8th address bit embedded in the
- * opcode
- */
- if ((hw->eeprom.address_bits == 8) && (offset >= 128))
- write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
-
- /* Send the Write command (8-bit opcode + addr) */
- ixgbe_shift_out_eeprom_bits(hw, write_opcode,
- IXGBE_EEPROM_OPCODE_BITS);
- ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
- hw->eeprom.address_bits);
-
- /* Send the data */
- data = (data >> 8) | (data << 8);
- ixgbe_shift_out_eeprom_bits(hw, data, 16);
- ixgbe_standby_eeprom(hw);
+ ixgbe_standby_eeprom(hw);
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ page_size = hw->eeprom.word_page_size;
+
+			/* Send the data in a burst via SPI */
+ do {
+ word = data[i];
+ word = (word >> 8) | (word << 8);
+ ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+ if (page_size == 0)
+ break;
+
+ /* do not wrap around page */
+ if (((offset + i) & (page_size - 1)) ==
+ (page_size - 1))
+ break;
+ } while (++i < words);
+
+ ixgbe_standby_eeprom(hw);
+ msec_delay(10);
+ }
/* Done with writing - release the EEPROM */
ixgbe_release_eeprom(hw);
}
-out:
return status;
}
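Two details in the loop above are worth spelling out: the boundary test closes a burst on the last word of a device page, and the swap puts the high byte first on the SPI wire. A worked sketch with a hypothetical 8-word page:

/*
 * With page_size = 8 and offset = 6:
 *   i = 0: (6 + 0) & 7 == 6  -> keep bursting
 *   i = 1: (6 + 1) & 7 == 7  -> last word of the page, stop the burst
 * The next iteration issues a fresh write command at address 8.
 */
static u16 example_spi_word(u16 word)
{
	/* The EEPROM expects the high byte first; the host word is LE. */
	return (word >> 8) | (word << 8);
}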
/**
- * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
* @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be read
- * @data: read 16 bit value from EEPROM
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
*
- * Reads 16 bit value from EEPROM through bit-bang method
+ * If ixgbe_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
**/
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data)
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
s32 status;
- u16 word_in;
- u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
- DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
+ DEBUGFUNC("ixgbe_write_eeprom_generic");
hw->eeprom.ops.init_params(hw);
@@ -890,6 +925,80 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
goto out;
}
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+	 * We cannot hold the synchronization semaphores for too long without
+	 * starving other entities, but it is still more efficient to read
+	 * in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word_in;
+ u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
+
/* Prepare the EEPROM for reading */
status = ixgbe_acquire_eeprom(hw);
@@ -901,108 +1010,216 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
}
if (status == IXGBE_SUCCESS) {
- ixgbe_standby_eeprom(hw);
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ /* Read the data. */
+ word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
- /*
- * Some SPI eeproms use the 8th address bit embedded in the
- * opcode
- */
- if ((hw->eeprom.address_bits == 8) && (offset >= 128))
- read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+ /* End this read operation */
+ ixgbe_release_eeprom(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status;
- /* Send the READ command (opcode + addr) */
- ixgbe_shift_out_eeprom_bits(hw, read_opcode,
- IXGBE_EEPROM_OPCODE_BITS);
- ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
- hw->eeprom.address_bits);
+ DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
- /* Read the data. */
- word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
- *data = (word_in >> 8) | (word_in << 8);
+ hw->eeprom.ops.init_params(hw);
- /* End this read operation */
- ixgbe_release_eeprom(hw);
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
}
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+
out:
return status;
}
/**
- * ixgbe_read_eerd_generic - Read EEPROM word using EERD
+ * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
* @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EEPROM
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of word(s)
+ * @data: 16 bit word(s) from the EEPROM
*
- * Reads a 16 bit word from the EEPROM using the EERD register.
+ * Reads 16 bit word(s) from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
{
u32 eerd;
- s32 status;
+ s32 status = IXGBE_SUCCESS;
+ u32 i;
- DEBUGFUNC("ixgbe_read_eerd_generic");
+ DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
hw->eeprom.ops.init_params(hw);
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
- eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
- IXGBE_EEPROM_RW_REG_START;
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+ IXGBE_EEPROM_RW_REG_START;
- IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
- status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
+ IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
- if (status == IXGBE_SUCCESS)
- *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
- IXGBE_EEPROM_RW_REG_DATA);
- else
- DEBUGOUT("Eeprom read timed out\n");
+ if (status == IXGBE_SUCCESS) {
+ data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+ IXGBE_EEPROM_RW_REG_DATA);
+ } else {
+ DEBUGOUT("Eeprom read timed out\n");
+ goto out;
+ }
+ }
+out:
+ return status;
+}
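One EERD transaction, unrolled from the loop above as a sketch; word 0x2a is an arbitrary example, and ixgbe_poll_eerd_eewr_done is file-local to ixgbe_common.c:

	u32 eerd = (0x2a << IXGBE_EEPROM_RW_ADDR_SHIFT) +
		   IXGBE_EEPROM_RW_REG_START;
	u16 word = 0;

	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);	/* kick off the read */
	if (ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ) ==
	    IXGBE_SUCCESS)
		word = (u16)(IXGBE_READ_REG(hw, IXGBE_EERD) >>
			     IXGBE_EEPROM_RW_REG_DATA);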
+
+/**
+ * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be used as a scratch pad
+ *
+ * Discovers the EEPROM page size by writing a marching data pattern at the
+ * given offset. This function is called only when we are about to write a
+ * new large buffer at that offset, so the data would be overwritten anyway.
+ **/
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset)
+{
+ u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
+ s32 status = IXGBE_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
+
+ for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
+ data[i] = i;
+
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
+ IXGBE_EEPROM_PAGE_SIZE_MAX, data);
+ hw->eeprom.word_page_size = 0;
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+ /*
+	 * When a burst write exceeds the actual page size, the EEPROM
+	 * address wraps around within the current page.
+ */
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
+
+ DEBUGOUT1("Detected EEPROM page size = %d words.",
+ hw->eeprom.word_page_size);
out:
return status;
}
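The arithmetic at the end is the whole trick. A worked example, assuming IXGBE_EEPROM_PAGE_SIZE_MAX is 128 and the device page is really 16 words:

/*
 * The probe writes the values 0..127 in one unbroken burst, so the
 * address wraps inside the 16-word page; the slot at `offset' is
 * rewritten on every lap (0, 16, 32, ..., 112) and ends up holding 112.
 * Reading it back gives data[0] == 112, hence:
 *
 *     word_page_size = 128 - data[0] = 128 - 112 = 16
 */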
/**
- * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * ixgbe_read_eerd_generic - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
+}
+
+/**
+ * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
- * @data: word write to the EEPROM
+ * @words: number of word(s)
+ * @data: word(s) to write to the EEPROM
*
- * Write a 16 bit word to the EEPROM using the EEWR register.
+ * Writes 16 bit word(s) to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
{
u32 eewr;
- s32 status;
+ s32 status = IXGBE_SUCCESS;
+ u16 i;
DEBUGFUNC("ixgbe_write_eewr_generic");
hw->eeprom.ops.init_params(hw);
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
- eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
- (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
+ IXGBE_EEPROM_RW_REG_START;
- status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
- if (status != IXGBE_SUCCESS) {
- DEBUGOUT("Eeprom write EEWR timed out\n");
- goto out;
- }
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
- IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
- status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
- if (status != IXGBE_SUCCESS) {
- DEBUGOUT("Eeprom write EEWR timed out\n");
- goto out;
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
}
out:
@@ -1010,6 +1227,19 @@ out:
}
/**
+ * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
+}
+
+/**
* ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
* @hw: pointer to hardware structure
* @ee_reg: EEPROM flag for polling
@@ -1055,7 +1285,8 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_acquire_eeprom");
- if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
+ != IXGBE_SUCCESS)
status = IXGBE_ERR_SWFW_SYNC;
if (status == IXGBE_SUCCESS) {
@@ -1078,7 +1309,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
DEBUGOUT("Could not acquire EEPROM grant\n");
- ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
status = IXGBE_ERR_EEPROM;
}
@@ -1124,6 +1355,28 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
usec_delay(50);
}
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
+ "not granted.\n");
+ /*
+ * this release is particularly important because our attempts
+ * above to get the semaphore may have succeeded, and if there
+ * was a timeout, we should unconditionally clear the semaphore
+ * bits to free the driver to make progress
+ */
+ ixgbe_release_eeprom_semaphore(hw);
+
+ usec_delay(50);
+ /*
+ * one last try
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI))
+ status = IXGBE_SUCCESS;
+ }
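For context, acquisition here is two-staged; a compressed sketch of the protocol this function implements (variable names as in the surrounding code):

	/*
	 * Stage 1: SMBI is a read-to-own bit arbitrating between software
	 * drivers - a plain read that returns 0 has already acquired it.
	 * Stage 2: SWESMBI arbitrates between software and firmware -
	 * write the bit, then read it back to confirm FW did not win.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm | IXGBE_SWSM_SWESMBI);
	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
	if (swsm & IXGBE_SWSM_SWESMBI)
		status = IXGBE_SUCCESS;	/* both semaphores held */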
+
/* Now get the semaphore between SW/FW through the SWESMBI bit */
if (status == IXGBE_SUCCESS) {
for (i = 0; i < timeout; i++) {
@@ -1150,13 +1403,13 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
*/
if (i >= timeout) {
DEBUGOUT("SWESMBI Software EEPROM semaphore "
- "not granted.\n");
+ "not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
status = IXGBE_ERR_EEPROM;
}
} else {
DEBUGOUT("Software semaphore SMBI between device drivers "
- "not granted.\n");
+ "not granted.\n");
}
return status;
@@ -1202,7 +1455,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
*/
for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
- IXGBE_EEPROM_OPCODE_BITS);
+ IXGBE_EEPROM_OPCODE_BITS);
spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
break;
@@ -1253,7 +1506,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
* @count: number of bits to shift out
**/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count)
+ u16 count)
{
u32 eec;
u32 mask;
@@ -1404,7 +1657,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
eec &= ~IXGBE_EEC_REQ;
IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
/* Delay before attempt to obtain semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
@@ -1465,7 +1718,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* caller does not need checksum_val, the value can be NULL.
**/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val)
+ u16 *checksum_val)
{
s32 status;
u16 checksum;
@@ -1523,7 +1776,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
if (status == IXGBE_SUCCESS) {
checksum = hw->eeprom.ops.calc_checksum(hw);
status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
+ checksum);
} else {
DEBUGOUT("EEPROM read failed\n");
}
@@ -1553,7 +1806,7 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
status = IXGBE_ERR_INVALID_MAC_ADDR;
/* Reject the zero address */
} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
DEBUGOUT("MAC address is all zeros\n");
status = IXGBE_ERR_INVALID_MAC_ADDR;
}
@@ -1571,7 +1824,7 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
* Puts an ethernet address into a receive address register.
**/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr)
+ u32 enable_addr)
{
u32 rar_low, rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1592,9 +1845,9 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
* order from network order (big endian) to little endian
*/
rar_low = ((u32)addr[0] |
- ((u32)addr[1] << 8) |
- ((u32)addr[2] << 16) |
- ((u32)addr[3] << 24));
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) |
+ ((u32)addr[3] << 24));
/*
* Some parts put the VMDq setting in the extra RAH bits,
* so save everything except the lower 16 bits that hold part
@@ -1676,18 +1929,18 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw->mac.addr[4], hw->mac.addr[5]);
} else {
/* Setup the receive address. */
DEBUGOUT("Overriding MAC Address in RAR[0]\n");
DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw->mac.addr[4], hw->mac.addr[5]);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
@@ -1733,7 +1986,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
DEBUGFUNC("ixgbe_add_uc_addr");
DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
/*
* Place this address in the RAR if there is room,
@@ -1766,7 +2019,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
* manually putting the device into promiscuous mode.
**/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr next)
+ u32 addr_count, ixgbe_mc_addr_itr next)
{
u8 *addr;
u32 i;
@@ -1903,14 +2156,14 @@ void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
* @mc_addr_list: the list of new multicast addresses
* @mc_addr_count: number of addresses
* @next: iterator function to walk the multicast address list
+ * @clear: flag which, when set, clears the table beforehand
*
- * The given list replaces any existing list. Clears the MC addrs from receive
- * address registers and the multicast table. Uses unused receive address
- * registers for the first multicast addresses, and hashes the rest into the
- * multicast table.
+ * When the clear flag is set, the given list replaces any existing list.
+ * Hashes the given addresses into the multicast table.
**/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr next)
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
{
u32 i;
u32 vmdq;
@@ -1925,8 +2178,10 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
hw->addr_ctrl.mta_in_use = 0;
/* Clear mta_shadow */
- DEBUGOUT(" Clearing MTA\n");
- memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+ if (clear) {
+ DEBUGOUT(" Clearing MTA\n");
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+ }
/* Update mta_shadow */
for (i = 0; i < mc_addr_count; i++) {
@@ -1941,7 +2196,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
if (hw->addr_ctrl.mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
- IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
return IXGBE_SUCCESS;
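A hypothetical caller pattern the new clear flag enables, merging two address lists into the MTA without wiping between the calls; pf_list, vf_list and next_addr are placeholders:

	/* The first list replaces the table... */
	ixgbe_update_mc_addr_list(hw, pf_list, pf_count, next_addr, TRUE);
	/* ...the second is OR-ed on top. */
	ixgbe_update_mc_addr_list(hw, vf_list, vf_count, next_addr, FALSE);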
@@ -1961,7 +2216,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
if (a->mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
- hw->mac.mc_filter_type);
+ hw->mac.mc_filter_type);
return IXGBE_SUCCESS;
}
@@ -1996,7 +2251,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
s32 ret_val = IXGBE_SUCCESS;
u32 mflcn_reg, fccfg_reg;
u32 reg;
- u32 rx_pba_size;
u32 fcrtl, fcrth;
DEBUGFUNC("ixgbe_fc_enable_generic");
@@ -2065,11 +2319,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
- rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
-
- fcrth = (rx_pba_size - hw->fc.high_water) << 10;
- fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
+ fcrth = hw->fc.high_water[packetbuf_num] << 10;
+ fcrtl = hw->fc.low_water << 10;
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
fcrth |= IXGBE_FCRTH_FCEN;
@@ -2162,8 +2413,6 @@ out:
/**
* ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
* @hw: pointer to hardware structure
- * @speed:
- * @link_up
*
 * Enable flow control on 1 gig fiber.
**/
@@ -2179,8 +2428,8 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
*/
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
- if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
- ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
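
The !! normalization fixes a latent bug: IXGBE_PCS1GLSTA_AN_TIMED_OUT is not
bit 0, so the old masked value could never compare equal to 1 and the timeout
branch was unreachable. A standalone illustration (the mask value below is
made up for the example):

	static int
	an_timed_out(u32 linkstat)
	{
	#define EXAMPLE_AN_TIMED_OUT	0x00020000	/* not bit 0 */
		/* old: (linkstat & EXAMPLE_AN_TIMED_OUT) == 1, always false */
		/* new: !! folds any set bit to 1 before the comparison */
		return (!!(linkstat & EXAMPLE_AN_TIMED_OUT) == 1);
	#undef EXAMPLE_AN_TIMED_OUT
	}
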
@@ -2189,10 +2438,10 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
- pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
- IXGBE_PCS1GANA_ASM_PAUSE,
- IXGBE_PCS1GANA_SYM_PAUSE,
- IXGBE_PCS1GANA_ASM_PAUSE);
+ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE,
+ IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE);
out:
return ret_val;
@@ -2325,7 +2574,7 @@ static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
*
* Called at init time to set up flow control.
**/
-s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
{
s32 ret_val = IXGBE_SUCCESS;
u32 reg = 0, reg_bp = 0;
@@ -2335,8 +2584,8 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
/* Validate the packetbuf configuration */
if (packetbuf_num < 0 || packetbuf_num > 7) {
- DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
- " 0-7\n", packetbuf_num);
+ DEBUGOUT1("Invalid packet buffer number [%d], expected range "
+ "is 0-7\n", packetbuf_num);
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
@@ -2345,7 +2594,9 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
* Validate the water mark configuration. Zero water marks are invalid
* because it causes the controller to just blast out fc packets.
*/
- if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
+ if (!hw->fc.low_water ||
+ !hw->fc.high_water[packetbuf_num] ||
+ !hw->fc.pause_time) {
DEBUGOUT("Invalid water mark configuration\n");
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
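
hw->fc.high_water is now an array indexed by packet buffer (one entry per
traffic class) while low_water remains a single value, so the validation
above and the FCRTH programming both pick out the entry for packetbuf_num.
A hedged sketch of seeding these fields before flow control is enabled; the
values and the IXGBE_DCB_MAX_TRAFFIC_CLASS bound are assumptions for
illustration:

	static void
	example_seed_fc(struct ixgbe_hw *hw)
	{
		int i;

		hw->fc.low_water = 64;			/* KB, shared */
		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++)
			hw->fc.high_water[i] = 128;	/* KB, per TC */
		hw->fc.pause_time = 0x680;		/* example value */
	}
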
@@ -2383,7 +2634,7 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
case ixgbe_media_type_copper:
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
break;
default:
@@ -2457,19 +2708,21 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
break;
}
- /*
- * Enable auto-negotiation between the MAC & PHY;
- * the MAC will advertise clause 37 flow control.
- */
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+ if (hw->mac.type != ixgbe_mac_X540) {
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
- /* Disable AN timeout */
- if (hw->fc.strict_ieee)
- reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
- DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ }
/*
* AUTOC restart handles negotiation of 1G and 10G on backplane
@@ -2501,78 +2754,60 @@ out:
**/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
- u32 i;
- u32 reg_val;
- u32 number_of_queues;
s32 status = IXGBE_SUCCESS;
+ u32 i;
DEBUGFUNC("ixgbe_disable_pcie_master");
- /* Just jump out if bus mastering is already disabled */
+ /* Always set this bit to ensure any future transactions are blocked */
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
+
+ /* Exit if master requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
- /* Disable the receive unit by stopping each queue */
- number_of_queues = hw->mac.max_rx_queues;
- for (i = 0; i < number_of_queues; i++) {
- reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
- if (reg_val & IXGBE_RXDCTL_ENABLE) {
- reg_val &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
- }
- }
-
- reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
- reg_val |= IXGBE_CTRL_GIO_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
-
+ /* Poll for master request bit to clear */
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
- goto check_device_status;
usec_delay(100);
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
}
+ /*
+ * Two consecutive resets are required via CTRL.RST per datasheet
+ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new master requests from
+ * being issued by our device. We then must wait 1usec or more for any
+ * remaining completions from the PCIe bus to trickle in, and then reset
+ * again to clear out any effects they may have had on our device.
+ */
DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
- status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
/*
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
*/
-check_device_status:
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
- if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
- IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
- break;
usec_delay(100);
+ if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
+ IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ goto out;
}
- if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
- DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
- else
- goto out;
-
- /*
- * Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
- * being issued by our device. We then must wait 1usec for any
- * remaining completions from the PCIe bus to trickle in, and then reset
- * again to clear out any effects they may have had on our device.
- */
- hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
out:
return status;
}
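
The reworked flow sets CTRL.GIO_DIS up front, polls STATUS.GIO, and on
timeout no longer fails outright; instead it flags the MAC so the reset path
issues CTRL.RST twice. A hedged, abbreviated sketch of how a reset routine
consumes that flag, mirroring the goto pattern used by the 82599/X540
reset code:

	ixgbe_disable_pcie_master(hw);

mac_reset_top:
	ctrl = IXGBE_CTRL_RST | IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	/* ... poll until IXGBE_CTRL_RST self-clears ... */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;	/* 2nd reset per datasheet 5.2.5.3.2 */
	}
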
-
/**
* ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to acquire
*
- * Acquires the SWFW semaphore thought the GSSR register for the specified
+ * Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2622,7 +2857,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
- * Releases the SWFW semaphore thought the GSSR register for the specified
+ * Releases the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2642,6 +2877,63 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
}
/**
+ * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 40
+
+ int i;
+ int secrxreg;
+
+ DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
+
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECRX_POLL)
+ DEBUGOUT("Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+ int secrxreg;
+
+ DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
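
These two helpers are intended to bracket receive-path reconfiguration so it
cannot race the Rx security block. A typical pairing, assuming the new
disable_sec_rx_path/enable_sec_rx_path mac ops point at the generic routines
above:

	hw->mac.ops.disable_sec_rx_path(hw);

	/* ... reprogram Rx registers while SECRX is quiesced ... */

	hw->mac.ops.enable_sec_rx_path(hw);
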
+/**
* ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
* @hw: pointer to hardware structure
* @regval: register value to write to RXCTRL
@@ -2681,6 +2973,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
msec_delay(10);
}
@@ -2728,7 +3021,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
* get and set mac_addr routines.
**/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+ u16 *san_mac_offset)
{
DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
@@ -2779,7 +3072,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
hw->mac.ops.set_lan_id(hw);
/* apply the port offset to the address offset */
(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
for (i = 0; i < 3; i++) {
hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
san_mac_addr[i * 2] = (u8)(san_mac_data);
@@ -2818,7 +3111,7 @@ s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
hw->mac.ops.set_lan_id(hw);
/* Apply the port offset to the address offset */
(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
for (i = 0; i < 3; i++) {
san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
@@ -2845,7 +3138,7 @@ u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
if (hw->mac.msix_vectors_from_pcie) {
msix_count = IXGBE_READ_PCIE_WORD(hw,
- IXGBE_PCIE_MSIX_82599_CAPS);
+ IXGBE_PCIE_MSIX_82599_CAPS);
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW, so increment to give
@@ -3079,14 +3372,13 @@ s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on)
{
s32 regindex;
u32 bitindex;
u32 vfta;
- u32 bits;
- u32 vt;
u32 targetbit;
+ s32 ret_val = IXGBE_SUCCESS;
bool vfta_changed = FALSE;
DEBUGFUNC("ixgbe_set_vfta_generic");
@@ -3124,7 +3416,41 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
}
/* Part 2
- * If VT Mode is set
+ * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
+ */
+ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
+ &vfta_changed);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ * should be changed
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed)
+{
+ u32 vt;
+
+ DEBUGFUNC("ixgbe_set_vlvf_generic");
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /* If VT Mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
@@ -3134,6 +3460,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (vt & IXGBE_VT_CTL_VT_ENABLE) {
s32 vlvf_index;
+ u32 bits;
vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
if (vlvf_index < 0)
@@ -3143,39 +3470,39 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* set the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index*2));
+ IXGBE_VLVFB(vlvf_index * 2));
bits |= (1 << vind);
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index*2),
+ IXGBE_VLVFB(vlvf_index * 2),
bits);
} else {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index*2)+1));
- bits |= (1 << (vind-32));
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits |= (1 << (vind - 32));
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index*2)+1),
- bits);
+ IXGBE_VLVFB((vlvf_index * 2) + 1),
+ bits);
}
} else {
/* clear the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index*2));
+ IXGBE_VLVFB(vlvf_index * 2));
bits &= ~(1 << vind);
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index*2),
+ IXGBE_VLVFB(vlvf_index * 2),
bits);
bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index*2)+1));
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
} else {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index*2)+1));
- bits &= ~(1 << (vind-32));
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits &= ~(1 << (vind - 32));
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index*2)+1),
- bits);
+ IXGBE_VLVFB((vlvf_index * 2) + 1),
+ bits);
bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index*2));
+ IXGBE_VLVFB(vlvf_index * 2));
}
}
@@ -3197,20 +3524,16 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
if (bits) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
(IXGBE_VLVF_VIEN | vlan));
- if (!vlan_on) {
+ if ((!vlan_on) && (vfta_changed != NULL)) {
/* someone wants to clear the vfta entry
* but some pools/VFs are still using it.
* Ignore it. */
- vfta_changed = FALSE;
+ *vfta_changed = FALSE;
}
- }
- else
+ } else
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
}
- if (vfta_changed)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
-
return IXGBE_SUCCESS;
}
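
Each VLVF slot carries a 64-bit pool bitmap split across two consecutive
VLVFB registers, which is why the code above selects VLVFB(vlvf_index * 2)
for pools 0-31 and VLVFB(vlvf_index * 2 + 1), bit vind - 32, for pools
32-63. A small helper (illustrative only, not part of the driver) capturing
that arithmetic:

	static inline void
	vlvfb_locate(s32 vlvf_index, u32 vind, u32 *reg, u32 *bit)
	{
		*reg = (vlvf_index * 2) + (vind / 32);	/* two regs/slot */
		*bit = vind % 32;	/* bit within that register */
	}
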
@@ -3231,8 +3554,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
}
return IXGBE_SUCCESS;
@@ -3248,7 +3571,7 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
* Reads the links register to determine if link is up and the current speed
**/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete)
+ bool *link_up, bool link_up_wait_to_complete)
{
u32 links_reg, links_orig;
u32 i;
@@ -3262,7 +3585,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
if (links_orig != links_reg) {
DEBUGOUT2("LINKS changed from %08X to %08X\n",
- links_orig, links_reg);
+ links_orig, links_reg);
}
if (link_up_wait_to_complete) {
@@ -3287,20 +3610,14 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
IXGBE_LINKS_SPEED_10G_82599)
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
+ IXGBE_LINKS_SPEED_1G_82599)
*speed = IXGBE_LINK_SPEED_1GB_FULL;
else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_100_82599)
+ IXGBE_LINKS_SPEED_100_82599)
*speed = IXGBE_LINK_SPEED_100_FULL;
else
*speed = IXGBE_LINK_SPEED_UNKNOWN;
- /* if link is down, zero out the current_mode */
- if (*link_up == FALSE) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = FALSE;
- }
-
return IXGBE_SUCCESS;
}
@@ -3315,7 +3632,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* block to check the support for the alternative WWNN/WWPN prefix support.
**/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
@@ -3328,7 +3645,7 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
/* check if alternative SAN MAC is supported */
hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
- &alt_san_mac_blk_offset);
+ &alt_san_mac_blk_offset);
if ((alt_san_mac_blk_offset == 0) ||
(alt_san_mac_blk_offset == 0xFFFF))
@@ -3415,6 +3732,8 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X540T:
+ return IXGBE_SUCCESS;
case IXGBE_DEV_ID_82599_T3_LOM:
return IXGBE_SUCCESS;
default:
@@ -3525,8 +3844,292 @@ void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
}
+
+/**
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to the buffer to checksum
+ * @length: number of bytes over which to calculate the checksum
+ *
+ * Calculates the checksum over the given buffer and returns it.
+ **/
+static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ DEBUGFUNC("ixgbe_calculate_checksum");
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
+
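
The returned byte is the two's complement of the running sum, so summing the
buffer with the checksum included yields 0 modulo 256, which is how the
other end validates it. A quick self-check (not driver code):

	static void
	checksum_property(void)
	{
		u8 buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0 };
		u8 sum = 0;
		u32 i;

		buf[7] = ixgbe_calculate_checksum(buf, 7);
		for (i = 0; i < 8; i++)
			sum += buf[i];
		/* sum is now 0 */
	}
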
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be a multiple of 4 bytes
+ *
+ * Communicates with the manageability block. Returns IXGBE_SUCCESS on
+ * success, or IXGBE_ERR_HOST_INTERFACE_COMMAND otherwise.
+ **/
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length)
+{
+ u32 hicr, i, bi;
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u8 buf_len, dword_len;
+
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_host_interface_command");
+
+ if (length == 0 || length & 0x3 ||
+ length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if ((hicr & IXGBE_HICR_EN) == 0) {
+ DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = length >> 2;
+
+ /*
+ * The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ i, IXGBE_CPU_TO_LE32(buffer[i]));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+ for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check command successful completion. */
+ if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+ (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+ DEBUGOUT("Command has failed with no status valid.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = hdr_size >> 2;
+
+ /* first pull in the header so we know the buffer length */
+ for (bi = 0; bi < dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
+
+ /* If there is anything in the data position, pull it in */
+ buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+ if (buf_len == 0)
+ goto out;
+
+ if (length < (buf_len + hdr_size)) {
+ DEBUGOUT("Buffer not large enough for reply message.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+
+ /* Pull in the rest of the buffer (bi is where we left off) */
+ for (; bi <= dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. Returns IXGBE_SUCCESS on success, IXGBE_ERR_SWFW_SYNC when the
+ * semaphore cannot be acquired, or IXGBE_ERR_HOST_INTERFACE_COMMAND when
+ * the command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub)
+{
+ struct ixgbe_hic_drv_info fw_cmd;
+ int i;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
+ != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd));
+ if (ret_val != IXGBE_SUCCESS)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+out:
+ return ret_val;
+}
+
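
This is meant to be called once at attach time so the manageability firmware
knows which driver owns the port. A hedged caller sketch; the version
numbers are placeholders and the NULL check guards MACs whose ops table does
not provide the hook:

	if (hw->mac.ops.set_fw_drv_ver != NULL)
		(void) hw->mac.ops.set_fw_drv_ver(hw, 2, 4, 0, 0);
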
+/**
+ * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+{
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number of packet
+ * buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case (PBA_STRATEGY_WEIGHTED):
+ /* The ixgbe_dcb_pba_80_48 strategy weights the first half of the
+ * packet buffers with 5/8 of the packet buffer space.
+ */
+ rxpktsize = (pbsize * 5 * 2) / (num_pb * 8);
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+ for (; i < (num_pb / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Fall through to configure remaining packet buffers */
+ case (PBA_STRATEGY_EQUAL):
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < IXGBE_MAX_PB; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+}
+
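
Worked numbers for the weighted split, under assumed inputs:

	/*
	 * Example (values assumed): rx_pb_size = 512 KB, num_pb = 8.
	 *   weighted half: (512 * 5 * 2) / (8 * 8) = 80 KB each for
	 *                  buffers 0-3, consuming 4 * 80 = 320 KB
	 *   equal half:    (512 - 320) / (8 - 4) = 48 KB each for 4-7
	 *   total:         4*80 + 4*48 = 512 KB, the whole packet buffer
	 */
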
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and x540 MACs can experience issues if TX work is still pending
+ * when a reset occurs. This function prevents this by flushing the PCIe
+ * buffers on the system.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+ u32 gcr_ext, hlreg0;
+
+ /*
+ * If double reset is not requested then all transactions should
+ * already be clear and as such there is no work to do
+ */
+ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+ return;
+
+ /*
+ * Set loopback enable to prevent any transmits from being sent
+ * should the link come up. This assumes that the RXCTRL.RXEN bit
+ * has already been cleared.
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+ /* initiate cleaning flow for buffers in the PCIe transaction layer */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+ gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+ /* Flush all writes and allow 20usec for all transactions to clear */
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(20);
+
+ /* restore previous register values */
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
+
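
ixgbe_clear_tx_pending() is expected to run early in the reset path, after
receives are disabled, so a wedged PCIe completion cannot land mid-reset. A
hedged ordering sketch:

	/* RXCTRL.RXEN must already be clear, as the code above assumes */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	ixgbe_clear_tx_pending(hw);	/* no-op unless double reset flagged */

	/* ... proceed with CTRL.RST ... */
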
diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h
index d37f521..4bd4b75 100644
--- a/sys/dev/ixgbe/ixgbe_common.h
+++ b/sys/dev/ixgbe/ixgbe_common.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -51,8 +51,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-s32 ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, u32 *pba_num_size);
+ u32 pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -63,30 +62,39 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data);
+ u16 *data);
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val);
+ u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
+ u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr func);
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr func, bool clear);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr func);
+ u32 addr_count, ixgbe_mc_addr_itr func);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
@@ -106,19 +114,27 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
+ u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete);
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix);
+ u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy);
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 ver);
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
#endif /* IXGBE_COMMON */
diff --git a/sys/dev/ixgbe/ixgbe_mbx.c b/sys/dev/ixgbe/ixgbe_mbx.c
index 0e08ea5..f9c1efa 100644
--- a/sys/dev/ixgbe/ixgbe_mbx.c
+++ b/sys/dev/ixgbe/ixgbe_mbx.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -242,7 +242,7 @@ out:
* received an ack to that message within delay * timeout period
**/
s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
@@ -326,7 +326,7 @@ static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
s32 ret_val = IXGBE_ERR_MBX;
- UNREFERENCED_PARAMETER(mbx_id);
+ UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_msg_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
@@ -348,7 +348,7 @@ static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
s32 ret_val = IXGBE_ERR_MBX;
- UNREFERENCED_PARAMETER(mbx_id);
+ UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_ack_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
@@ -370,11 +370,11 @@ static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
s32 ret_val = IXGBE_ERR_MBX;
- UNREFERENCED_PARAMETER(mbx_id);
+ UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_rst_vf");
if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
- IXGBE_VFMAILBOX_RSTI))) {
+ IXGBE_VFMAILBOX_RSTI))) {
ret_val = IXGBE_SUCCESS;
hw->mbx.stats.rsts++;
}
@@ -414,12 +414,12 @@ static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+ u16 mbx_id)
{
s32 ret_val;
u16 i;
- UNREFERENCED_PARAMETER(mbx_id);
+ UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_write_mbx_vf");
@@ -456,13 +456,13 @@ out_no_write:
 * returns SUCCESS if it successfully read message from buffer
**/
static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+ u16 mbx_id)
{
s32 ret_val = IXGBE_SUCCESS;
u16 i;
DEBUGFUNC("ixgbe_read_mbx_vf");
- UNREFERENCED_PARAMETER(mbx_id);
+ UNREFERENCED_1PARAMETER(mbx_id);
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_vf(hw);
@@ -544,7 +544,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
DEBUGFUNC("ixgbe_check_for_msg_pf");
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = IXGBE_SUCCESS;
hw->mbx.stats.reqs++;
}
@@ -568,7 +568,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
DEBUGFUNC("ixgbe_check_for_ack_pf");
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = IXGBE_SUCCESS;
hw->mbx.stats.acks++;
}
@@ -596,8 +596,10 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_82599EB:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
break;
+ case ixgbe_mac_X540:
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+ break;
default:
- goto out;
break;
}
@@ -607,7 +609,6 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
hw->mbx.stats.rsts++;
}
-out:
return ret_val;
}
@@ -646,7 +647,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
@@ -689,7 +690,7 @@ out_no_write:
* a message due to a VF request so no polling for message is needed.
**/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
@@ -725,7 +726,8 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (hw->mac.type != ixgbe_mac_82599EB)
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540)
return;
mbx->timeout = 0;
@@ -747,4 +749,3 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
mbx->stats.acks = 0;
mbx->stats.rsts = 0;
}
-
diff --git a/sys/dev/ixgbe/ixgbe_mbx.h b/sys/dev/ixgbe/ixgbe_mbx.h
index 0ebecec..8ad18cb 100644
--- a/sys/dev/ixgbe/ixgbe_mbx.h
+++ b/sys/dev/ixgbe/ixgbe_mbx.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -37,67 +37,66 @@
#include "ixgbe_type.h"
-#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
-#define IXGBE_VFMAILBOX 0x002FC
-#define IXGBE_VFMBMEM 0x00200
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
/* Define mailbox register bits */
-#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
-#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
-#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
-#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
-#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
-#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
-#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
-
-#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
-#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
-
-#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
-#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
-#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
-
-#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
-#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
-#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
-#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is TRUE if it is IXGBE_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
-#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
-#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
-#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- clear to send requests */
-#define IXGBE_VT_MSGINFO_SHIFT 16
-/* bits 23:16 are used for exra info for certain messages */
-#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
-
-#define IXGBE_VF_RESET 0x01 /* VF requests reset */
-#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
-#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
-#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
/* length of permanent address message returned from PF */
-#define IXGBE_VF_PERMADDR_MSG_LEN 4
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
-#define IXGBE_VF_MC_TYPE_WORD 3
+#define IXGBE_VF_MC_TYPE_WORD 3
-#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
-#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
-#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index 19dbf6b..4ba3f10 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -37,6 +37,7 @@
#include <sys/types.h>
#include <sys/param.h>
+#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
@@ -83,12 +84,22 @@
#define true 1
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
+
+/* Bunch of defines for shared code bogosity */
#define UNREFERENCED_PARAMETER(_p)
+#define UNREFERENCED_1PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
#define IXGBE_NTOHL(_i) ntohl(_i)
#define IXGBE_NTOHS(_i) ntohs(_i)
+/* XXX these need to be revisited */
+#define IXGBE_CPU_TO_LE32 le32toh
+#define IXGBE_LE32_TO_CPUS le32dec
+
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index a2e3e38..a8693ce 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -43,11 +43,10 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
-static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(u32 *i2cctl);
-void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
/**
* ixgbe_init_phy_ops_generic - Inits PHY function ptrs
@@ -75,7 +74,7 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
- phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
+ phy->ops.identify_sfp = &ixgbe_identify_module_generic;
phy->sfp_type = ixgbe_sfp_type_unknown;
phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
return IXGBE_SUCCESS;
@@ -101,21 +100,21 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
hw->phy.addr = phy_addr;
ixgbe_get_phy_id(hw);
hw->phy.type =
- ixgbe_get_phy_type_from_id(hw->phy.id);
+ ixgbe_get_phy_type_from_id(hw->phy.id);
if (hw->phy.type == ixgbe_phy_unknown) {
hw->phy.ops.read_reg(hw,
IXGBE_MDIO_PHY_EXT_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &ext_ability);
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ext_ability);
if (ext_ability &
(IXGBE_MDIO_PHY_10GBASET_ABILITY |
IXGBE_MDIO_PHY_1000BASET_ABILITY))
hw->phy.type =
- ixgbe_phy_cu_unknown;
+ ixgbe_phy_cu_unknown;
else
hw->phy.type =
- ixgbe_phy_generic;
+ ixgbe_phy_generic;
}
status = IXGBE_SUCCESS;
@@ -146,7 +145,7 @@ bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
hw->phy.addr = phy_addr;
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
if (phy_id != 0xFFFF && phy_id != 0x0)
valid = TRUE;
@@ -168,14 +167,14 @@ s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_phy_id");
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &phy_id_high);
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_high);
if (status == IXGBE_SUCCESS) {
hw->phy.id = (u32)(phy_id_high << 16);
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &phy_id_low);
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_low);
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
@@ -197,7 +196,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case AQ1002_PHY_ID:
+ case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
case QT2022_PHY_ID:
@@ -243,8 +242,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
* This will cause a soft reset to the PHY
*/
hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE,
- IXGBE_MDIO_PHY_XS_RESET);
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ IXGBE_MDIO_PHY_XS_RESET);
/*
* Poll for reset bit to self-clear indicating reset is complete.
@@ -254,7 +253,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
for (i = 0; i < 30; i++) {
msec_delay(100);
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
usec_delay(2);
break;
@@ -277,7 +276,7 @@ out:
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data)
+ u32 device_type, u16 *phy_data)
{
u32 command;
u32 i;
@@ -292,15 +291,15 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (ixgbe_acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
status = IXGBE_ERR_SWFW_SYNC;
if (status == IXGBE_SUCCESS) {
/* Setup and write the address cycle command */
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -329,9 +328,9 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* command
*/
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -363,7 +362,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
}
}
- ixgbe_release_swfw_sync(hw, gssr);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
}
return status;
@@ -377,7 +376,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @phy_data: Data to write to the PHY register
**/
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+ u32 device_type, u16 phy_data)
{
u32 command;
u32 i;
@@ -391,7 +390,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (ixgbe_acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
status = IXGBE_ERR_SWFW_SYNC;
if (status == IXGBE_SUCCESS) {
@@ -400,9 +399,9 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
/* Setup and write the address cycle command */
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -431,9 +430,9 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* command
*/
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -457,7 +456,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
}
}
- ixgbe_release_swfw_sync(hw, gssr);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
}
return status;
@@ -485,71 +484,71 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
/* Set or unset auto-negotiation 10G advertisement */
hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
}
if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
/* Set or unset auto-negotiation 1G advertisement */
hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
}
if (speed & IXGBE_LINK_SPEED_100_FULL) {
/* Set or unset auto-negotiation 100M advertisement */
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
+ autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+ IXGBE_MII_100BASE_T_ADVERTISE_HALF);
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
}
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
autoneg_reg |= IXGBE_MII_RESTART;
hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
/* Wait for autonegotiation to finish */
for (time_out = 0; time_out < max_time_out; time_out++) {
usec_delay(10);
/* Restart PHY autonegotiation and wait for completion */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
break;
- }
}
if (time_out == max_time_out) {
@@ -567,12 +566,11 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @autoneg: TRUE if autonegotiation enabled
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
{
- UNREFERENCED_PARAMETER(autoneg);
- UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ UNREFERENCED_2PARAMETER(autoneg, autoneg_wait_to_complete);
DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
@@ -606,8 +604,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the link capabilities by reading the AUTOC register.
**/
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = IXGBE_ERR_LINK_SETUP;
u16 speed_ability;
@@ -618,8 +616,8 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
*autoneg = TRUE;
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &speed_ability);
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &speed_ability);
if (status == IXGBE_SUCCESS) {
if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
@@ -641,7 +639,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
* the PHY.
**/
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up)
+ bool *link_up)
{
s32 status = IXGBE_SUCCESS;
u32 time_out;
@@ -664,13 +662,12 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
for (time_out = 0; time_out < max_time_out; time_out++) {
usec_delay(10);
status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &phy_data);
- phy_link = phy_data &
- IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &phy_data);
+ phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
phy_speed = phy_data &
- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
*link_up = TRUE;
if (phy_speed ==
@@ -705,69 +702,68 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
/* Set or unset auto-negotiation 10G advertisement */
hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
}
if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
/* Set or unset auto-negotiation 1G advertisement */
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
}
if (speed & IXGBE_LINK_SPEED_100_FULL) {
/* Set or unset auto-negotiation 100M advertisement */
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
}
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
autoneg_reg |= IXGBE_MII_RESTART;
hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
/* Wait for autonegotiation to finish */
for (time_out = 0; time_out < max_time_out; time_out++) {
usec_delay(10);
/* Restart PHY autonegotiation and wait for completion */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
break;
- }
}
if (time_out == max_time_out) {
@@ -784,15 +780,15 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
* @firmware_version: pointer to the PHY Firmware Version
**/
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version)
+ u16 *firmware_version)
{
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx");
status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- firmware_version);
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ firmware_version);
return status;
}
@@ -803,15 +799,15 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
* @firmware_version: pointer to the PHY Firmware Version
**/
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version)
+ u16 *firmware_version)
{
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_get_phy_firmware_version_generic");
status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- firmware_version);
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ firmware_version);
return status;
}
@@ -832,16 +828,16 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_reset_phy_nl");
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
/* reset the PHY and poll for completion */
hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE,
- (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ (phy_data | IXGBE_MDIO_PHY_XS_RESET));
for (i = 0; i < 100; i++) {
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
break;
msec_delay(10);
@@ -855,7 +851,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
/* Get init offsets */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != IXGBE_SUCCESS)
goto out;
@@ -867,7 +863,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
*/
ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
+ IXGBE_CONTROL_SHIFT_NL;
edata = eword & IXGBE_DATA_MASK_NL;
switch (control) {
case IXGBE_DELAY_NL:
@@ -876,23 +872,23 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
msec_delay(edata);
break;
case IXGBE_DATA_NL:
- DEBUGOUT("DATA: \n");
+ DEBUGOUT("DATA:\n");
data_offset++;
hw->eeprom.ops.read(hw, data_offset++,
- &phy_offset);
+ &phy_offset);
for (i = 0; i < edata; i++) {
hw->eeprom.ops.read(hw, data_offset, &eword);
hw->phy.ops.write_reg(hw, phy_offset,
- IXGBE_TWINAX_DEV, eword);
+ IXGBE_TWINAX_DEV, eword);
DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword,
- phy_offset);
+ phy_offset);
data_offset++;
phy_offset++;
}
break;
case IXGBE_CONTROL_NL:
data_offset++;
- DEBUGOUT("CONTROL: \n");
+ DEBUGOUT("CONTROL:\n");
if (edata == IXGBE_CONTROL_EOL_NL) {
DEBUGOUT("EOL\n");
end_data = TRUE;
@@ -916,6 +912,33 @@ out:
}
/**
+ * ixgbe_identify_module_generic - Identifies module type
+ * @hw: pointer to hardware structure
+ *
+ * Determines the media type and calls the appropriate identify routine.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+ DEBUGFUNC("ixgbe_identify_module_generic");
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ status = ixgbe_identify_sfp_module_generic(hw);
+ break;
+
+ default:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ break;
+ }
+
+ return status;
+}
+
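
The new dispatcher above is deliberately thin: it asks the MAC layer for the media type and only runs SFP identification for fiber ports; every other media type is reported as "no module present". A minimal usage sketch, assuming hw has already been set up by the driver's init path (probe_module is an illustrative name, not a function in this patch):

	/*
	 * Hedged sketch: probe for a pluggable module and log the outcome.
	 * DEBUGOUT and the IXGBE_ERR_* codes are the ones this file already uses.
	 */
	static s32 probe_module(struct ixgbe_hw *hw)
	{
		s32 status = ixgbe_identify_module_generic(hw);

		if (status == IXGBE_ERR_SFP_NOT_PRESENT)
			DEBUGOUT("no pluggable module detected\n");
		else if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			DEBUGOUT("module present but not supported\n");
		return (status);
	}
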
+/**
* ixgbe_identify_sfp_module_generic - Identifies SFP modules
* @hw: pointer to hardware structure
*
@@ -943,8 +966,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_IDENTIFIER,
- &identifier);
+ IXGBE_SFF_IDENTIFIER,
+ &identifier);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
@@ -959,8 +982,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
} else {
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_1GBE_COMP_CODES,
- &comp_codes_1g);
+ IXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
@@ -968,16 +991,16 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_10GBE_COMP_CODES,
- &comp_codes_10g);
+ IXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_CABLE_TECHNOLOGY,
- &cable_tech);
+ IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
@@ -1011,10 +1034,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core0;
+ ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core1;
+ ixgbe_sfp_type_da_cu_core1;
} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
hw->phy.ops.read_i2c_eeprom(
hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -1029,17 +1052,17 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
ixgbe_sfp_type_da_act_lmt_core1;
} else {
hw->phy.sfp_type =
- ixgbe_sfp_type_unknown;
+ ixgbe_sfp_type_unknown;
}
} else if (comp_codes_10g &
(IXGBE_SFF_10GBASESR_CAPABLE |
IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core0;
+ ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core1;
+ ixgbe_sfp_type_srlr_core1;
} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
@@ -1067,8 +1090,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE0,
- &oui_bytes[0]);
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
@@ -1076,8 +1099,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE1,
- &oui_bytes[1]);
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
@@ -1085,8 +1108,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE2,
- &oui_bytes[2]);
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
@@ -1102,7 +1125,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
case IXGBE_SFF_VENDOR_OUI_TYCO:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type =
- ixgbe_phy_sfp_passive_tyco;
+ ixgbe_phy_sfp_passive_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
@@ -1119,7 +1142,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
default:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type =
- ixgbe_phy_sfp_passive_unknown;
+ ixgbe_phy_sfp_passive_unknown;
else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
hw->phy.type =
ixgbe_phy_sfp_active_unknown;
@@ -1180,6 +1203,8 @@ err_read_i2c_eeprom:
return IXGBE_ERR_SFP_NOT_PRESENT;
}
+
/**
* ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
@@ -1190,8 +1215,8 @@ err_read_i2c_eeprom:
* so it returns the offsets to the phy init sequence block.
**/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset)
+ u16 *list_offset,
+ u16 *data_offset)
{
u16 sfp_id;
u16 sfp_type = hw->phy.sfp_type;
@@ -1268,13 +1293,13 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+ u8 *eeprom_data)
{
DEBUGFUNC("ixgbe_read_i2c_eeprom_generic");
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1286,13 +1311,13 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data)
+ u8 eeprom_data)
{
DEBUGFUNC("ixgbe_write_i2c_eeprom_generic");
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1302,16 +1327,17 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
- * a specified deivce address.
+ * a specified device address.
**/
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+ u8 dev_addr, u8 *data)
{
s32 status = IXGBE_SUCCESS;
u32 max_retry = 10;
u32 retry = 0;
u16 swfw_mask = 0;
bool nack = 1;
+ *data = 0;
DEBUGFUNC("ixgbe_read_i2c_byte_generic");
@@ -1321,7 +1347,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
swfw_mask = IXGBE_GSSR_PHY0_SM;
do {
- if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != IXGBE_SUCCESS) {
status = IXGBE_ERR_SWFW_SYNC;
goto read_byte_out;
}
@@ -1368,7 +1395,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
- ixgbe_release_swfw_sync(hw, swfw_mask);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msec_delay(100);
ixgbe_i2c_bus_clear(hw);
retry++;
@@ -1379,7 +1406,7 @@ fail:
} while (retry < max_retry);
- ixgbe_release_swfw_sync(hw, swfw_mask);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
read_byte_out:
return status;
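
A related change runs through both I2C byte helpers: the direct calls to ixgbe_acquire_swfw_sync()/ixgbe_release_swfw_sync() become indirect calls through hw->mac.ops, so MACs with their own semaphore scheme (such as the X540 files added in this same rebase) can substitute an implementation without touching this file. The bracket pattern, as a hedged sketch (with_phy_semaphore is illustrative; IXGBE_GSSR_PHY1_SM is the LAN-1 counterpart of the PHY0 mask used above):

	/* Hedged sketch of the acquire/work/release bracket used above. */
	static s32 with_phy_semaphore(struct ixgbe_hw *hw)
	{
		u16 swfw_mask;

		swfw_mask = hw->bus.lan_id ? IXGBE_GSSR_PHY1_SM :
		    IXGBE_GSSR_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS)
			return (IXGBE_ERR_SWFW_SYNC);
		/* ... perform the I2C or MDIO access here ... */
		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return (IXGBE_SUCCESS);
	}
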
@@ -1395,7 +1422,7 @@ read_byte_out:
* a specified device address.
**/
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+ u8 dev_addr, u8 data)
{
s32 status = IXGBE_SUCCESS;
u32 max_retry = 1;
@@ -1409,7 +1436,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
else
swfw_mask = IXGBE_GSSR_PHY0_SM;
- if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
status = IXGBE_ERR_SWFW_SYNC;
goto write_byte_out;
}
@@ -1453,7 +1480,7 @@ fail:
DEBUGOUT("I2C byte write error.\n");
} while (retry < max_retry);
- ixgbe_release_swfw_sync(hw, swfw_mask);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
write_byte_out:
return status;
@@ -1524,21 +1551,17 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
**/
static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
- s32 status = IXGBE_SUCCESS;
s32 i;
bool bit = 0;
DEBUGFUNC("ixgbe_clock_in_i2c_byte");
for (i = 7; i >= 0; i--) {
- status = ixgbe_clock_in_i2c_bit(hw, &bit);
+ ixgbe_clock_in_i2c_bit(hw, &bit);
*data |= bit << i;
-
- if (status != IXGBE_SUCCESS)
- break;
}
- return status;
+ return IXGBE_SUCCESS;
}
/**
@@ -1569,6 +1592,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
i2cctl |= IXGBE_I2C_DATA_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
return status;
}
@@ -1581,7 +1605,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
**/
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
- s32 status;
+ s32 status = IXGBE_SUCCESS;
u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
u32 timeout = 10;
@@ -1589,10 +1613,8 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_i2c_ack");
- status = ixgbe_raise_i2c_clk(hw, &i2cctl);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
- if (status != IXGBE_SUCCESS)
- goto out;
/* Minimum high period of clock is 4us */
usec_delay(IXGBE_I2C_T_HIGH);
@@ -1618,7 +1640,6 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
/* Minimum low period of clock is 4.7 us */
usec_delay(IXGBE_I2C_T_LOW);
-out:
return status;
}
@@ -1631,12 +1652,11 @@ out:
**/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
- s32 status;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
DEBUGFUNC("ixgbe_clock_in_i2c_bit");
- status = ixgbe_raise_i2c_clk(hw, &i2cctl);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
usec_delay(IXGBE_I2C_T_HIGH);
@@ -1649,7 +1669,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
/* Minimum low period of clock is 4.7 us */
usec_delay(IXGBE_I2C_T_LOW);
- return status;
+ return IXGBE_SUCCESS;
}
/**
@@ -1668,7 +1688,7 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == IXGBE_SUCCESS) {
- status = ixgbe_raise_i2c_clk(hw, &i2cctl);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
usec_delay(IXGBE_I2C_T_HIGH);
@@ -1693,20 +1713,17 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
*
* Raises the I2C clock line '0'->'1'
**/
-static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
- s32 status = IXGBE_SUCCESS;
-
DEBUGFUNC("ixgbe_raise_i2c_clk");
*i2cctl |= IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
usec_delay(IXGBE_I2C_T_RISE);
-
- return status;
}
/**
@@ -1724,6 +1741,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
*i2cctl &= ~IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
usec_delay(IXGBE_I2C_T_FALL);
@@ -1749,6 +1767,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*i2cctl &= ~IXGBE_I2C_DATA_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
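
The IXGBE_WRITE_FLUSH(hw) calls sprinkled through the bit-bang helpers above address posted PCIe writes: without a read-back, the I2CCTL write that toggles SCL or SDA can still be buffered when usec_delay() starts counting, so the line edge would not be pinned to the timing window. The macro appears to be a dummy register read in this driver's osdep layer (an assumption worth checking against ixgbe_osdep.h); the resulting idiom looks like this hedged sketch (set_scl is illustrative):

	/* Hedged sketch: write, flush the posted write, then time the edge. */
	static void set_scl(struct ixgbe_hw *hw, u32 *i2cctl, bool high)
	{
		if (high)
			*i2cctl |= IXGBE_I2C_CLK_OUT;
		else
			*i2cctl &= ~IXGBE_I2C_CLK_OUT;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
		IXGBE_WRITE_FLUSH(hw);		/* force the write out now */
		usec_delay(high ? IXGBE_I2C_T_RISE : IXGBE_I2C_T_FALL);
	}
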
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index 5c5dfa6..017ea88 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -39,61 +39,61 @@
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
/* EEPROM byte offsets */
-#define IXGBE_SFF_IDENTIFIER 0x0
-#define IXGBE_SFF_IDENTIFIER_SFP 0x3
-#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
-#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
-#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
-#define IXGBE_SFF_1GBE_COMP_CODES 0x6
-#define IXGBE_SFF_10GBE_COMP_CODES 0x3
-#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
-#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_IDENTIFIER 0x0
+#define IXGBE_SFF_IDENTIFIER_SFP 0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define IXGBE_SFF_1GBE_COMP_CODES 0x6
+#define IXGBE_SFF_10GBE_COMP_CODES 0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
/* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
-#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
-#define IXGBE_I2C_EEPROM_READ_MASK 0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
/* Flow control defines */
-#define IXGBE_TAF_SYM_PAUSE 0x400
-#define IXGBE_TAF_ASM_PAUSE 0x800
+#define IXGBE_TAF_SYM_PAUSE 0x400
+#define IXGBE_TAF_ASM_PAUSE 0x800
/* Bit-shift macros */
-#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
-#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
-#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
-#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
-#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
-#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
-#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
+#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
/* I2C SDA and SCL timing parameters for standard mode */
-#define IXGBE_I2C_T_HD_STA 4
-#define IXGBE_I2C_T_LOW 5
-#define IXGBE_I2C_T_HIGH 4
-#define IXGBE_I2C_T_SU_STA 5
-#define IXGBE_I2C_T_HD_DATA 5
-#define IXGBE_I2C_T_SU_DATA 1
-#define IXGBE_I2C_T_RISE 1
-#define IXGBE_I2C_T_FALL 1
-#define IXGBE_I2C_T_SU_STO 4
-#define IXGBE_I2C_T_BUF 5
+#define IXGBE_I2C_T_HD_STA 4
+#define IXGBE_I2C_T_LOW 5
+#define IXGBE_I2C_T_HIGH 4
+#define IXGBE_I2C_T_SU_STA 5
+#define IXGBE_I2C_T_HD_DATA 5
+#define IXGBE_I2C_T_SU_DATA 1
+#define IXGBE_I2C_T_RISE 1
+#define IXGBE_I2C_T_FALL 1
+#define IXGBE_I2C_T_SU_STO 4
+#define IXGBE_I2C_T_BUF 5
-#define IXGBE_TN_LASI_STATUS_REG 0x9005
-#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+#define IXGBE_TN_LASI_STATUS_REG 0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
@@ -102,40 +102,42 @@ s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data);
+ u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data);
+ u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
+ ixgbe_link_speed *speed,
+ bool *autoneg);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up);
+ ixgbe_link_speed *speed,
+ bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset);
+ u16 *list_offset,
+ u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
+ u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
+ u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+ u8 *eeprom_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data);
+ u8 eeprom_data);
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
#endif /* _IXGBE_PHY_H_ */
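
Taken together, the SFF byte offsets and the I2C EEPROM accessors declared above form the module-identification path used in ixgbe_phy.c. A hedged sketch of the first two reads of that sequence, going through the ops table as the .c file does (read_sfp_id is an illustrative name):

	/* Hedged sketch: read the SFF identifier, then the 10G comp codes. */
	static s32 read_sfp_id(struct ixgbe_hw *hw, u8 *comp_codes_10g)
	{
		u8 identifier;
		s32 status;

		status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
		    &identifier);
		if (status != IXGBE_SUCCESS)
			return (status);
		if (identifier != IXGBE_SFF_IDENTIFIER_SFP)
			return (IXGBE_ERR_SFP_NOT_SUPPORTED);
		return (hw->phy.ops.read_i2c_eeprom(hw,
		    IXGBE_SFF_10GBE_COMP_CODES, comp_codes_10g));
	}
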
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index eaa8605..f25b229 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -39,1978 +39,2171 @@
/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID 0x8086
+#define IXGBE_INTEL_VENDOR_ID 0x8086
/* Device IDs */
-#define IXGBE_DEV_ID_82598 0x10B6
-#define IXGBE_DEV_ID_82598_BX 0x1508
-#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
-#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
-#define IXGBE_DEV_ID_82598AT 0x10C8
-#define IXGBE_DEV_ID_82598AT2 0x150B
-#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
-#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
-#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
-#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
-#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
-#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
-#define IXGBE_DEV_ID_82599_KX4 0x10F7
-#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
-#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
-#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
-#define IXGBE_DEV_ID_82599_CX4 0x10F9
-#define IXGBE_DEV_ID_82599_SFP 0x10FB
-#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
-#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
-#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
-#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
-#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
-#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_82598 0x10B6
+#define IXGBE_DEV_ID_82598_BX 0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598AT 0x10C8
+#define IXGBE_DEV_ID_82598AT2 0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_82599_CX4 0x10F9
+#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599EN_SFP 0x1557
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X540T 0x1528
/* General Registers */
-#define IXGBE_CTRL 0x00000
-#define IXGBE_STATUS 0x00008
-#define IXGBE_CTRL_EXT 0x00018
-#define IXGBE_ESDP 0x00020
-#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL 0x00028
-#define IXGBE_LEDCTL 0x00200
-#define IXGBE_FRTIMER 0x00048
-#define IXGBE_TCPTIMER 0x0004C
-#define IXGBE_CORESPARE 0x00600
-#define IXGBE_EXVET 0x05078
+#define IXGBE_CTRL 0x00000
+#define IXGBE_STATUS 0x00008
+#define IXGBE_CTRL_EXT 0x00018
+#define IXGBE_ESDP 0x00020
+#define IXGBE_EODSDP 0x00028
+#define IXGBE_I2CCTL 0x00028
+#define IXGBE_PHY_GPIO 0x00028
+#define IXGBE_MAC_GPIO 0x00030
+#define IXGBE_PHYINT_STATUS0 0x00100
+#define IXGBE_PHYINT_STATUS1 0x00104
+#define IXGBE_PHYINT_STATUS2 0x00108
+#define IXGBE_LEDCTL 0x00200
+#define IXGBE_FRTIMER 0x00048
+#define IXGBE_TCPTIMER 0x0004C
+#define IXGBE_CORESPARE 0x00600
+#define IXGBE_EXVET 0x05078
/* NVM Registers */
-#define IXGBE_EEC 0x10010
-#define IXGBE_EERD 0x10014
-#define IXGBE_EEWR 0x10018
-#define IXGBE_FLA 0x1001C
-#define IXGBE_EEMNGCTL 0x10110
-#define IXGBE_EEMNGDATA 0x10114
-#define IXGBE_FLMNGCTL 0x10118
-#define IXGBE_FLMNGDATA 0x1011C
-#define IXGBE_FLMNGCNT 0x10120
-#define IXGBE_FLOP 0x1013C
-#define IXGBE_GRC 0x10200
+#define IXGBE_EEC 0x10010
+#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
+#define IXGBE_FLA 0x1001C
+#define IXGBE_EEMNGCTL 0x10110
+#define IXGBE_EEMNGDATA 0x10114
+#define IXGBE_FLMNGCTL 0x10118
+#define IXGBE_FLMNGDATA 0x1011C
+#define IXGBE_FLMNGCNT 0x10120
+#define IXGBE_FLOP 0x1013C
+#define IXGBE_GRC 0x10200
+#define IXGBE_SRAMREL 0x10210
+#define IXGBE_PHYDBG 0x10218
/* General Receive Control */
-#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
+#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
-#define IXGBE_VPDDIAG0 0x10204
-#define IXGBE_VPDDIAG1 0x10208
+#define IXGBE_VPDDIAG0 0x10204
+#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN 0x00000001
-#define IXGBE_I2C_CLK_OUT 0x00000002
-#define IXGBE_I2C_DATA_IN 0x00000004
-#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_OUT 0x00000008
/* Interrupt Registers */
-#define IXGBE_EICR 0x00800
-#define IXGBE_EICS 0x00808
-#define IXGBE_EIMS 0x00880
-#define IXGBE_EIMC 0x00888
-#define IXGBE_EIAC 0x00810
-#define IXGBE_EIAM 0x00890
-#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
-#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
-#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
-#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+#define IXGBE_EICR 0x00800
+#define IXGBE_EICS 0x00808
+#define IXGBE_EIMS 0x00880
+#define IXGBE_EIMC 0x00888
+#define IXGBE_EIAC 0x00810
+#define IXGBE_EIAM 0x00890
+#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
/* 82599 EITR is only 12 bits, with the lower 3 always zero */
/*
* 82598 EITR is 16 bits, but the limits below are based on the max
* supported across all ixgbe hardware
*/
-#define IXGBE_MAX_INT_RATE 488281
-#define IXGBE_MIN_INT_RATE 956
-#define IXGBE_MAX_EITR 0x00000FF8
-#define IXGBE_MIN_EITR 8
-#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
- (0x012300 + (((_i) - 24) * 4)))
-#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
-#define IXGBE_EITR_LLI_MOD 0x00008000
-#define IXGBE_EITR_CNT_WDIS 0x80000000
-#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
-#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
-#define IXGBE_EITRSEL 0x00894
-#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
-#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
-#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
-#define IXGBE_GPIE 0x00898
+#define IXGBE_MAX_INT_RATE 488281
+#define IXGBE_MIN_INT_RATE 956
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
+#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+ (0x012300 + (((_i) - 24) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
+#define IXGBE_EITR_LLI_MOD 0x00008000
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
+#define IXGBE_EITRSEL 0x00894
+#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE 0x00898
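
One relationship the EITR limits above encode but do not spell out: the interval field occupies bits 3..11 (the lower three bits read as zero), so the minimum value 8 corresponds to the 488,281 int/s ceiling (one interval tick, about 2.048 us) and 0xFF8 to the 956 int/s floor. A hedged sketch of turning a requested rate into a register value (rate_to_eitr is illustrative, not a driver function):

	/* Hedged sketch: clamp an interrupt rate into a valid EITR encoding. */
	static u32 rate_to_eitr(u32 ints_per_sec)
	{
		u32 eitr;

		if (ints_per_sec > IXGBE_MAX_INT_RATE)
			ints_per_sec = IXGBE_MAX_INT_RATE;
		if (ints_per_sec < IXGBE_MIN_INT_RATE)
			ints_per_sec = IXGBE_MIN_INT_RATE;
		/* One tick (value 8) is IXGBE_MAX_INT_RATE ints/s; scale down. */
		eitr = (IXGBE_MAX_INT_RATE / ints_per_sec) * IXGBE_MIN_EITR;
		return (eitr & IXGBE_EITR_ITR_INT_MASK);
	}
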
/* Flow Control Registers */
-#define IXGBE_FCADBUL 0x03210
-#define IXGBE_FCADBUH 0x03214
-#define IXGBE_FCAMACL 0x04328
-#define IXGBE_FCAMACH 0x0432C
-#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_PFCTOP 0x03008
-#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTV 0x032A0
-#define IXGBE_FCCFG 0x03D00
-#define IXGBE_TFCS 0x0CE00
+#define IXGBE_FCADBUL 0x03210
+#define IXGBE_FCADBUH 0x03214
+#define IXGBE_FCAMACL 0x04328
+#define IXGBE_FCAMACH 0x0432C
+#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_PFCTOP 0x03008
+#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV 0x032A0
+#define IXGBE_FCCFG 0x03D00
+#define IXGBE_TFCS 0x0CE00
/* Receive DMA Registers */
-#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
- (0x0D000 + ((_i - 64) * 0x40)))
-#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
- (0x0D004 + ((_i - 64) * 0x40)))
-#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
- (0x0D008 + ((_i - 64) * 0x40)))
-#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
- (0x0D010 + ((_i - 64) * 0x40)))
-#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
- (0x0D018 + ((_i - 64) * 0x40)))
-#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
- (0x0D028 + ((_i - 64) * 0x40)))
-#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
- (0x0D02C + ((_i - 64) * 0x40)))
-#define IXGBE_RSCDBU 0x03028
-#define IXGBE_RDDCC 0x02F20
-#define IXGBE_RXMEMWRAP 0x03190
-#define IXGBE_STARCTRL 0x03024
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+ (0x0D000 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+ (0x0D004 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+ (0x0D008 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+ (0x0D010 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+ (0x0D018 + (((_i) - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCDBU 0x03028
+#define IXGBE_RDDCC 0x02F20
+#define IXGBE_RXMEMWRAP 0x03190
+#define IXGBE_STARCTRL 0x03024
/*
* Split and Replication Receive Control Registers
* 00-15 : 0x02100 + n*4
* 16-63 : 0x01014 + n*0x40
* 64-127: 0x0D014 + (n-64)*0x40
*/
-#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
- (0x0D014 + ((_i - 64) * 0x40))))
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (0x0D014 + (((_i) - 64) * 0x40))))
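
The SRRCTL macro above folds the three address ranges from the comment into one expression; queue 70, for instance, lands in the high range at 0x0D014 + (70 - 64) * 0x40 = 0x0D194. Compile-time checks of that arithmetic, as a hedged sketch (C11 _Static_assert, which this header does not otherwise use):

	/* Hedged sketch: sanity-check one queue index from each range. */
	_Static_assert(IXGBE_SRRCTL(10) == 0x02128, "low: 0x02100 + 10 * 4");
	_Static_assert(IXGBE_SRRCTL(20) == 0x01514, "mid: 0x01014 + 20 * 0x40");
	_Static_assert(IXGBE_SRRCTL(70) == 0x0D194, "high: 0x0D014 + 6 * 0x40");
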
/*
* Rx DCA Control Register:
* 00-15 : 0x02200 + n*4
* 16-63 : 0x0100C + n*0x40
* 64-127: 0x0D00C + (n-64)*0x40
*/
-#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
- (0x0D00C + ((_i - 64) * 0x40))))
-#define IXGBE_RDRXCTL 0x02F00
-#define IXGBE_RDRXCTL_RSC_PUSH 0x80
-#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
- /* 8 of these 0x03C00 - 0x03C1C */
-#define IXGBE_RXCTRL 0x03000
-#define IXGBE_DROPEN 0x03D04
-#define IXGBE_RXPBSIZE_SHIFT 10
+#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (0x0D00C + (((_i) - 64) * 0x40))))
+#define IXGBE_RDRXCTL 0x02F00
+#define IXGBE_RDRXCTL_RSC_PUSH 0x80
+/* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+#define IXGBE_RXCTRL 0x03000
+#define IXGBE_DROPEN 0x03D04
+#define IXGBE_RXPBSIZE_SHIFT 10
/* Receive Registers */
-#define IXGBE_RXCSUM 0x05000
-#define IXGBE_RFCTL 0x05008
-#define IXGBE_DRECCCTL 0x02F08
-#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_RXCSUM 0x05000
+#define IXGBE_RFCTL 0x05008
+#define IXGBE_DRECCCTL 0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_DRECCCTL2 0x02F8C
/* Multicast Table Array - 128 entries */
-#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
-#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x0A200 + ((_i) * 8)))
-#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x0A204 + ((_i) * 8)))
-#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
-#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x0A204 + ((_i) * 8)))
+#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
/* Packet split receive type */
-#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
- (0x0EA00 + ((_i) * 4)))
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+ (0x0EA00 + ((_i) * 4)))
/* array of 4096 1-bit vlan filters */
-#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
+#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
/*array of 4096 4-bit vlan vmdq indices */
-#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
-#define IXGBE_FCTRL 0x05080
-#define IXGBE_VLNCTRL 0x05088
-#define IXGBE_MCSTCTRL 0x05090
-#define IXGBE_MRQC 0x05818
-#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
-#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
-#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
-#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
-#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
-#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
-#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
-#define IXGBE_RQTC 0x0EC70
-#define IXGBE_MTQC 0x08120
-#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
-#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
-#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
-#define IXGBE_VT_CTL 0x051B0
-#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
-#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
-#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
-#define IXGBE_QDE 0x2F04
-#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
-#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
-#define IXGBE_VMRCTL(_i) (0x0F600 + ((_i) * 4))
-#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
-#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
-#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
-#define IXGBE_LLITHRESH 0x0EC90
-#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_IMIRVP 0x05AC0
-#define IXGBE_VMD_CTL 0x0581C
-#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
-#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL 0x05080
+#define IXGBE_VLNCTRL 0x05088
+#define IXGBE_MCSTCTRL 0x05090
+#define IXGBE_MRQC 0x05818
+#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
+#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
+#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
+#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
+#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
+#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
+#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
+#define IXGBE_RQTC 0x0EC70
+#define IXGBE_MTQC 0x08120
+#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_VT_CTL 0x051B0
+#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
+/* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
+#define IXGBE_QDE 0x2F04
+#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_RXFECCERR0 0x051B8
+#define IXGBE_LLITHRESH 0x0EC90
+#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIRVP 0x05AC0
+#define IXGBE_VMD_CTL 0x0581C
+#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
/* Flow Director registers */
-#define IXGBE_FDIRCTRL 0x0EE00
-#define IXGBE_FDIRHKEY 0x0EE68
-#define IXGBE_FDIRSKEY 0x0EE6C
-#define IXGBE_FDIRDIP4M 0x0EE3C
-#define IXGBE_FDIRSIP4M 0x0EE40
-#define IXGBE_FDIRTCPM 0x0EE44
-#define IXGBE_FDIRUDPM 0x0EE48
-#define IXGBE_FDIRIP6M 0x0EE74
-#define IXGBE_FDIRM 0x0EE70
+#define IXGBE_FDIRCTRL 0x0EE00
+#define IXGBE_FDIRHKEY 0x0EE68
+#define IXGBE_FDIRSKEY 0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM 0x0EE44
+#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRIP6M 0x0EE74
+#define IXGBE_FDIRM 0x0EE70
/* Flow Director Stats registers */
-#define IXGBE_FDIRFREE 0x0EE38
-#define IXGBE_FDIRLEN 0x0EE4C
-#define IXGBE_FDIRUSTAT 0x0EE50
-#define IXGBE_FDIRFSTAT 0x0EE54
-#define IXGBE_FDIRMATCH 0x0EE58
-#define IXGBE_FDIRMISS 0x0EE5C
+#define IXGBE_FDIRFREE 0x0EE38
+#define IXGBE_FDIRLEN 0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS 0x0EE5C
/* Flow Director Programming registers */
#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
-#define IXGBE_FDIRIPSA 0x0EE18
-#define IXGBE_FDIRIPDA 0x0EE1C
-#define IXGBE_FDIRPORT 0x0EE20
-#define IXGBE_FDIRVLAN 0x0EE24
-#define IXGBE_FDIRHASH 0x0EE28
-#define IXGBE_FDIRCMD 0x0EE2C
+#define IXGBE_FDIRIPSA 0x0EE18
+#define IXGBE_FDIRIPDA 0x0EE1C
+#define IXGBE_FDIRPORT 0x0EE20
+#define IXGBE_FDIRVLAN 0x0EE24
+#define IXGBE_FDIRHASH 0x0EE28
+#define IXGBE_FDIRCMD 0x0EE2C
/* Transmit DMA registers */
-#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
-#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
-#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
-#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
-#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
-#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
-#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
-#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
-#define IXGBE_DTXCTL 0x07E00
-
-#define IXGBE_DMATXCTL 0x04A80
-#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
-#define IXGBE_PFDTXGSWC 0x08220
-#define IXGBE_DTXMXSZRQ 0x08100
-#define IXGBE_DTXTCPFLGL 0x04A88
-#define IXGBE_DTXTCPFLGH 0x04A8C
-#define IXGBE_LBDRPEN 0x0CA00
-#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
-
-#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
-#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
-#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
-#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
-
-#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31) */
+#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL 0x07E00
+
+#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC 0x08220
+#define IXGBE_DTXMXSZRQ 0x08100
+#define IXGBE_DTXTCPFLGL 0x04A88
+#define IXGBE_DTXTCPFLGH 0x04A8C
+#define IXGBE_LBDRPEN 0x0CA00
+#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+
+#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
+#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
+#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
/* Anti-spoofing defines */
-#define IXGBE_SPOOF_MACAS_MASK 0xFF
-#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
-#define IXGBE_SPOOF_VLANAS_SHIFT 8
-#define IXGBE_PFVFSPOOF_REG_COUNT 8
-#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
+#define IXGBE_SPOOF_MACAS_MASK 0xFF
+#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_PFVFSPOOF_REG_COUNT 8
+/* 16 of these (0-15) */
+#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
/* Tx DCA Control register : 128 of these (0-127) */
-#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
-#define IXGBE_TIPG 0x0CB00
-#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_MNGTXMAP 0x0CD10
-#define IXGBE_TIPG_FIBER_DEFAULT 3
-#define IXGBE_TXPBSIZE_SHIFT 10
+#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
+#define IXGBE_TIPG 0x0CB00
+#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_MNGTXMAP 0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT 3
+#define IXGBE_TXPBSIZE_SHIFT 10
/* Wake up registers */
-#define IXGBE_WUC 0x05800
-#define IXGBE_WUFC 0x05808
-#define IXGBE_WUS 0x05810
-#define IXGBE_IPAV 0x05838
-#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
-#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
-
-#define IXGBE_WUPL 0x05900
-#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
-#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
-#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host
- * Filter Table */
-
-#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
-#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
+#define IXGBE_WUC 0x05800
+#define IXGBE_WUFC 0x05808
+#define IXGBE_WUS 0x05810
+#define IXGBE_IPAV 0x05838
+#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
+
+#define IXGBE_WUPL 0x05900
+#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
+/* Ext Flexible Host Filter Table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100))
+
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
/* Each Flexible Filter is at most 128 (0x80) bytes in length */
-#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
-#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
-#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
+#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
-#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
-#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
+#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
/* Wake Up Filter Control */
-#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
-#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
-
-#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
-#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
-#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
-#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
-#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
-#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
-#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+/* Mask for Ext. flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000
+#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
+#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
/* Wake Up Status */
-#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
-#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
-#define IXGBE_WUS_EX IXGBE_WUFC_EX
-#define IXGBE_WUS_MC IXGBE_WUFC_MC
-#define IXGBE_WUS_BC IXGBE_WUFC_BC
-#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
-#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
-#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
-#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
-#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
-#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
-#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
-#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
-#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
-#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
-#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX IXGBE_WUFC_EX
+#define IXGBE_WUS_MC IXGBE_WUFC_MC
+#define IXGBE_WUS_BC IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
/* Wake Up Packet Length */
-#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
/* DCB registers */
-#define IXGBE_RMCS 0x03D00
-#define IXGBE_DPMCS 0x07F40
-#define IXGBE_PDPMCS 0x0CD00
-#define IXGBE_RUPPBMR 0x050A0
-#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8
+#define IXGBE_RMCS 0x03D00
+#define IXGBE_DPMCS 0x07F40
+#define IXGBE_PDPMCS 0x0CD00
+#define IXGBE_RUPPBMR 0x050A0
+#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
/* Security Control Registers */
-#define IXGBE_SECTXCTRL 0x08800
-#define IXGBE_SECTXSTAT 0x08804
-#define IXGBE_SECTXBUFFAF 0x08808
-#define IXGBE_SECTXMINIFG 0x08810
-#define IXGBE_SECTXSTAT 0x08804
-#define IXGBE_SECRXCTRL 0x08D00
-#define IXGBE_SECRXSTAT 0x08D04
+#define IXGBE_SECTXCTRL 0x08800
+#define IXGBE_SECTXSTAT 0x08804
+#define IXGBE_SECTXBUFFAF 0x08808
+#define IXGBE_SECTXMINIFG 0x08810
+#define IXGBE_SECRXCTRL 0x08D00
+#define IXGBE_SECRXSTAT 0x08D04
/* Security Bit Fields and Masks */
-#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
-#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
-#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
+#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
+#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
+#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
-#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
-#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
+#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
+#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
-#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
-#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
+#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
+#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
-#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
-#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
+#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
+#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
/* LinkSec (MacSec) Registers */
-#define IXGBE_LSECTXCAP 0x08A00
-#define IXGBE_LSECRXCAP 0x08F00
-#define IXGBE_LSECTXCTRL 0x08A04
-#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
-#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
-#define IXGBE_LSECTXSA 0x08A10
-#define IXGBE_LSECTXPN0 0x08A14
-#define IXGBE_LSECTXPN1 0x08A18
-#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECRXCTRL 0x08F04
-#define IXGBE_LSECRXSCL 0x08F08
-#define IXGBE_LSECRXSCH 0x08F0C
-#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
-#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
-#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
-#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
-#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
-#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
-#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
-#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
-#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
-#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
-#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
-#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
-#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
-#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
-#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
-#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
-#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
-#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
-#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
-#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
+#define IXGBE_LSECTXCAP 0x08A00
+#define IXGBE_LSECRXCAP 0x08F00
+#define IXGBE_LSECTXCTRL 0x08A04
+#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
+#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
+#define IXGBE_LSECTXSA 0x08A10
+#define IXGBE_LSECTXPN0 0x08A14
+#define IXGBE_LSECTXPN1 0x08A18
+#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECRXCTRL 0x08F04
+#define IXGBE_LSECRXSCL 0x08F08
+#define IXGBE_LSECRXSCH 0x08F0C
+#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
+#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
+#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
+#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
+#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
+#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
+#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
+#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
+#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
+#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
+#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
+#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
+#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
+#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
+#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
+#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
+#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
+#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
+#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
+#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
/* LinkSec (MacSec) Bit Fields and Masks */
-#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
-#define IXGBE_LSECTXCAP_SUM_SHIFT 16
-#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
-#define IXGBE_LSECRXCAP_SUM_SHIFT 16
-
-#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
-#define IXGBE_LSECTXCTRL_DISABLE 0x0
-#define IXGBE_LSECTXCTRL_AUTH 0x1
-#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
-#define IXGBE_LSECTXCTRL_AISCI 0x00000020
-#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
-#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
-
-#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
-#define IXGBE_LSECRXCTRL_EN_SHIFT 2
-#define IXGBE_LSECRXCTRL_DISABLE 0x0
-#define IXGBE_LSECRXCTRL_CHECK 0x1
-#define IXGBE_LSECRXCTRL_STRICT 0x2
-#define IXGBE_LSECRXCTRL_DROP 0x3
-#define IXGBE_LSECRXCTRL_PLSH 0x00000040
-#define IXGBE_LSECRXCTRL_RP 0x00000080
-#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECTXCAP_SUM_SHIFT 16
+#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECRXCAP_SUM_SHIFT 16
+
+#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
+#define IXGBE_LSECTXCTRL_DISABLE 0x0
+#define IXGBE_LSECTXCTRL_AUTH 0x1
+#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define IXGBE_LSECTXCTRL_AISCI 0x00000020
+#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
+#define IXGBE_LSECRXCTRL_EN_SHIFT 2
+#define IXGBE_LSECRXCTRL_DISABLE 0x0
+#define IXGBE_LSECRXCTRL_CHECK 0x1
+#define IXGBE_LSECRXCTRL_STRICT 0x2
+#define IXGBE_LSECRXCTRL_DROP 0x3
+#define IXGBE_LSECRXCTRL_PLSH 0x00000040
+#define IXGBE_LSECRXCTRL_RP 0x00000080
+#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
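
/*
 * A minimal sketch of how a driver might select the LinkSec (MACsec)
 * TX mode with the masks above.  IXGBE_READ_REG/IXGBE_WRITE_REG are
 * the register accessors from ixgbe_osdep.h; the helper name and flow
 * are hypothetical.
 */
static void
example_lsec_tx_auth_encrypt(struct ixgbe_hw *hw)
{
	u32 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);

	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;	/* clear the 2-bit mode field */
	ctrl |= IXGBE_LSECTXCTRL_AUTH_ENCRYPT;	/* authenticate and encrypt */
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
}
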
/* IpSec Registers */
-#define IXGBE_IPSTXIDX 0x08900
-#define IXGBE_IPSTXSALT 0x08904
-#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXIDX 0x08E00
-#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSPI 0x08E14
-#define IXGBE_IPSRXIPIDX 0x08E18
-#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSALT 0x08E2C
-#define IXGBE_IPSRXMOD 0x08E30
-
-#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
+#define IXGBE_IPSTXIDX 0x08900
+#define IXGBE_IPSTXSALT 0x08904
+#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXIDX 0x08E00
+#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSPI 0x08E14
+#define IXGBE_IPSRXIPIDX 0x08E18
+#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSALT 0x08E2C
+#define IXGBE_IPSRXMOD 0x08E30
+
+#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
/* DCB registers */
-#define IXGBE_RTRPCS 0x02430
-#define IXGBE_RTTDCS 0x04900
-#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
-#define IXGBE_RTTPCS 0x0CD00
-#define IXGBE_RTRUP2TC 0x03020
-#define IXGBE_RTTUP2TC 0x0C800
-#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDQSEL 0x04904
-#define IXGBE_RTTDT1C 0x04908
-#define IXGBE_RTTDT1S 0x0490C
-#define IXGBE_RTTDTECC 0x04990
-#define IXGBE_RTTDTECC_NO_BCN 0x00000100
-
-#define IXGBE_RTTBCNRC 0x04984
-#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
-#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
-#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
+#define IXGBE_RTRPCS 0x02430
+#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTPCS 0x0CD00
+#define IXGBE_RTRUP2TC 0x03020
+#define IXGBE_RTTUP2TC 0x0C800
+#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDQSEL 0x04904
+#define IXGBE_RTTDT1C 0x04908
+#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTDTECC 0x04990
+#define IXGBE_RTTDTECC_NO_BCN 0x00000100
+
+#define IXGBE_RTTBCNRC 0x04984
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
#define IXGBE_RTTBCNRC_RF_INT_MASK \
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+#define IXGBE_RTTBCNRM 0x04980
/* BCN (for DCB) Registers */
-#define IXGBE_RTTBCNRM 0x04980
-#define IXGBE_RTTBCNRS 0x04988
-#define IXGBE_RTTBCNCR 0x08B00
-#define IXGBE_RTTBCNACH 0x08B04
-#define IXGBE_RTTBCNACL 0x08B08
-#define IXGBE_RTTBCNTG 0x04A90
-#define IXGBE_RTTBCNIDX 0x08B0C
-#define IXGBE_RTTBCNCP 0x08B10
-#define IXGBE_RTFRTIMER 0x08B14
-#define IXGBE_RTTBCNRTT 0x05150
-#define IXGBE_RTTBCNRD 0x0498C
+#define IXGBE_RTTBCNRS 0x04988
+#define IXGBE_RTTBCNCR 0x08B00
+#define IXGBE_RTTBCNACH 0x08B04
+#define IXGBE_RTTBCNACL 0x08B08
+#define IXGBE_RTTBCNTG 0x04A90
+#define IXGBE_RTTBCNIDX 0x08B0C
+#define IXGBE_RTTBCNCP 0x08B10
+#define IXGBE_RTFRTIMER 0x08B14
+#define IXGBE_RTTBCNRTT 0x05150
+#define IXGBE_RTTBCNRD 0x0498C
/* FCoE DMA Context Registers */
-#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
-#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
-#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
-#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
-#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
-#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
-#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
-#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
-#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
-#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
-#define IXGBE_FCBUFF_OFFSET_SHIFT 16
-#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
-#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
-#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
-#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
-#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
+#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0*/
+#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
+#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
+#define IXGBE_FCBUFF_OFFSET_SHIFT 16
+#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
+#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
/* FCoE SOF/EOF */
-#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
-#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
-#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
-#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
/* FCoE Filter Context Registers */
-#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
-#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
-#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
-#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
-#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
-#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
/* FCoE Receive Control */
-#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
-#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
-#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
/* FCoE Redirection */
-#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
-#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
-#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
-#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
-#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
-#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
+#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */
+#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
/* Stats registers */
-#define IXGBE_CRCERRS 0x04000
-#define IXGBE_ILLERRC 0x04004
-#define IXGBE_ERRBC 0x04008
-#define IXGBE_MSPDC 0x04010
-#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
-#define IXGBE_MLFC 0x04034
-#define IXGBE_MRFC 0x04038
-#define IXGBE_RLEC 0x04040
-#define IXGBE_LXONTXC 0x03F60
-#define IXGBE_LXONRXC 0x0CF60
-#define IXGBE_LXOFFTXC 0x03F68
-#define IXGBE_LXOFFRXC 0x0CF68
-#define IXGBE_LXONRXCNT 0x041A4
-#define IXGBE_LXOFFRXCNT 0x041A8
-#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
-#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
-#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
-#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
-#define IXGBE_PRC64 0x0405C
-#define IXGBE_PRC127 0x04060
-#define IXGBE_PRC255 0x04064
-#define IXGBE_PRC511 0x04068
-#define IXGBE_PRC1023 0x0406C
-#define IXGBE_PRC1522 0x04070
-#define IXGBE_GPRC 0x04074
-#define IXGBE_BPRC 0x04078
-#define IXGBE_MPRC 0x0407C
-#define IXGBE_GPTC 0x04080
-#define IXGBE_GORCL 0x04088
-#define IXGBE_GORCH 0x0408C
-#define IXGBE_GOTCL 0x04090
-#define IXGBE_GOTCH 0x04094
-#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
-#define IXGBE_RUC 0x040A4
-#define IXGBE_RFC 0x040A8
-#define IXGBE_ROC 0x040AC
-#define IXGBE_RJC 0x040B0
-#define IXGBE_MNGPRC 0x040B4
-#define IXGBE_MNGPDC 0x040B8
-#define IXGBE_MNGPTC 0x0CF90
-#define IXGBE_TORL 0x040C0
-#define IXGBE_TORH 0x040C4
-#define IXGBE_TPR 0x040D0
-#define IXGBE_TPT 0x040D4
-#define IXGBE_PTC64 0x040D8
-#define IXGBE_PTC127 0x040DC
-#define IXGBE_PTC255 0x040E0
-#define IXGBE_PTC511 0x040E4
-#define IXGBE_PTC1023 0x040E8
-#define IXGBE_PTC1522 0x040EC
-#define IXGBE_MPTC 0x040F0
-#define IXGBE_BPTC 0x040F4
-#define IXGBE_XEC 0x04120
-#define IXGBE_SSVPC 0x08780
-
-#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
-#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
- (0x08600 + ((_i) * 4)))
-#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
-
-#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */
-#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
-#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
-#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
-#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
-#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
-#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
-#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */
-#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */
+#define IXGBE_CRCERRS 0x04000
+#define IXGBE_ILLERRC 0x04004
+#define IXGBE_ERRBC 0x04008
+#define IXGBE_MSPDC 0x04010
+#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC 0x04034
+#define IXGBE_MRFC 0x04038
+#define IXGBE_RLEC 0x04040
+#define IXGBE_LXONTXC 0x03F60
+#define IXGBE_LXONRXC 0x0CF60
+#define IXGBE_LXOFFTXC 0x03F68
+#define IXGBE_LXOFFRXC 0x0CF68
+#define IXGBE_LXONRXCNT 0x041A4
+#define IXGBE_LXOFFRXCNT 0x041A8
+#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64 0x0405C
+#define IXGBE_PRC127 0x04060
+#define IXGBE_PRC255 0x04064
+#define IXGBE_PRC511 0x04068
+#define IXGBE_PRC1023 0x0406C
+#define IXGBE_PRC1522 0x04070
+#define IXGBE_GPRC 0x04074
+#define IXGBE_BPRC 0x04078
+#define IXGBE_MPRC 0x0407C
+#define IXGBE_GPTC 0x04080
+#define IXGBE_GORCL 0x04088
+#define IXGBE_GORCH 0x0408C
+#define IXGBE_GOTCL 0x04090
+#define IXGBE_GOTCH 0x04094
+#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC 0x040A4
+#define IXGBE_RFC 0x040A8
+#define IXGBE_ROC 0x040AC
+#define IXGBE_RJC 0x040B0
+#define IXGBE_MNGPRC 0x040B4
+#define IXGBE_MNGPDC 0x040B8
+#define IXGBE_MNGPTC 0x0CF90
+#define IXGBE_TORL 0x040C0
+#define IXGBE_TORH 0x040C4
+#define IXGBE_TPR 0x040D0
+#define IXGBE_TPT 0x040D4
+#define IXGBE_PTC64 0x040D8
+#define IXGBE_PTC127 0x040DC
+#define IXGBE_PTC255 0x040E0
+#define IXGBE_PTC511 0x040E4
+#define IXGBE_PTC1023 0x040E8
+#define IXGBE_PTC1522 0x040EC
+#define IXGBE_MPTC 0x040F0
+#define IXGBE_BPTC 0x040F4
+#define IXGBE_XEC 0x04120
+#define IXGBE_SSVPC 0x08780
+
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+ (0x08600 + ((_i) * 4)))
+#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
+
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
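
/*
 * A minimal sketch: the per-queue byte counters are split into 32-bit
 * low/high register pairs.  This assumes the usual convention of
 * reading the low half first; the helper is hypothetical (u32/u64 are
 * the ixgbe_osdep.h typedefs).
 */
static u64
example_read_queue_rx_bytes(struct ixgbe_hw *hw, int queue)
{
	u64 bytes;

	bytes = IXGBE_READ_REG(hw, IXGBE_QBRC_L(queue));
	bytes |= (u64)IXGBE_READ_REG(hw, IXGBE_QBRC_H(queue)) << 32;
	return (bytes);
}
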
+#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */
+#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */
+#define IXGBE_O2BGPTC 0x041C4
+#define IXGBE_O2BSPC 0x087B0
+#define IXGBE_B2OSPC 0x041C0
+#define IXGBE_B2OGPRC 0x02F90
+#define IXGBE_BUPRC 0x04180
+#define IXGBE_BMPRC 0x04184
+#define IXGBE_BBPRC 0x04188
+#define IXGBE_BUPTC 0x0418C
+#define IXGBE_BMPTC 0x04190
+#define IXGBE_BBPTC 0x04194
+#define IXGBE_BCRCERRS 0x04198
+#define IXGBE_BXONRXC 0x0419C
+#define IXGBE_BXOFFRXC 0x041E0
+#define IXGBE_BXONTXC 0x041E4
+#define IXGBE_BXOFFTXC 0x041E8
+#define IXGBE_PCRC8ECL 0x0E810
+#define IXGBE_PCRC8ECH 0x0E811
+#define IXGBE_PCRC8ECH_MASK 0x1F
+#define IXGBE_LDPCECL 0x0E820
+#define IXGBE_LDPCECH 0x0E821
/* Management */
-#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MANC 0x05820
-#define IXGBE_MFVAL 0x05824
-#define IXGBE_MANC2H 0x05860
-#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MIPAF 0x058B0
-#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
-#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
-#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
-#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_LSWFW 0x15014
+#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC 0x05820
+#define IXGBE_MFVAL 0x05824
+#define IXGBE_MANC2H 0x05860
+#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF 0x058B0
+#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
+#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_LSWFW 0x15014
+#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
+#define IXGBE_BMCIPVAL 0x05060
+#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001
+#define IXGBE_BMCIP_IPADDR_VALID 0x00000002
+
+/* Management Bit Fields and Masks */
+#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */
+#define IXGBE_MANC_EN_BMC2OS_SHIFT 28
+
+/* Firmware Semaphore Register */
+#define IXGBE_FWSM_MODE_MASK 0xE
/* ARC Subsystem registers */
-#define IXGBE_HICR 0x15F00
-#define IXGBE_FWSTS 0x15F0C
-#define IXGBE_HSMC0R 0x15F04
-#define IXGBE_HSMC1R 0x15F08
-#define IXGBE_SWSR 0x15F10
-#define IXGBE_HFDR 0x15FE8
-#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
+#define IXGBE_HICR 0x15F00
+#define IXGBE_FWSTS 0x15F0C
+#define IXGBE_HSMC0R 0x15F04
+#define IXGBE_HSMC1R 0x15F08
+#define IXGBE_SWSR 0x15F10
+#define IXGBE_HFDR 0x15FE8
+#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
+
+#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define IXGBE_HICR_C 0x02
+#define IXGBE_HICR_SV 0x04 /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE 0x40
+#define IXGBE_HICR_FW_RESET 0x80
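
/*
 * A sketch of the HICR handshake the bits above suggest: the driver
 * sets the C bit, firmware clears it when done, and SV marks a valid
 * status.  msec_delay() is the ixgbe_osdep.h delay macro; the helper
 * and the 500 ms bound are assumptions.
 */
static s32
example_hicr_command(struct ixgbe_hw *hw)
{
	u32 hicr;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_HICR,
	    IXGBE_READ_REG(hw, IXGBE_HICR) | IXGBE_HICR_C);
	for (i = 0; i < 500; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))	/* firmware finished */
			break;
		msec_delay(1);
	}
	if ((hicr & IXGBE_HICR_C) || !(hicr & IXGBE_HICR_SV))
		return (-1);			/* timeout or invalid status */
	return (0);
}
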
/* PCI-E registers */
-#define IXGBE_GCR 0x11000
-#define IXGBE_GTV 0x11004
-#define IXGBE_FUNCTAG 0x11008
-#define IXGBE_GLT 0x1100C
-#define IXGBE_GSCL_1 0x11010
-#define IXGBE_GSCL_2 0x11014
-#define IXGBE_GSCL_3 0x11018
-#define IXGBE_GSCL_4 0x1101C
-#define IXGBE_GSCN_0 0x11020
-#define IXGBE_GSCN_1 0x11024
-#define IXGBE_GSCN_2 0x11028
-#define IXGBE_GSCN_3 0x1102C
-#define IXGBE_FACTPS 0x10150
-#define IXGBE_PCIEANACTL 0x11040
-#define IXGBE_SWSM 0x10140
-#define IXGBE_FWSM 0x10148
-#define IXGBE_GSSR 0x10160
-#define IXGBE_MREVID 0x11064
-#define IXGBE_DCA_ID 0x11070
-#define IXGBE_DCA_CTRL 0x11074
-#define IXGBE_SWFW_SYNC IXGBE_GSSR
+#define IXGBE_GCR 0x11000
+#define IXGBE_GTV 0x11004
+#define IXGBE_FUNCTAG 0x11008
+#define IXGBE_GLT 0x1100C
+#define IXGBE_PCIEPIPEADR 0x11004
+#define IXGBE_PCIEPIPEDAT 0x11008
+#define IXGBE_GSCL_1 0x11010
+#define IXGBE_GSCL_2 0x11014
+#define IXGBE_GSCL_3 0x11018
+#define IXGBE_GSCL_4 0x1101C
+#define IXGBE_GSCN_0 0x11020
+#define IXGBE_GSCN_1 0x11024
+#define IXGBE_GSCN_2 0x11028
+#define IXGBE_GSCN_3 0x1102C
+#define IXGBE_FACTPS 0x10150
+#define IXGBE_PCIEANACTL 0x11040
+#define IXGBE_SWSM 0x10140
+#define IXGBE_FWSM 0x10148
+#define IXGBE_GSSR 0x10160
+#define IXGBE_MREVID 0x11064
+#define IXGBE_DCA_ID 0x11070
+#define IXGBE_DCA_CTRL 0x11074
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
/* PCI-E registers 82599-Specific */
-#define IXGBE_GCR_EXT 0x11050
-#define IXGBE_GSCL_5_82599 0x11030
-#define IXGBE_GSCL_6_82599 0x11034
-#define IXGBE_GSCL_7_82599 0x11038
-#define IXGBE_GSCL_8_82599 0x1103C
-#define IXGBE_PHYADR_82599 0x11040
-#define IXGBE_PHYDAT_82599 0x11044
-#define IXGBE_PHYCTL_82599 0x11048
-#define IXGBE_PBACLR_82599 0x11068
-#define IXGBE_CIAA_82599 0x11088
-#define IXGBE_CIAD_82599 0x1108C
-#define IXGBE_INTRPT_CSR_82599 0x110B0
-#define IXGBE_INTRPT_MASK_82599 0x110B8
-#define IXGBE_CDQ_MBR_82599 0x110B4
-#define IXGBE_MISC_REG_82599 0x110F0
-#define IXGBE_ECC_CTRL_0_82599 0x11100
-#define IXGBE_ECC_CTRL_1_82599 0x11104
-#define IXGBE_ECC_STATUS_82599 0x110E0
-#define IXGBE_BAR_CTRL_82599 0x110F4
+#define IXGBE_GCR_EXT 0x11050
+#define IXGBE_GSCL_5_82599 0x11030
+#define IXGBE_GSCL_6_82599 0x11034
+#define IXGBE_GSCL_7_82599 0x11038
+#define IXGBE_GSCL_8_82599 0x1103C
+#define IXGBE_PHYADR_82599 0x11040
+#define IXGBE_PHYDAT_82599 0x11044
+#define IXGBE_PHYCTL_82599 0x11048
+#define IXGBE_PBACLR_82599 0x11068
+#define IXGBE_CIAA_82599 0x11088
+#define IXGBE_CIAD_82599 0x1108C
+#define IXGBE_PICAUSE 0x110B0
+#define IXGBE_PIENA 0x110B8
+#define IXGBE_CDQ_MBR_82599 0x110B4
+#define IXGBE_PCIESPARE 0x110BC
+#define IXGBE_MISC_REG_82599 0x110F0
+#define IXGBE_ECC_CTRL_0_82599 0x11100
+#define IXGBE_ECC_CTRL_1_82599 0x11104
+#define IXGBE_ECC_STATUS_82599 0x110E0
+#define IXGBE_BAR_CTRL_82599 0x110F4
/* PCI Express Control */
-#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
-#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
-#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
-#define IXGBE_GCR_CAP_VER2 0x00040000
-
-#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
-#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
-#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
-#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
-#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
- IXGBE_GCR_EXT_VT_MODE_64)
+#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define IXGBE_GCR_CAP_VER2 0x00040000
+
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
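
/*
 * A minimal sketch: IXGBE_GCR_EXT_SRIOV combines MSI-X enable with
 * 64-pool VT mode, so turning on SR-IOV operation reduces to a
 * read-modify-write of GCR_EXT.  The helper is hypothetical.
 */
static void
example_enable_sriov_mode(struct ixgbe_hw *hw)
{
	u32 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);

	gcr_ext |= IXGBE_GCR_EXT_SRIOV;	/* MSIX_EN | VT_MODE_64 */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
}
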
/* Time Sync Registers */
-#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
-#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
-#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
-#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
-#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
-#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
-#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
-#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
-#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
-#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
-#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
-#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
-#define IXGBE_RXUDP 0x08C1C /* Time Sync Rx UDP Port - RW */
+#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
+#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
+#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
+#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
+#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
+#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
+#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
+#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
+#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
+#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
+#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
+#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
+#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
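
/*
 * A minimal sketch: SYSTIM is a 64-bit counter exposed as the
 * SYSTIML/SYSTIMH pair.  This assumes the common pattern where the
 * low half is read first so the two halves stay coherent; the helper
 * is hypothetical.
 */
static u64
example_read_systim(struct ixgbe_hw *hw)
{
	u64 systim;

	systim = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
	systim |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
	return (systim);
}
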
/* Diagnostic Registers */
-#define IXGBE_RDSTATCTL 0x02C20
-#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
-#define IXGBE_RDHMPN 0x02F08
-#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
-#define IXGBE_RDPROBE 0x02F20
-#define IXGBE_RDMAM 0x02F30
-#define IXGBE_RDMAD 0x02F34
-#define IXGBE_TDSTATCTL 0x07C20
-#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
-#define IXGBE_TDHMPN 0x07F08
-#define IXGBE_TDHMPN2 0x082FC
-#define IXGBE_TXDESCIC 0x082CC
-#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
-#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
-#define IXGBE_TDPROBE 0x07F20
-#define IXGBE_TXBUFCTRL 0x0C600
-#define IXGBE_TXBUFDATA0 0x0C610
-#define IXGBE_TXBUFDATA1 0x0C614
-#define IXGBE_TXBUFDATA2 0x0C618
-#define IXGBE_TXBUFDATA3 0x0C61C
-#define IXGBE_RXBUFCTRL 0x03600
-#define IXGBE_RXBUFDATA0 0x03610
-#define IXGBE_RXBUFDATA1 0x03614
-#define IXGBE_RXBUFDATA2 0x03618
-#define IXGBE_RXBUFDATA3 0x0361C
-#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_RFVAL 0x050A4
-#define IXGBE_MDFTC1 0x042B8
-#define IXGBE_MDFTC2 0x042C0
-#define IXGBE_MDFTFIFO1 0x042C4
-#define IXGBE_MDFTFIFO2 0x042C8
-#define IXGBE_MDFTS 0x042CC
-#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
-#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
-#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
-#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
-#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
-#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
-#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
-#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
-#define IXGBE_PCIEECCCTL 0x1106C
-#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
-#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
-#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
-#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
-#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
-#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
-#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
-#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
-#define IXGBE_PCIEECCCTL0 0x11100
-#define IXGBE_PCIEECCCTL1 0x11104
-#define IXGBE_RXDBUECC 0x03F70
-#define IXGBE_TXDBUECC 0x0CF70
-#define IXGBE_RXDBUEST 0x03F74
-#define IXGBE_TXDBUEST 0x0CF74
-#define IXGBE_PBTXECC 0x0C300
-#define IXGBE_PBRXECC 0x03300
-#define IXGBE_GHECCR 0x110B0
+#define IXGBE_RDSTATCTL 0x02C20
+#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN 0x02F08
+#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE 0x02F20
+#define IXGBE_RDMAM 0x02F30
+#define IXGBE_RDMAD 0x02F34
+#define IXGBE_TDSTATCTL 0x07C20
+#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+#define IXGBE_TDHMPN 0x07F08
+#define IXGBE_TDHMPN2 0x082FC
+#define IXGBE_TXDESCIC 0x082CC
+#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
+#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
+#define IXGBE_TDPROBE 0x07F20
+#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_TXBUFDATA0 0x0C610
+#define IXGBE_TXBUFDATA1 0x0C614
+#define IXGBE_TXBUFDATA2 0x0C618
+#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_RXBUFCTRL 0x03600
+#define IXGBE_RXBUFDATA0 0x03610
+#define IXGBE_RXBUFDATA1 0x03614
+#define IXGBE_RXBUFDATA2 0x03618
+#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL 0x050A4
+#define IXGBE_MDFTC1 0x042B8
+#define IXGBE_MDFTC2 0x042C0
+#define IXGBE_MDFTFIFO1 0x042C4
+#define IXGBE_MDFTFIFO2 0x042C8
+#define IXGBE_MDFTS 0x042CC
+#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
+#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
+#define IXGBE_PCIEECCCTL0 0x11100
+#define IXGBE_PCIEECCCTL1 0x11104
+#define IXGBE_RXDBUECC 0x03F70
+#define IXGBE_TXDBUECC 0x0CF70
+#define IXGBE_RXDBUEST 0x03F74
+#define IXGBE_TXDBUEST 0x0CF74
+#define IXGBE_PBTXECC 0x0C300
+#define IXGBE_PBRXECC 0x03300
+#define IXGBE_GHECCR 0x110B0
/* MAC Registers */
-#define IXGBE_PCS1GCFIG 0x04200
-#define IXGBE_PCS1GLCTL 0x04208
-#define IXGBE_PCS1GLSTA 0x0420C
-#define IXGBE_PCS1GDBG0 0x04210
-#define IXGBE_PCS1GDBG1 0x04214
-#define IXGBE_PCS1GANA 0x04218
-#define IXGBE_PCS1GANLP 0x0421C
-#define IXGBE_PCS1GANNP 0x04220
-#define IXGBE_PCS1GANLPNP 0x04224
-#define IXGBE_HLREG0 0x04240
-#define IXGBE_HLREG1 0x04244
-#define IXGBE_PAP 0x04248
-#define IXGBE_MACA 0x0424C
-#define IXGBE_APAE 0x04250
-#define IXGBE_ARD 0x04254
-#define IXGBE_AIS 0x04258
-#define IXGBE_MSCA 0x0425C
-#define IXGBE_MSRWD 0x04260
-#define IXGBE_MLADD 0x04264
-#define IXGBE_MHADD 0x04268
-#define IXGBE_MAXFRS 0x04268
-#define IXGBE_TREG 0x0426C
-#define IXGBE_PCSS1 0x04288
-#define IXGBE_PCSS2 0x0428C
-#define IXGBE_XPCSS 0x04290
-#define IXGBE_MFLCN 0x04294
-#define IXGBE_SERDESC 0x04298
-#define IXGBE_MACS 0x0429C
-#define IXGBE_AUTOC 0x042A0
-#define IXGBE_LINKS 0x042A4
-#define IXGBE_LINKS2 0x04324
-#define IXGBE_AUTOC2 0x042A8
-#define IXGBE_AUTOC3 0x042AC
-#define IXGBE_ANLP1 0x042B0
-#define IXGBE_ANLP2 0x042B4
-#define IXGBE_ATLASCTL 0x04800
-#define IXGBE_MMNGC 0x042D0
-#define IXGBE_ANLPNP1 0x042D4
-#define IXGBE_ANLPNP2 0x042D8
-#define IXGBE_KRPCSFC 0x042E0
-#define IXGBE_KRPCSS 0x042E4
-#define IXGBE_FECS1 0x042E8
-#define IXGBE_FECS2 0x042EC
-#define IXGBE_SMADARCTL 0x14F10
-#define IXGBE_MPVC 0x04318
-#define IXGBE_SGMIIC 0x04314
+#define IXGBE_PCS1GCFIG 0x04200
+#define IXGBE_PCS1GLCTL 0x04208
+#define IXGBE_PCS1GLSTA 0x0420C
+#define IXGBE_PCS1GDBG0 0x04210
+#define IXGBE_PCS1GDBG1 0x04214
+#define IXGBE_PCS1GANA 0x04218
+#define IXGBE_PCS1GANLP 0x0421C
+#define IXGBE_PCS1GANNP 0x04220
+#define IXGBE_PCS1GANLPNP 0x04224
+#define IXGBE_HLREG0 0x04240
+#define IXGBE_HLREG1 0x04244
+#define IXGBE_PAP 0x04248
+#define IXGBE_MACA 0x0424C
+#define IXGBE_APAE 0x04250
+#define IXGBE_ARD 0x04254
+#define IXGBE_AIS 0x04258
+#define IXGBE_MSCA 0x0425C
+#define IXGBE_MSRWD 0x04260
+#define IXGBE_MLADD 0x04264
+#define IXGBE_MHADD 0x04268
+#define IXGBE_MAXFRS 0x04268
+#define IXGBE_TREG 0x0426C
+#define IXGBE_PCSS1 0x04288
+#define IXGBE_PCSS2 0x0428C
+#define IXGBE_XPCSS 0x04290
+#define IXGBE_MFLCN 0x04294
+#define IXGBE_SERDESC 0x04298
+#define IXGBE_MACS 0x0429C
+#define IXGBE_AUTOC 0x042A0
+#define IXGBE_LINKS 0x042A4
+#define IXGBE_LINKS2 0x04324
+#define IXGBE_AUTOC2 0x042A8
+#define IXGBE_AUTOC3 0x042AC
+#define IXGBE_ANLP1 0x042B0
+#define IXGBE_ANLP2 0x042B4
+#define IXGBE_MACC 0x04330
+#define IXGBE_ATLASCTL 0x04800
+#define IXGBE_MMNGC 0x042D0
+#define IXGBE_ANLPNP1 0x042D4
+#define IXGBE_ANLPNP2 0x042D8
+#define IXGBE_KRPCSFC 0x042E0
+#define IXGBE_KRPCSS 0x042E4
+#define IXGBE_FECS1 0x042E8
+#define IXGBE_FECS2 0x042EC
+#define IXGBE_SMADARCTL 0x14F10
+#define IXGBE_MPVC 0x04318
+#define IXGBE_SGMIIC 0x04314
+
+/* Statistics Registers */
+#define IXGBE_RXNFGPC 0x041B0
+#define IXGBE_RXNFGBCL 0x041B4
+#define IXGBE_RXNFGBCH 0x041B8
+#define IXGBE_RXDGPC 0x02F50
+#define IXGBE_RXDGBCL 0x02F54
+#define IXGBE_RXDGBCH 0x02F58
+#define IXGBE_RXDDGPC 0x02F5C
+#define IXGBE_RXDDGBCL 0x02F60
+#define IXGBE_RXDDGBCH 0x02F64
+#define IXGBE_RXLPBKGPC 0x02F68
+#define IXGBE_RXLPBKGBCL 0x02F6C
+#define IXGBE_RXLPBKGBCH 0x02F70
+#define IXGBE_RXDLPBKGPC 0x02F74
+#define IXGBE_RXDLPBKGBCL 0x02F78
+#define IXGBE_RXDLPBKGBCH 0x02F7C
+#define IXGBE_TXDGPC 0x087A0
+#define IXGBE_TXDGBCL 0x087A4
+#define IXGBE_TXDGBCH 0x087A8
+
+#define IXGBE_RXDSTATCTRL 0x02F40
/* Copper Pond 2 link timeout */
#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
/* Omer CORECTL */
-#define IXGBE_CORECTL 0x014F00
+#define IXGBE_CORECTL 0x014F00
/* BARCTRL */
-#define IXGBE_BARCTRL 0x110F4
-#define IXGBE_BARCTRL_FLSIZE 0x0700
-#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
-#define IXGBE_BARCTRL_CSRSIZE 0x2000
+#define IXGBE_BARCTRL 0x110F4
+#define IXGBE_BARCTRL_FLSIZE 0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
+#define IXGBE_BARCTRL_CSRSIZE 0x2000
/* RSCCTL Bit Masks */
-#define IXGBE_RSCCTL_RSCEN 0x01
-#define IXGBE_RSCCTL_MAXDESC_1 0x00
-#define IXGBE_RSCCTL_MAXDESC_4 0x04
-#define IXGBE_RSCCTL_MAXDESC_8 0x08
-#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+#define IXGBE_RSCCTL_RSCEN 0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16 0x0C
/* RSCDBU Bit Masks */
-#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
-#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
/* RDRXCTL Bit Masks */
-#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
-#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
-#define IXGBE_RDRXCTL_MVMEN 0x00000020
-#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
-#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
-#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
-#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
-#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
-#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */
+#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_MVMEN 0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disabl RSC compl on LLI */
+#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */
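
/*
 * A minimal sketch: per the "must set 1 when RSC ena" notes above,
 * any RSC enablement has to set RSCACKC and FCOE_WRFIX in RDRXCTL
 * (the RDRXCTL register itself is defined elsewhere in this header).
 * Hypothetical helper:
 */
static void
example_rdrxctl_for_rsc(struct ixgbe_hw *hw)
{
	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);

	rdrxctl |= IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
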
/* RQTC Bit Masks and Shifts */
-#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
-#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
-#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
-#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
-#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
-#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
-#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
-#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
-#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
+#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
+#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
+#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
+#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
+#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
+#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
+#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
+#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
+#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
/* PSRTYPE.RQPL Bit masks and shift */
-#define IXGBE_PSRTYPE_RQPL_MASK 0x7
-#define IXGBE_PSRTYPE_RQPL_SHIFT 29
+#define IXGBE_PSRTYPE_RQPL_MASK 0x7
+#define IXGBE_PSRTYPE_RQPL_SHIFT 29
/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
-#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
-#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
/* FACTPS */
-#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
+#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
/* MHADD Bit Masks */
-#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
-#define IXGBE_MHADD_MFS_SHIFT 16
+#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT 16
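
/*
 * A minimal sketch: the maximum frame size lives in the upper half of
 * MHADD, so setting it is a masked read-modify-write using the
 * mask/shift above.  The helper name is hypothetical.
 */
static void
example_set_max_frame_size(struct ixgbe_hw *hw, u32 max_frame)
{
	u32 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);

	mhadd &= ~IXGBE_MHADD_MFS_MASK;
	mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
}
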
/* Extended Device Control */
-#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
-#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
-#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
-#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
/* Direct Cache Access (DCA) definitions */
-#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
-#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
-
-#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
-#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
-
-#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
-
-#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
+#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* Rx wr Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* Rx Split Header RO */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
/* MSCA Bit Masks */
-#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */
-#define IXGBE_MSCA_NP_ADDR_SHIFT 0
-#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */
-#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */
-#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
-#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/
-#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
-#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
-#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
-#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
-#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */
-#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/
-#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
-#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
-#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */
-#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */
-#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
-#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
+#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT 0
+#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot) */
+#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/
+#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */
+#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */
+#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */
+#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */
+#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */
/* MSRWD bit masks */
-#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
-#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
-#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
-#define IXGBE_MSRWD_READ_DATA_SHIFT 16
+#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT 16
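
/*
 * A sketch of composing a "new protocol" (clause 45) MDI read from
 * the MSCA/MSRWD fields above: an address cycle, then a read cycle,
 * then the data out of MSRWD.  Polling is unbounded here for brevity;
 * real code would cap it (e.g. via IXGBE_MDIO_COMMAND_TIMEOUT below).
 * The helper is hypothetical.
 */
static u16
example_mdi_read(struct ixgbe_hw *hw, u32 reg_addr, u32 dev_type, u32 phy_addr)
{
	u32 cmd;

	cmd = (reg_addr & IXGBE_MSCA_NP_ADDR_MASK) |
	    (dev_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
	    (phy_addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
	    IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_NEW_PROTOCOL |
	    IXGBE_MSCA_MDI_COMMAND;
	IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
	while (IXGBE_READ_REG(hw, IXGBE_MSCA) & IXGBE_MSCA_MDI_COMMAND)
		msec_delay(1);

	cmd = (reg_addr & IXGBE_MSCA_NP_ADDR_MASK) |
	    (dev_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
	    (phy_addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
	    IXGBE_MSCA_READ | IXGBE_MSCA_NEW_PROTOCOL | IXGBE_MSCA_MDI_COMMAND;
	IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
	while (IXGBE_READ_REG(hw, IXGBE_MSCA) & IXGBE_MSCA_MDI_COMMAND)
		msec_delay(1);

	return ((u16)(IXGBE_READ_REG(hw, IXGBE_MSRWD) >>
	    IXGBE_MSRWD_READ_DATA_SHIFT));
}
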
/* Atlas registers */
-#define IXGBE_ATLAS_PDN_LPBK 0x24
-#define IXGBE_ATLAS_PDN_10G 0xB
-#define IXGBE_ATLAS_PDN_1G 0xC
-#define IXGBE_ATLAS_PDN_AN 0xD
+#define IXGBE_ATLAS_PDN_LPBK 0x24
+#define IXGBE_ATLAS_PDN_10G 0xB
+#define IXGBE_ATLAS_PDN_1G 0xC
+#define IXGBE_ATLAS_PDN_AN 0xD
/* Atlas bit masks */
-#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
-#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
-#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
-#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
-#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
+#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
/* Omer bit masks */
-#define IXGBE_CORECTL_WRITE_CMD 0x00010000
+#define IXGBE_CORECTL_WRITE_CMD 0x00010000
/* Device Type definitions for new protocol MDIO commands */
-#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
-#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
-#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
-#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
-#define IXGBE_TWINAX_DEV 1
-
-#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
-
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
-
-#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
-#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
-#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
-#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
-#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
-#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
-#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
-#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
-#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
-#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
-#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
-#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */
-#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
-#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
-#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
-#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
-#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
-
-#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
+#define IXGBE_TWINAX_DEV 1
+
+#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
+
+#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
+#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
+#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
+#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */
+#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
+#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
+#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
+#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+
+#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
/* MII clause 22/28 definitions */
-#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
-#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
-#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
-#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
-#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
-#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
-#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
-#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
-#define IXGBE_MII_RESTART 0x200
-#define IXGBE_MII_AUTONEG_COMPLETE 0x20
-#define IXGBE_MII_AUTONEG_LINK_UP 0x04
-#define IXGBE_MII_AUTONEG_REG 0x0
-
-#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
-#define IXGBE_MAX_PHY_ADDR 32
+#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART 0x200
+#define IXGBE_MII_AUTONEG_COMPLETE 0x20
+#define IXGBE_MII_AUTONEG_LINK_UP 0x04
+#define IXGBE_MII_AUTONEG_REG 0x0
+
+#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR 32
/* PHY IDs*/
-#define TN1010_PHY_ID 0x00A19410
-#define TNX_FW_REV 0xB
-#define AQ1002_PHY_ID 0x03A1B420
-#define AQ_FW_REV 0x20
-#define QT2022_PHY_ID 0x0043A400
-#define ATH_PHY_ID 0x03429050
+#define TN1010_PHY_ID 0x00A19410
+#define TNX_FW_REV 0xB
+#define X540_PHY_ID 0x01540200
+#define AQ_FW_REV 0x20
+#define QT2022_PHY_ID 0x0043A400
+#define ATH_PHY_ID 0x03429050
/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
/* Special PHY Init Routine */
-#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
-#define IXGBE_PHY_INIT_END_NL 0xFFFF
-#define IXGBE_CONTROL_MASK_NL 0xF000
-#define IXGBE_DATA_MASK_NL 0x0FFF
-#define IXGBE_CONTROL_SHIFT_NL 12
-#define IXGBE_DELAY_NL 0
-#define IXGBE_DATA_NL 1
-#define IXGBE_CONTROL_NL 0x000F
-#define IXGBE_CONTROL_EOL_NL 0x0FFF
-#define IXGBE_CONTROL_SOL_NL 0x0000
+#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
+#define IXGBE_PHY_INIT_END_NL 0xFFFF
+#define IXGBE_CONTROL_MASK_NL 0xF000
+#define IXGBE_DATA_MASK_NL 0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL 12
+#define IXGBE_DELAY_NL 0
+#define IXGBE_DATA_NL 1
+#define IXGBE_CONTROL_NL 0x000F
+#define IXGBE_CONTROL_EOL_NL 0x0FFF
+#define IXGBE_CONTROL_SOL_NL 0x0000
/* General purpose Interrupt Enable */
-#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
-#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
-#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
-#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
-#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
-#define IXGBE_GPIE_EIAME 0x40000000
-#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
-#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
-#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
-#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
-#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
-#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
+#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME 0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
+#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
+#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
+#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
+#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
+
+/* Packet Buffer Initialization */
+#define IXGBE_MAX_PACKET_BUFFERS 8
+
+#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */
+#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
+
+#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+#define IXGBE_MAX_PB 8
+
+/* Packet buffer allocation strategies */
+enum {
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
+};
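/*
 * Editorial sketch, not part of this change: how the two PBA strategies
 * above might translate into per-TC Rx packet-buffer carving. The function
 * name, the rxpb[] output array, and the 5/8-vs-3/8 weighting are
 * illustrative assumptions modeled on the driver's Rx PBA setup, not a
 * definitive implementation; u32 comes from ixgbe_osdep.h.
 */
static void
pba_split_sketch(u32 rx_pb_size, int num_pb, int strategy, u32 *rxpb)
{
	u32 big, small;
	int i;

	if (num_pb <= 0)
		return;
	if (strategy == PBA_STRATEGY_WEIGHTED && num_pb >= 2) {
		/* Weight the front half of the TCs: 5/8 of the space to
		 * the first num_pb/2 buffers, 3/8 to the rest. */
		big = (rx_pb_size * 5 / 8) / (u32)(num_pb / 2);
		small = (rx_pb_size * 3 / 8) / (u32)(num_pb - num_pb / 2);
		for (i = 0; i < num_pb; i++)
			rxpb[i] = (i < num_pb / 2) ? big : small;
	} else {
		/* PBA_STRATEGY_EQUAL: distribute PB space evenly. */
		for (i = 0; i < num_pb; i++)
			rxpb[i] = rx_pb_size / (u32)num_pb;
	}
}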
/* Transmit Flow Control status */
-#define IXGBE_TFCS_TXOFF 0x00000001
-#define IXGBE_TFCS_TXOFF0 0x00000100
-#define IXGBE_TFCS_TXOFF1 0x00000200
-#define IXGBE_TFCS_TXOFF2 0x00000400
-#define IXGBE_TFCS_TXOFF3 0x00000800
-#define IXGBE_TFCS_TXOFF4 0x00001000
-#define IXGBE_TFCS_TXOFF5 0x00002000
-#define IXGBE_TFCS_TXOFF6 0x00004000
-#define IXGBE_TFCS_TXOFF7 0x00008000
+#define IXGBE_TFCS_TXOFF 0x00000001
+#define IXGBE_TFCS_TXOFF0 0x00000100
+#define IXGBE_TFCS_TXOFF1 0x00000200
+#define IXGBE_TFCS_TXOFF2 0x00000400
+#define IXGBE_TFCS_TXOFF3 0x00000800
+#define IXGBE_TFCS_TXOFF4 0x00001000
+#define IXGBE_TFCS_TXOFF5 0x00002000
+#define IXGBE_TFCS_TXOFF6 0x00004000
+#define IXGBE_TFCS_TXOFF7 0x00008000
/* TCP Timer */
-#define IXGBE_TCPTIMER_KS 0x00000100
-#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
-#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
-#define IXGBE_TCPTIMER_LOOP 0x00000800
-#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+#define IXGBE_TCPTIMER_KS 0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
+#define IXGBE_TCPTIMER_LOOP 0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
/* HLREG0 Bit Masks */
-#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
-#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
-#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
-#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
-#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
-#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
-#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
-#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
-#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
-#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
-#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
-#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
-#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
-#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
-#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
+#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
+#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
+#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
+#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
+#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
+#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
/* VMD_CTL bitmasks */
-#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
-#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
/* VT_CTL bitmasks */
-#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
-#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
-#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
-#define IXGBE_VT_CTL_POOL_SHIFT 7
-#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
+#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
+#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
+#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT 7
+#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
/* VMOLR bitmasks */
-#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
-#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
-#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
-#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
-#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
+#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
+#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
+#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
+#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
+#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
/* VFRE bitmask */
-#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
/* RDHMPN and TDHMPN bitmasks */
-#define IXGBE_RDHMPN_RDICADDR 0x007FF800
-#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
-#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
-#define IXGBE_TDHMPN_TDICADDR 0x003FF800
-#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
-#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
-
-#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
-#define IXGBE_RDMAM_DWORD_SHIFT 9
-#define IXGBE_RDMAM_DESC_COMP_FIFO 1
-#define IXGBE_RDMAM_DFC_CMD_FIFO 2
-#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
-#define IXGBE_RDMAM_TCN_STATUS_RAM 4
-#define IXGBE_RDMAM_WB_COLL_FIFO 5
-#define IXGBE_RDMAM_QSC_CNT_RAM 6
-#define IXGBE_RDMAM_QSC_FCOE_RAM 7
-#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
-#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
-#define IXGBE_RDMAM_QSC_RSC_RAM 0xB
-#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
-#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
-#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
-#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
-#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32
-#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4
-#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
-#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
-#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
-#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
-#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
-#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
-#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512
-#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
-#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32
-#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8
-
-#define IXGBE_TXDESCIC_READY 0x80000000
+#define IXGBE_RDHMPN_RDICADDR 0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
+#define IXGBE_TDHMPN_TDICADDR 0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
+
+#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
+#define IXGBE_RDMAM_DWORD_SHIFT 9
+#define IXGBE_RDMAM_DESC_COMP_FIFO 1
+#define IXGBE_RDMAM_DFC_CMD_FIFO 2
+#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
+#define IXGBE_RDMAM_TCN_STATUS_RAM 4
+#define IXGBE_RDMAM_WB_COLL_FIFO 5
+#define IXGBE_RDMAM_QSC_CNT_RAM 6
+#define IXGBE_RDMAM_QSC_FCOE_RAM 7
+#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
+#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
+#define IXGBE_RDMAM_QSC_RSC_RAM 0xB
+#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
+#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
+#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
+#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4
+#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
+#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
+#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
+#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
+#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
+#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
+#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512
+#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
+#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32
+#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8
+
+#define IXGBE_TXDESCIC_READY 0x80000000
/* Receive Checksum Control */
-#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
-#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* FCRTL Bit Masks */
-#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
-#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
+#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
/* PAP bit masks*/
-#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
+#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
/* RMCS Bit Masks */
-#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
+#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */
/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
-#define IXGBE_RMCS_RAC 0x00000004
-#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
-#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */
-#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
-#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+#define IXGBE_RMCS_RAC 0x00000004
+/* Deficit Fixed Prio ena */
+#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx link FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
+#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
/* FCCFG Bit Masks */
-#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
-#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
+#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
+#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
/* Interrupt register bitmasks */
/* Extended Interrupt Cause Read */
-#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
-#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
-#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
-#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
-#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
-#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
-#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
-#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
-#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
-#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
-#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
-#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
+#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
+#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
+#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
+#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
/* Extended Interrupt Cause Set */
-#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
-#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */
-#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
-#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
-#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
-#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
-#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */
+#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
/* Extended Interrupt Mask Set */
-#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
-#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
-#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
-#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
-#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
-#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
-#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
+#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
/* Extended Interrupt Mask Clear */
-#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
-#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
-#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
-#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
-#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
-#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
-#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
+#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_LSC | \
- IXGBE_EIMS_TCP_TIMER | \
- IXGBE_EIMS_OTHER)
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
+ IXGBE_EIMS_OTHER)
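/*
 * Editorial note, not part of this change: IXGBE_EIMS_ENABLE_MASK is the
 * default set of interrupt causes a driver unmasks in legacy/MSI mode,
 * e.g. (using the IXGBE_WRITE_REG() accessor from ixgbe_osdep.h and the
 * IXGBE_EIMS/IXGBE_EIMC register offsets defined earlier in this header):
 *
 *	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_ENABLE_MASK);
 *
 * Individual causes are later masked off through the matching
 * IXGBE_EIMC_* bit, e.g. IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC).
 */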
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
-#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
-#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
-#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
-#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
-#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
-#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
-#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
-#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
-#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
-#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */
-#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */
-#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
-#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
-#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
-#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
-#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
-#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
-#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */
-#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */
-#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */
-#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
-#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */
-#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */
-
-#define IXGBE_MAX_FTQF_FILTERS 128
-#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003
-#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000
-#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001
-#define IXGBE_FTQF_PROTOCOL_SCTP 2
-#define IXGBE_FTQF_PRIORITY_MASK 0x00000007
-#define IXGBE_FTQF_PRIORITY_SHIFT 2
-#define IXGBE_FTQF_POOL_MASK 0x0000003F
-#define IXGBE_FTQF_POOL_SHIFT 8
-#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
-#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
-#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
-#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
-#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
-#define IXGBE_FTQF_DEST_PORT_MASK 0x17
-#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
-#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
-#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
+#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */
+#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */
+#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */
+#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */
+#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
+#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */
+#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */
+
+#define IXGBE_MAX_FTQF_FILTERS 128
+#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003
+#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000
+#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001
+#define IXGBE_FTQF_PROTOCOL_SCTP 2
+#define IXGBE_FTQF_PRIORITY_MASK 0x00000007
+#define IXGBE_FTQF_PRIORITY_SHIFT 2
+#define IXGBE_FTQF_POOL_MASK 0x0000003F
+#define IXGBE_FTQF_POOL_SHIFT 8
+#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
+#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
+#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
+#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
+#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
+#define IXGBE_FTQF_DEST_PORT_MASK 0x17
+#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
+#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
+#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
/* Interrupt clear mask */
-#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF
+#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF
/* Interrupt Vector Allocation Registers */
-#define IXGBE_IVAR_REG_NUM 25
-#define IXGBE_IVAR_REG_NUM_82599 64
-#define IXGBE_IVAR_TXRX_ENTRY 96
-#define IXGBE_IVAR_RX_ENTRY 64
-#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
-#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i))
-#define IXGBE_IVAR_TX_ENTRY 32
+#define IXGBE_IVAR_REG_NUM 25
+#define IXGBE_IVAR_REG_NUM_82599 64
+#define IXGBE_IVAR_TXRX_ENTRY 96
+#define IXGBE_IVAR_RX_ENTRY 64
+#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY 32
-#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */
-#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */
+#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */
-#define IXGBE_MSIX_VECTOR(_i) (0 + (_i))
+#define IXGBE_MSIX_VECTOR(_i) (0 + (_i))
-#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
/* ETYPE Queue Filter/Select Bit Masks */
-#define IXGBE_MAX_ETQF_FILTERS 8
-#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
-#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
-#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
-#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
-
-#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
-#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
-#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */
-#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */
+#define IXGBE_MAX_ETQF_FILTERS 8
+#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
+#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
+#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
+#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
+#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+
+#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
+#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
+#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */
+#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */
/*
* ETQF filter list: one static filter per filter consumer. This is
- * to avoid filter collisions later. Add new filters
- * here!!
+ * to avoid filter collisions later. Add new filters
+ * here!!
*
* Current filters:
- * EAPOL 802.1x (0x888e): Filter 0
- * FCoE (0x8906): Filter 2
- * 1588 (0x88f7): Filter 3
- * FIP (0x8914): Filter 4
+ * EAPOL 802.1x (0x888e): Filter 0
+ * FCoE (0x8906): Filter 2
+ * 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
*/
-#define IXGBE_ETQF_FILTER_EAPOL 0
-#define IXGBE_ETQF_FILTER_FCOE 2
-#define IXGBE_ETQF_FILTER_1588 3
-#define IXGBE_ETQF_FILTER_FIP 4
+#define IXGBE_ETQF_FILTER_EAPOL 0
+#define IXGBE_ETQF_FILTER_FCOE 2
+#define IXGBE_ETQF_FILTER_1588 3
+#define IXGBE_ETQF_FILTER_FIP 4
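/*
 * Editorial sketch, not part of this change: programming one of the static
 * ETQF slots listed above. IXGBE_ETQF(_i)/IXGBE_ETQS(_i) are the indexed
 * register macros defined earlier in this header; the function name and the
 * rx_queue parameter are illustrative assumptions.
 */
static void
etqf_1588_sketch(struct ixgbe_hw *hw, u32 rx_queue)
{
	/* Match EtherType 0x88F7, tag it as 1588 traffic, enable filter. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
	    0x88F7 | IXGBE_ETQF_1588 | IXGBE_ETQF_FILTER_EN);
	/* Steer matches to rx_queue and enable queue selection. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_1588),
	    (rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) | IXGBE_ETQS_QUEUE_EN);
}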
/* VLAN Control Bit Masks */
-#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
-#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
-#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
-#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
-#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
+#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
+#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
+#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
/* VLAN pool filtering masks */
-#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
-#define IXGBE_VLVF_ENTRIES 64
-#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
+#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
/* Per VF Port VLAN insertion rules */
-#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
-#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
-#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
/* STATUS Bit Masks */
-#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
-#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */
-#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
-#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
+#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
/* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
-#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
-#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
-#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
-#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
-#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
-#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
-#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
-#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
/* LEDCTL Bit Masks */
-#define IXGBE_LED_IVRT_BASE 0x00000040
-#define IXGBE_LED_BLINK_BASE 0x00000080
-#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
-#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
-#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
-#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
-#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
-#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+#define IXGBE_LED_IVRT_BASE 0x00000040
+#define IXGBE_LED_BLINK_BASE 0x00000080
+#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
/* LED modes */
-#define IXGBE_LED_LINK_UP 0x0
-#define IXGBE_LED_LINK_10G 0x1
-#define IXGBE_LED_MAC 0x2
-#define IXGBE_LED_FILTER 0x3
-#define IXGBE_LED_LINK_ACTIVE 0x4
-#define IXGBE_LED_LINK_1G 0x5
-#define IXGBE_LED_ON 0xE
-#define IXGBE_LED_OFF 0xF
+#define IXGBE_LED_LINK_UP 0x0
+#define IXGBE_LED_LINK_10G 0x1
+#define IXGBE_LED_MAC 0x2
+#define IXGBE_LED_FILTER 0x3
+#define IXGBE_LED_LINK_ACTIVE 0x4
+#define IXGBE_LED_LINK_1G 0x5
+#define IXGBE_LED_ON 0xE
+#define IXGBE_LED_OFF 0xF
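/*
 * Editorial sketch, not part of this change: the LED macros above
 * parameterize one byte of LEDCTL per LED. Forcing a LED on, modeled on
 * the driver's generic led-on path (IXGBE_LEDCTL and the register
 * accessors are defined elsewhere; the function name is illustrative):
 */
static void
led_on_sketch(struct ixgbe_hw *hw, u32 led)	/* led index 0..3 */
{
	u32 ledctl = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	ledctl &= ~IXGBE_LED_MODE_MASK(led);	/* clear this LED's mode */
	ledctl |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(led);
	ledctl &= ~IXGBE_LED_BLINK(led);	/* stop blinking */
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl);
}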
/* AUTOC Bit Masks */
#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
-#define IXGBE_AUTOC_KX4_SUPP 0x80000000
-#define IXGBE_AUTOC_KX_SUPP 0x40000000
-#define IXGBE_AUTOC_PAUSE 0x30000000
-#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
-#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
-#define IXGBE_AUTOC_RF 0x08000000
-#define IXGBE_AUTOC_PD_TMR 0x06000000
-#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
-#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
-#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
-#define IXGBE_AUTOC_FECA 0x00040000
-#define IXGBE_AUTOC_FECR 0x00020000
-#define IXGBE_AUTOC_KR_SUPP 0x00010000
-#define IXGBE_AUTOC_AN_RESTART 0x00001000
-#define IXGBE_AUTOC_FLU 0x00000001
-#define IXGBE_AUTOC_LMS_SHIFT 13
-#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
-#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
-#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
-#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
-#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-
+#define IXGBE_AUTOC_KX4_SUPP 0x80000000
+#define IXGBE_AUTOC_KX_SUPP 0x40000000
+#define IXGBE_AUTOC_PAUSE 0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
+#define IXGBE_AUTOC_RF 0x08000000
+#define IXGBE_AUTOC_PD_TMR 0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
+#define IXGBE_AUTOC_FECA 0x00040000
+#define IXGBE_AUTOC_FECR 0x00020000
+#define IXGBE_AUTOC_KR_SUPP 0x00010000
+#define IXGBE_AUTOC_AN_RESTART 0x00001000
+#define IXGBE_AUTOC_FLU 0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT 13
+#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
+#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+
+#define IXGBE_MACC_FLU 0x00000001
+#define IXGBE_MACC_FSV_10G 0x00030000
+#define IXGBE_MACC_FS 0x00040000
+#define IXGBE_MAC_RX2TX_LPBK 0x00000002
/* LINKS Bit Masks */
-#define IXGBE_LINKS_KX_AN_COMP 0x80000000
-#define IXGBE_LINKS_UP 0x40000000
-#define IXGBE_LINKS_SPEED 0x20000000
-#define IXGBE_LINKS_MODE 0x18000000
-#define IXGBE_LINKS_RX_MODE 0x06000000
-#define IXGBE_LINKS_TX_MODE 0x01800000
-#define IXGBE_LINKS_XGXS_EN 0x00400000
-#define IXGBE_LINKS_SGMII_EN 0x02000000
-#define IXGBE_LINKS_PCS_1G_EN 0x00200000
-#define IXGBE_LINKS_1G_AN_EN 0x00100000
-#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
-#define IXGBE_LINKS_1G_SYNC 0x00040000
-#define IXGBE_LINKS_10G_ALIGN 0x00020000
-#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
-#define IXGBE_LINKS_TL_FAULT 0x00001000
-#define IXGBE_LINKS_SIGNAL 0x00000F00
-
-#define IXGBE_LINKS_SPEED_82599 0x30000000
-#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
-#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
-#define IXGBE_LINKS_SPEED_100_82599 0x10000000
-#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
-#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
-
-#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
+#define IXGBE_LINKS_KX_AN_COMP 0x80000000
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_LINKS_MODE 0x18000000
+#define IXGBE_LINKS_RX_MODE 0x06000000
+#define IXGBE_LINKS_TX_MODE 0x01800000
+#define IXGBE_LINKS_XGXS_EN 0x00400000
+#define IXGBE_LINKS_SGMII_EN 0x02000000
+#define IXGBE_LINKS_PCS_1G_EN 0x00200000
+#define IXGBE_LINKS_1G_AN_EN 0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
+#define IXGBE_LINKS_1G_SYNC 0x00040000
+#define IXGBE_LINKS_10G_ALIGN 0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
+#define IXGBE_LINKS_TL_FAULT 0x00001000
+#define IXGBE_LINKS_SIGNAL 0x00000F00
+
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
+#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
+#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+
+#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
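/*
 * Editorial note, not part of this change: a link check combines the LINKS
 * bits above, roughly as in the generic MAC link-check path:
 *
 *	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
 *	link_up = (links & IXGBE_LINKS_UP) != 0;
 *	if ((links & IXGBE_LINKS_SPEED_82599) == IXGBE_LINKS_SPEED_10G_82599)
 *		speed = IXGBE_LINK_SPEED_10GB_FULL;
 *
 * IXGBE_LINKS and the IXGBE_LINK_SPEED_* values are defined elsewhere in
 * this header; IXGBE_LINK_UP_TIME/IXGBE_AUTO_NEG_TIME above bound how long
 * the driver polls for this state.
 */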
/* PCS1GLSTA Bit Masks */
-#define IXGBE_PCS1GLSTA_LINK_OK 1
-#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
-#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
-#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
-#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
-#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
-#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
+#define IXGBE_PCS1GLSTA_LINK_OK 1
+#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
+#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
+#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
+#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
+#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
-#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
-#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
+#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
+#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
/* PCS1GLCTL Bit Masks */
-#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */
-#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
-#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
-#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
-#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
-#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg timeout ena */
+#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
+#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
/* ANLP1 Bit Masks */
-#define IXGBE_ANLP1_PAUSE 0x0C00
-#define IXGBE_ANLP1_SYM_PAUSE 0x0400
-#define IXGBE_ANLP1_ASM_PAUSE 0x0800
-#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
+#define IXGBE_ANLP1_PAUSE 0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE 0x0400
+#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
/* SW Semaphore Register bitmasks */
-#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
-#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
-#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
-#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
+#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
/* SW_FW_SYNC/GSSR definitions */
-#define IXGBE_GSSR_EEP_SM 0x0001
-#define IXGBE_GSSR_PHY0_SM 0x0002
-#define IXGBE_GSSR_PHY1_SM 0x0004
-#define IXGBE_GSSR_MAC_CSR_SM 0x0008
-#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+
+/* FW Status register bitmask */
+#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
/* EEC Register */
-#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
-#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
-#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
-#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
-#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
-#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
-#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
-#define IXGBE_EEC_FWE_SHIFT 4
-#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
-#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
-#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
-#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
-#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
-#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
-#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
+#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT 4
+#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
+#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
/* EEPROM Addressing bits based on type (0-small, 1-large) */
-#define IXGBE_EEC_ADDR_SIZE 0x00000400
-#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
+#define IXGBE_EEC_ADDR_SIZE 0x00000400
+#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
-#define IXGBE_EEC_SIZE_SHIFT 11
-#define IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT 6
-#define IXGBE_EEPROM_OPCODE_BITS 8
+#define IXGBE_EEC_SIZE_SHIFT 11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
+#define IXGBE_EEPROM_OPCODE_BITS 8
/* Part Number String Length */
-#define IXGBE_PBANUM_LENGTH 11
+#define IXGBE_PBANUM_LENGTH 11
/* Checksum and EEPROM pointers */
-#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
-#define IXGBE_EEPROM_CHECKSUM 0x3F
-#define IXGBE_EEPROM_SUM 0xBABA
-#define IXGBE_PCIE_ANALOG_PTR 0x03
-#define IXGBE_ATLAS0_CONFIG_PTR 0x04
-#define IXGBE_PHY_PTR 0x04
-#define IXGBE_ATLAS1_CONFIG_PTR 0x05
-#define IXGBE_OPTION_ROM_PTR 0x05
-#define IXGBE_PCIE_GENERAL_PTR 0x06
-#define IXGBE_PCIE_CONFIG0_PTR 0x07
-#define IXGBE_PCIE_CONFIG1_PTR 0x08
-#define IXGBE_CORE0_PTR 0x09
-#define IXGBE_CORE1_PTR 0x0A
-#define IXGBE_MAC0_PTR 0x0B
-#define IXGBE_MAC1_PTR 0x0C
-#define IXGBE_CSR0_CONFIG_PTR 0x0D
-#define IXGBE_CSR1_CONFIG_PTR 0x0E
-#define IXGBE_FW_PTR 0x0F
-#define IXGBE_PBANUM0_PTR 0x15
-#define IXGBE_PBANUM1_PTR 0x16
-#define IXGBE_SAN_MAC_ADDR_PTR 0x28
-#define IXGBE_DEVICE_CAPS 0x2C
-#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
-#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
-#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_ALT_MAC_ADDR_PTR 0x37
+#define IXGBE_FREE_SPACE_PTR 0x3E
+
+#define IXGBE_SAN_MAC_ADDR_PTR 0x28
+#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
+#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
/* MSI-X capability fields masks */
-#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
/* Legacy EEPROM word offsets */
-#define IXGBE_ISCSI_BOOT_CAPS 0x0033
-#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
-#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
+#define IXGBE_ISCSI_BOOT_CAPS 0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
/* EEPROM Commands - SPI */
-#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
-#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
-#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
-#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
-#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
-#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
+#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
/* EEPROM reset Write Enable latch */
-#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
-#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
-#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
-#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
-#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
-#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
/* EEPROM Read Register */
-#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
-#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
-#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
-#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
-#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
-#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* words rd in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */
#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
-#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */
#endif
/* Number of 5 microsecond intervals we wait for EERD read and
 * EEWR write to complete */
-#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
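/*
 * Editorial sketch, not part of this change: how the EERD fields above
 * compose into a polled EEPROM word read, modeled on the generic EEPROM
 * read path (usec_delay() comes from ixgbe_osdep.h; the function name and
 * error value are illustrative):
 */
static s32
eerd_read_sketch(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	u32 eerd;
	u32 i;

	/* Load the word address above the START bit, then kick START. */
	IXGBE_WRITE_REG(hw, IXGBE_EERD,
	    ((u32)offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
	    IXGBE_EEPROM_RW_REG_START);

	/* Poll DONE in 5us steps, up to IXGBE_EERD_EEWR_ATTEMPTS times. */
	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		eerd = IXGBE_READ_REG(hw, IXGBE_EERD);
		if (eerd & IXGBE_EEPROM_RW_REG_DONE) {
			*data = (u16)(eerd >> IXGBE_EEPROM_RW_REG_DATA);
			return 0;
		}
		usec_delay(5);
	}
	return -1;		/* real code returns IXGBE_ERR_EEPROM */
}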
/* # attempts we wait for flush update to complete */
-#define IXGBE_FLUDONE_ATTEMPTS 20000
-
-#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
-#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
-#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
-#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
-
-#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
-#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
-#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
-#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
-#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
-#define IXGBE_FW_LESM_STATE_1 0x1
-#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
-#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
-#define IXGBE_FW_PATCH_VERSION_4 0x7
-#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
-#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
-#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
-#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
-#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
-#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
+
+#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
+
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
+#define IXGBE_FW_LESM_STATE_1 0x1
+#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */
+
+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */
/* PCI Bus Info */
-#define IXGBE_PCI_DEVICE_STATUS 0xAA
-#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
-#define IXGBE_PCI_LINK_STATUS 0xB2
-#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
-#define IXGBE_PCI_LINK_WIDTH 0x3F0
-#define IXGBE_PCI_LINK_WIDTH_1 0x10
-#define IXGBE_PCI_LINK_WIDTH_2 0x20
-#define IXGBE_PCI_LINK_WIDTH_4 0x40
-#define IXGBE_PCI_LINK_WIDTH_8 0x80
-#define IXGBE_PCI_LINK_SPEED 0xF
-#define IXGBE_PCI_LINK_SPEED_2500 0x1
-#define IXGBE_PCI_LINK_SPEED_5000 0x2
-#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
-#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
-#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
+#define IXGBE_PCI_DEVICE_STATUS 0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
+#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
+#define IXGBE_PCI_LINK_WIDTH 0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1 0x10
+#define IXGBE_PCI_LINK_WIDTH_2 0x20
+#define IXGBE_PCI_LINK_WIDTH_4 0x40
+#define IXGBE_PCI_LINK_WIDTH_8 0x80
+#define IXGBE_PCI_LINK_SPEED 0xF
+#define IXGBE_PCI_LINK_SPEED_2500 0x1
+#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
/* Number of 100 microsecond intervals we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
-/* Check whether address is multicast. This is little-endian specific check.*/
+/* Check whether an address is multicast. This is a little-endian-specific check. */
#define IXGBE_IS_MULTICAST(Address) \
- (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+ (bool)(((u8 *)(Address))[0] & ((u8)0x01))
/* Check whether an address is broadcast. */
-#define IXGBE_IS_BROADCAST(Address) \
- ((((u8 *)(Address))[0] == ((u8)0xff)) && \
- (((u8 *)(Address))[1] == ((u8)0xff)))
+#define IXGBE_IS_BROADCAST(Address) \
+ ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+ (((u8 *)(Address))[1] == ((u8)0xff)))
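/*
 * Editorial usage note, not part of this change: both macros take a
 * pointer to the raw six-byte MAC address. IXGBE_IS_MULTICAST(eaddr)
 * tests the I/G bit in the first octet (true for 01:xx:... addresses),
 * while IXGBE_IS_BROADCAST(eaddr) only samples the first two octets as a
 * cheap ff:ff:... screen rather than comparing all six.
 */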
/* RAH */
-#define IXGBE_RAH_VIND_MASK 0x003C0000
-#define IXGBE_RAH_VIND_SHIFT 18
-#define IXGBE_RAH_AV 0x80000000
-#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
+#define IXGBE_RAH_VIND_MASK 0x003C0000
+#define IXGBE_RAH_VIND_SHIFT 18
+#define IXGBE_RAH_AV 0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
/* Header split receive */
-#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
-#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
-#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
-#define IXGBE_RFCTL_NFSW_DIS 0x00000040
-#define IXGBE_RFCTL_NFSR_DIS 0x00000080
-#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
-#define IXGBE_RFCTL_NFS_VER_SHIFT 8
-#define IXGBE_RFCTL_NFS_VER_2 0
-#define IXGBE_RFCTL_NFS_VER_3 1
-#define IXGBE_RFCTL_NFS_VER_4 2
-#define IXGBE_RFCTL_IPV6_DIS 0x00000400
-#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
-#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
-#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
-#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_RSC_DIS 0x00000010
+#define IXGBE_RFCTL_NFSW_DIS 0x00000040
+#define IXGBE_RFCTL_NFSR_DIS 0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT 8
+#define IXGBE_RFCTL_NFS_VER_2 0
+#define IXGBE_RFCTL_NFS_VER_3 1
+#define IXGBE_RFCTL_NFS_VER_4 2
+#define IXGBE_RFCTL_IPV6_DIS 0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
/* Transmit Config masks */
-#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
-#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
-#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
/* Enable short packet padding to 64 bytes */
-#define IXGBE_TX_PAD_ENABLE 0x00000400
-#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
+#define IXGBE_TX_PAD_ENABLE 0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
/* This allows for 16K packets + 4k for vlan */
-#define IXGBE_MAX_FRAME_SZ 0x40040000
+#define IXGBE_MAX_FRAME_SZ 0x40040000
-#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
-#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
/* Receive Config masks */
-#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
-#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
-#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
-#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
-
-#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
-#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
-#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
-#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
-#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
-#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
+
+#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */
+
+#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF
+#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00
+#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01
+#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02
+#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03
+#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04
+
+#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00
+#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000
+#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100
+#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200
+#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300
+#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800
+#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900
+#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
+#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00
+#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00
+#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
+
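
The TSYNC and RXMTRL bits above support IEEE 1588/PTP hardware timestamping: TSYNCRXCTL selects which class of inbound PTP packet latches a timestamp and reports, via VALID, that one is waiting, while RXMTRL narrows V1/V2 matching to a particular message ID. A hedged sketch of checking for a latched Rx timestamp; IXGBE_READ_REG and the IXGBE_TSYNCRXCTL register offset are assumed from other parts of the ixgbe headers:

    /* Sketch: is an Rx timestamp latched and waiting to be read? */
    static bool
    rx_timestamp_ready(struct ixgbe_hw *hw)
    {
            u32 ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);

            if (!(ctl & IXGBE_TSYNCRXCTL_ENABLED))
                    return (false);         /* timestamping is off */
            return ((ctl & IXGBE_TSYNCRXCTL_VALID) != 0);
    }
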
+#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena */
+#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
/* Receive Priority Flow Control Enable */
-#define IXGBE_FCTRL_RPFCE 0x00004000
-#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
-#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */
-#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
-#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
-#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
+#define IXGBE_FCTRL_RPFCE 0x00004000
+#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
+#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */
+#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
+#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
+#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCM 0x00000004 /* Receive Priority FC Mode */
+#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF0 /* Rx Priority FC bitmap mask */
+#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */
/* Multiple Receive Queue Control */
-#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
-#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
-#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */
-#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */
-#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */
-#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */
-#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */
-#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
-#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
-#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
-#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
-#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
+#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
+#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */
+#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */
+#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */
+#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */
+#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */
+#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
+#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
-#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
-#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
-#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
/* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE 0x00000001
-#define IXGBE_QDE_IDX_MASK 0x00007F00
-#define IXGBE_QDE_IDX_SHIFT 8
-
-#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
-#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-
-#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_IDX_MASK 0x00007F00
+#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_WRITE 0x00010000
+#define IXGBE_QDE_READ 0x00020000
+
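
On SR-IOV-capable parts the QDE register is an indexed window: software places the queue index in the IDX field, the drop-enable bit in bit 0, and sets WRITE to commit the entry (READ fetches one back). A sketch, with IXGBE_WRITE_REG and the IXGBE_QDE register offset assumed from the surrounding headers:

    /* Sketch: mark one queue for drop-on-full via the QDE window. */
    static void
    set_queue_drop(struct ixgbe_hw *hw, u32 queue, bool drop)
    {
            u32 qde = IXGBE_QDE_WRITE | (queue << IXGBE_QDE_IDX_SHIFT);

            if (drop)
                    qde |= IXGBE_QDE_ENABLE;
            IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
    }
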
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
/* Multiple Transmit Queue Command Register */
-#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
-#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
-#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
-#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
-#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
-#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
-#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
+#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 packet buffer */
+#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
+#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
+#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
/* Receive Descriptor bit definitions */
-#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
-#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
-#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
-#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
-#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
-#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
-#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
-#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
-#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
-#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
-#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
-#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
-#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
-#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
-#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
-#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
-#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
-#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
-#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
-#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
-#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
-#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
-#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
-#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
-#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
-#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
-#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
-#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
-#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
-#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
-#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
-#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
-#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
-#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
-#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
-#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
-#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
-#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
-#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
-#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
-#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
-#define IXGBE_RXD_PRI_SHIFT 13
-#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
-#define IXGBE_RXD_CFI_SHIFT 12
-
-#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
-#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
-#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
-#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
-#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */
-#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
-#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
-#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
-#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */
+#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */
/* PSRTYPE bit definitions */
-#define IXGBE_PSRTYPE_TCPHDR 0x00000010
-#define IXGBE_PSRTYPE_UDPHDR 0x00000020
-#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
-#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
-#define IXGBE_PSRTYPE_L2HDR 0x00001000
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
/* SRRCTL bit definitions */
-#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
-#define IXGBE_SRRCTL_RDMTS_SHIFT 22
-#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
-#define IXGBE_SRRCTL_DROP_EN 0x10000000
-#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
-#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
-#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
-#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
-#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
-#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
-#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
-#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
-#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
-#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
-#define IXGBE_RXDADV_RSCCNT_SHIFT 17
-#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
-#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
-#define IXGBE_RXDADV_SPH 0x8000
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
/* RSS Hash results */
-#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
-#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
-#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
-#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
-#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
-#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
-#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
-#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
/* RSS Packet Types as indicated in the receive descriptor. */
-#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
-#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
-#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
-#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
-#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
-#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
/* Security Processing bit Indication */
-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
/* Masks to determine if packets should be dropped due to frame errors */
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXD_ERR_CE | \
- IXGBE_RXD_ERR_LE | \
- IXGBE_RXD_ERR_PE | \
- IXGBE_RXD_ERR_OSE | \
- IXGBE_RXD_ERR_USE)
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXDADV_ERR_CE | \
- IXGBE_RXDADV_ERR_LE | \
- IXGBE_RXDADV_ERR_PE | \
- IXGBE_RXDADV_ERR_OSE | \
- IXGBE_RXDADV_ERR_USE)
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE
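
These masks collapse the individual CRC/length/size error bits into a single test so the receive path can make a drop decision with one AND; on 82599-class MACs the hardware already folds those conditions into the RXE bit, hence the separate _82599 mask. A sketch of the check, where staterr is the status/error word from an advanced Rx writeback descriptor:

    /* Sketch: should this completed Rx descriptor be dropped? */
    static bool
    rx_frame_has_errors(u32 staterr, enum ixgbe_mac_type mac)
    {
            if (mac == ixgbe_mac_82598EB)
                    return ((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0);
            return ((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599) != 0);
    }
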
/* Multicast bit mask */
-#define IXGBE_MCSTCTRL_MFE 0x4
+#define IXGBE_MCSTCTRL_MFE 0x4
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
-#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
-#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
/* Vlan-specific macros */
-#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
-#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
-#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
-#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
/* SR-IOV specific macros */
-#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
-#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
-#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
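
MBVFICR packs mailbox interrupt-cause bits for 16 VFs into each 32-bit register, so a VF number splits into a register index (vf >> 4) and a bit position (vf % 16). A sketch of testing a VF's request bit; the exact bit layout (requests in the low half, acks in the high half) follows the mailbox code and should be treated as an assumption here:

    /* Sketch: has this VF raised a mailbox request? */
    static bool
    vf_has_mailbox_request(struct ixgbe_hw *hw, u16 vf)
    {
            u32 index = IXGBE_MBVFICR_INDEX(vf);
            u32 vficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));

            return ((vficr & (1u << (vf & 0xF))) != 0);
    }
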
/* Little Endian defines */
#ifndef __le16
@@ -2031,97 +2224,138 @@
#endif
enum ixgbe_fdir_pballoc_type {
- IXGBE_FDIR_PBALLOC_64K = 0,
- IXGBE_FDIR_PBALLOC_128K,
- IXGBE_FDIR_PBALLOC_256K,
+ IXGBE_FDIR_PBALLOC_NONE = 0,
+ IXGBE_FDIR_PBALLOC_64K = 1,
+ IXGBE_FDIR_PBALLOC_128K = 2,
+ IXGBE_FDIR_PBALLOC_256K = 3,
};
-#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16
/* Flow Director register values */
-#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
-#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
-#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
-#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
-#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
-#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
-#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
-#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
-#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
-#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
-#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
-#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
-#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
-
-#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
-#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
-#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
-#define IXGBE_FDIRM_VLANID 0x00000001
-#define IXGBE_FDIRM_VLANP 0x00000002
-#define IXGBE_FDIRM_POOL 0x00000004
-#define IXGBE_FDIRM_L4P 0x00000008
-#define IXGBE_FDIRM_FLEX 0x00000010
-#define IXGBE_FDIRM_DIPv6 0x00000020
-
-#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
-#define IXGBE_FDIRFREE_FREE_SHIFT 0
-#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
-#define IXGBE_FDIRFREE_COLL_SHIFT 16
-#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
-#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
-#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
-#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
-#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
-#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
-#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
-#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
-#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
-#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
-#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
-#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
-#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
-#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
-#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
-#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
-
-#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
-#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
-#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH 0x00000007
-#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
-#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
-#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
-#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
-#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
-#define IXGBE_FDIRCMD_IPV6 0x00000080
-#define IXGBE_FDIRCMD_CLEARHT 0x00000100
-#define IXGBE_FDIRCMD_DROP 0x00000200
-#define IXGBE_FDIRCMD_INT 0x00000400
-#define IXGBE_FDIRCMD_LAST 0x00000800
-#define IXGBE_FDIRCMD_COLLISION 0x00001000
-#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
-#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
-#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
-#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
-#define IXGBE_FDIR_INIT_DONE_POLL 10
-#define IXGBE_FDIRCMD_CMD_POLL 10
+#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
+#define IXGBE_FDIRM_VLANID 0x00000001
+#define IXGBE_FDIRM_VLANP 0x00000002
+#define IXGBE_FDIRM_POOL 0x00000004
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
+
+#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT 0
+#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT 16
+#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
+#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
+#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
+#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
+
+#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
+#define IXGBE_FDIRCMD_IPV6 0x00000080
+#define IXGBE_FDIRCMD_CLEARHT 0x00000100
+#define IXGBE_FDIRCMD_DROP 0x00000200
+#define IXGBE_FDIRCMD_INT 0x00000400
+#define IXGBE_FDIRCMD_LAST 0x00000800
+#define IXGBE_FDIRCMD_COLLISION 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
+#define IXGBE_FDIR_INIT_DONE_POLL 10
+#define IXGBE_FDIRCMD_CMD_POLL 10
+
+#define IXGBE_FDIR_DROP_QUEUE 127
+
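
Flow Director commands are kicked off by writing FDIRCMD; hardware clears the CMD field when the operation retires, and IXGBE_FDIRCMD_CMD_POLL bounds how many re-reads the driver attempts before giving up. A sketch in the spirit of the 82599 code, with IXGBE_READ_REG, usec_delay() and the IXGBE_FDIRCMD register offset assumed from the ixgbe headers:

    /* Sketch: wait for an in-flight Flow Director command to retire. */
    static s32
    fdir_wait_cmd(struct ixgbe_hw *hw)
    {
            int i;

            for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
                    if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
                        IXGBE_FDIRCMD_CMD_MASK))
                            return (IXGBE_SUCCESS);
                    usec_delay(10);
            }
            return (IXGBE_ERR_FDIR_REINIT_FAILED);
    }
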
+#define IXGBE_STATUS_OVERHEATING_BIT 20 /* STATUS overtemp bit num */
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+
+/* Host Interface Command Structures */
+
+struct ixgbe_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+struct ixgbe_hic_drv_info {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ u8 pad; /* end spacing to ensure length is mult. of dword */
+ u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
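
The host-interface header prefixes every command block handed to firmware; for the driver-info command the driver fills in the CEM command byte, the payload length, and later a checksum over the block. A sketch of populating the structure, loosely following the set_fw_drv_ver flow; the checksum convention belongs to the real transport code and is left out here:

    /* Sketch: prepare a driver-info host interface command block. */
    static void
    fill_drv_info(struct ixgbe_hic_drv_info *info, u8 port,
        u8 maj, u8 min, u8 build, u8 sub)
    {
            info->hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
            info->hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
            info->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
            info->hdr.checksum = 0; /* computed over the block later */
            info->port_num = port;
            info->ver_maj = maj;
            info->ver_min = min;
            info->ver_build = build;
            info->ver_sub = sub;
            info->pad = 0;
            info->pad2 = 0;
    }
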
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
- u64 buffer_addr; /* Address of the descriptor's data buffer */
+ u64 buffer_addr; /* Address of the descriptor's data buffer */
union {
__le32 data;
struct {
- __le16 length; /* Data buffer length */
- u8 cso; /* Checksum offset */
- u8 cmd; /* Descriptor control */
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
} flags;
} lower;
union {
__le32 data;
struct {
- u8 status; /* Descriptor status */
- u8 css; /* Checksum start */
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
__le16 vlan;
} fields;
} upper;
@@ -2130,12 +2364,12 @@ struct ixgbe_legacy_tx_desc {
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
- __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 buffer_addr; /* Address of descriptor's data buf */
__le32 cmd_type_len;
__le32 olinfo_status;
} read;
struct {
- __le64 rsvd; /* Reserved */
+ __le64 rsvd; /* Reserved */
__le32 nxtseq_seed;
__le32 status;
} wb;
@@ -2144,10 +2378,10 @@ union ixgbe_adv_tx_desc {
/* Receive Descriptor - Legacy */
struct ixgbe_legacy_rx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- u8 status; /* Descriptor status */
- u8 errors; /* Descriptor Errors */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
__le16 vlan;
};
@@ -2191,121 +2425,170 @@ struct ixgbe_adv_tx_context_desc {
};
/* Adv Transmit Descriptor Config Masks */
-#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
-#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
-#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
-#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
-#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
-#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
-#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
-#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
-#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
-#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
-#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
-#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
-#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
-#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
-#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
-#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
-#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
-#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
-#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
-#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
-#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
-#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
-#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
-#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
-#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
-#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
-#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
-#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
-#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
-#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/
-#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
+#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
+#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */
+#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
+#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
+#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */
-#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
-#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
-#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */
+#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
/* Autonegotiation advertised speeds */
typedef u32 ixgbe_autoneg_advertised;
/* Link speed */
typedef u32 ixgbe_link_speed;
-#define IXGBE_LINK_SPEED_UNKNOWN 0
-#define IXGBE_LINK_SPEED_100_FULL 0x0008
-#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
-#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
-#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
-#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
- IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_100_FULL 0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
typedef u32 ixgbe_physical_layer;
-#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
-
-/* Flow Control Macros */
-#define PAUSE_RTT 8
-#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024)
-
-#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
- PAUSE_MTU(MTU))
-#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from IEEE 802.1Qbb Annex O
+ */
+
+/* BitTimes (BT) conversion */
+#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
+#define IXGBE_B2BT(BT) (BT * 8)
+
+/* Calculate Delay to respond to PFC */
+#define IXGBE_PFC_D 672
+
+/* Calculate Cable Delay */
+#define IXGBE_CABLE_DC 5556 /* Delay Copper */
+#define IXGBE_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */
+#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
+
+/* Calculate Interface Delay 82598, 82599 */
+#define IXGBE_PHY_D 12800
+#define IXGBE_MAC_D 4096
+#define IXGBE_XAUI_D (2 * 1024)
+
+#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define IXGBE_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define IXGBE_PCI_DELAY 10000
+
+/* Calculate X540 delay value in bit times */
+#define IXGBE_FILL_RATE (36 / 25)
+
+#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
+ (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD + IXGBE_B2BT(TC)))
+
+/* Calculate 82599, 82598 delay value in bit times */
+#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
+ (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
+ IXGBE_HD + IXGBE_B2BT(TC)))
+
+/* Calculate low threshold delay values */
+#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
+ (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
+#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC))
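
The DV macros turn the Annex O delay budget (link propagation, PFC response time, cable, interface and higher-layer delays) into a bit-time figure, and IXGBE_BT2KB then converts bit times into the KB units the flow-control threshold registers take. Note that IXGBE_FILL_RATE is written as (36 / 25), which evaluates to 1 under C integer division rather than the presumably intended 1.44, so the macros understate the budget unless the arithmetic is rearranged. A sketch of deriving a high-water mark, following the pattern used by the flow-control setup code:

    /* Sketch: per-TC high-water mark in KB; max_frame is in bytes. */
    static u32
    fc_high_water_kb(u32 max_frame, enum ixgbe_mac_type mac)
    {
            u32 dv;

            if (mac == ixgbe_mac_X540)
                    dv = IXGBE_DV_X540(max_frame, max_frame);
            else
                    dv = IXGBE_DV(max_frame, max_frame);
            return (IXGBE_BT2KB(dv));
    }
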
/* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
/* Software ATR input stream values and masks */
-#define IXGBE_ATR_HASH_MASK 0x7fff
-#define IXGBE_ATR_L4TYPE_MASK 0x3
-#define IXGBE_ATR_L4TYPE_UDP 0x1
-#define IXGBE_ATR_L4TYPE_TCP 0x2
-#define IXGBE_ATR_L4TYPE_SCTP 0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_MASK 0x3
+#define IXGBE_ATR_L4TYPE_UDP 0x1
+#define IXGBE_ATR_L4TYPE_TCP 0x2
+#define IXGBE_ATR_L4TYPE_SCTP 0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
enum ixgbe_atr_flow_type {
- IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
- IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
- IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
- IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
- IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
- IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
- IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
- IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
};
/* Flow Director ATR input struct. */
@@ -2313,26 +2596,26 @@ union ixgbe_atr_input {
/*
* Byte layout in order, all values with MSB first:
*
- * vm_pool - 1 byte
- * flow_type - 1 byte
- * vlan_id - 2 bytes
- * src_ip - 16 bytes
- * dst_ip - 16 bytes
- * src_port - 2 bytes
- * dst_port - 2 bytes
- * flex_bytes - 2 bytes
- * rsvd0 - 2 bytes - space reserved must be 0.
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
+ * vlan_id - 2 bytes
+ * src_ip - 16 bytes
+ * dst_ip - 16 bytes
+ * src_port - 2 bytes
+ * dst_port - 2 bytes
+ * flex_bytes - 2 bytes
+ * bkt_hash - 2 bytes
*/
struct {
- u8 vm_pool;
- u8 flow_type;
+ u8 vm_pool;
+ u8 flow_type;
__be16 vlan_id;
__be32 dst_ip[4];
__be32 src_ip[4];
__be16 src_port;
__be16 dst_port;
__be16 flex_bytes;
- __be16 rsvd0;
+ __be16 bkt_hash;
} formatted;
__be32 dword_stream[11];
};
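
The union gives the ATR/Flow Director hash two views of the same bytes: a field-by-field struct for describing a flow tuple and dword_stream[] for feeding the hash loop one word at a time. A sketch of describing a TCPv4 flow; the addresses and ports are purely illustrative, and every multi-byte field is big-endian per the layout comment above:

    /* Sketch: fill an ATR input block for one TCPv4 flow. */
    union ixgbe_atr_input in;

    memset(&in, 0, sizeof(in));
    in.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
    in.formatted.src_ip[0] = htonl(0xc0a80001);     /* 192.168.0.1 */
    in.formatted.dst_ip[0] = htonl(0xc0a80002);     /* 192.168.0.2 */
    in.formatted.src_port = htons(12345);
    in.formatted.dst_port = htons(80);
    /* in.dword_stream[] now drives the bucket/signature hash */
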
@@ -2354,25 +2637,15 @@ union ixgbe_atr_hash_dword {
};
-struct ixgbe_atr_input_masks {
- __be16 rsvd0;
- __be16 vlan_id_mask;
- __be32 dst_ip_mask[4];
- __be32 src_ip_mask[4];
- __be16 src_port_mask;
- __be16 dst_port_mask;
- __be16 flex_mask;
-};
-
/*
* Unavailable: The FCoE Boot Option ROM is not present in the flash.
* Disabled: Present; boot order is not set for any targets on the port.
* Enabled: Present; boot order is set for at least one target on the port.
*/
enum ixgbe_fcoe_boot_status {
- ixgbe_fcoe_bootstatus_disabled = 0,
- ixgbe_fcoe_bootstatus_enabled = 1,
- ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
+ ixgbe_fcoe_bootstatus_disabled = 0,
+ ixgbe_fcoe_bootstatus_enabled = 1,
+ ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
};
enum ixgbe_eeprom_type {
@@ -2387,6 +2660,8 @@ enum ixgbe_mac_type {
ixgbe_mac_82598EB,
ixgbe_mac_82599EB,
ixgbe_mac_82599_vf,
+ ixgbe_mac_X540,
+ ixgbe_mac_X540_vf,
ixgbe_num_macs
};
@@ -2419,10 +2694,10 @@ enum ixgbe_phy_type {
* 0 SFP_DA_CU
* 1 SFP_SR
* 2 SFP_LR
- * 3 SFP_DA_CU_CORE0 - 82599-specific
- * 4 SFP_DA_CU_CORE1 - 82599-specific
- * 5 SFP_SR/LR_CORE0 - 82599-specific
- * 6 SFP_SR/LR_CORE1 - 82599-specific
+ * 3 SFP_DA_CU_CORE0 - 82599-specific
+ * 4 SFP_DA_CU_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
*/
enum ixgbe_sfp_type {
ixgbe_sfp_type_da_cu = 0,
@@ -2477,26 +2752,26 @@ enum ixgbe_bus_type {
/* PCI bus speeds */
enum ixgbe_bus_speed {
- ixgbe_bus_speed_unknown = 0,
- ixgbe_bus_speed_33 = 33,
- ixgbe_bus_speed_66 = 66,
- ixgbe_bus_speed_100 = 100,
- ixgbe_bus_speed_120 = 120,
- ixgbe_bus_speed_133 = 133,
- ixgbe_bus_speed_2500 = 2500,
- ixgbe_bus_speed_5000 = 5000,
+ ixgbe_bus_speed_unknown = 0,
+ ixgbe_bus_speed_33 = 33,
+ ixgbe_bus_speed_66 = 66,
+ ixgbe_bus_speed_100 = 100,
+ ixgbe_bus_speed_120 = 120,
+ ixgbe_bus_speed_133 = 133,
+ ixgbe_bus_speed_2500 = 2500,
+ ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_reserved
};
/* PCI bus widths */
enum ixgbe_bus_width {
- ixgbe_bus_width_unknown = 0,
- ixgbe_bus_width_pcie_x1 = 1,
- ixgbe_bus_width_pcie_x2 = 2,
- ixgbe_bus_width_pcie_x4 = 4,
- ixgbe_bus_width_pcie_x8 = 8,
- ixgbe_bus_width_32 = 32,
- ixgbe_bus_width_64 = 64,
+ ixgbe_bus_width_unknown = 0,
+ ixgbe_bus_width_pcie_x1 = 1,
+ ixgbe_bus_width_pcie_x2 = 2,
+ ixgbe_bus_width_pcie_x4 = 4,
+ ixgbe_bus_width_pcie_x8 = 8,
+ ixgbe_bus_width_32 = 32,
+ ixgbe_bus_width_64 = 64,
ixgbe_bus_width_reserved
};
@@ -2520,7 +2795,7 @@ struct ixgbe_bus_info {
/* Flow control parameters */
struct ixgbe_fc_info {
- u32 high_water; /* Flow Control High-water */
+ u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
u32 low_water; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
@@ -2601,6 +2876,14 @@ struct ixgbe_hw_stats {
u64 fcoeptc;
u64 fcoedwrc;
u64 fcoedwtc;
+ u64 fcoe_noddp;
+ u64 fcoe_noddp_ext_buff;
+ u64 ldpcec;
+ u64 pcrc8ec;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 o2bgptc;
+ u64 o2bspc;
};
/* forward declaration */
@@ -2608,13 +2891,15 @@ struct ixgbe_hw;
/* iterator type for walking multicast address lists */
typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq);
+ u32 *vmdq);
/* Function pointer table */
struct ixgbe_eeprom_operations {
s32 (*init_params)(struct ixgbe_hw *);
s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+ s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
s32 (*write)(struct ixgbe_hw *, u16, u16);
+ s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
u16 (*calc_checksum)(struct ixgbe_hw *);
@@ -2641,6 +2926,8 @@ struct ixgbe_mac_operations {
s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
s32 (*setup_sfp)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
+ s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
@@ -2651,7 +2938,10 @@ struct ixgbe_mac_operations {
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
- bool *);
+ bool *);
+
+ /* Packet Buffer manipulation */
+ void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
/* LED */
s32 (*led_on)(struct ixgbe_hw *, u32);
@@ -2661,25 +2951,30 @@ struct ixgbe_mac_operations {
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*clear_rar)(struct ixgbe_hw *, u32);
s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ ixgbe_mc_addr_itr);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ ixgbe_mc_addr_itr, bool clear);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
s32 (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *, s32);
+
+ /* Manageability interface */
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
};
struct ixgbe_phy_operations {
@@ -2691,7 +2986,7 @@ struct ixgbe_phy_operations {
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
- bool);
+ bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
@@ -2703,58 +2998,60 @@ struct ixgbe_phy_operations {
};
struct ixgbe_eeprom_info {
- struct ixgbe_eeprom_operations ops;
- enum ixgbe_eeprom_type type;
- u32 semaphore_delay;
- u16 word_size;
- u16 address_bits;
+ struct ixgbe_eeprom_operations ops;
+ enum ixgbe_eeprom_type type;
+ u32 semaphore_delay;
+ u16 word_size;
+ u16 address_bits;
+ u16 word_page_size;
};
#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
struct ixgbe_mac_info {
- struct ixgbe_mac_operations ops;
- enum ixgbe_mac_type type;
- u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ struct ixgbe_mac_operations ops;
+ enum ixgbe_mac_type type;
+ u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
/* prefix for World Wide Node Name (WWNN) */
- u16 wwnn_prefix;
+ u16 wwnn_prefix;
/* prefix for World Wide Port Name (WWPN) */
- u16 wwpn_prefix;
+ u16 wwpn_prefix;
#define IXGBE_MAX_MTA 128
- u32 mta_shadow[IXGBE_MAX_MTA];
- s32 mc_filter_type;
- u32 mcft_size;
- u32 vft_size;
- u32 num_rar_entries;
- u32 rar_highwater;
- u32 rx_pb_size;
- u32 max_tx_queues;
- u32 max_rx_queues;
- u32 max_msix_vectors;
- bool msix_vectors_from_pcie;
- u32 orig_autoc;
- u32 orig_autoc2;
- bool orig_link_settings_stored;
- bool autotry_restart;
- u8 flags;
+ u32 mta_shadow[IXGBE_MAX_MTA];
+ s32 mc_filter_type;
+ u32 mcft_size;
+ u32 vft_size;
+ u32 num_rar_entries;
+ u32 rar_highwater;
+ u32 rx_pb_size;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 max_msix_vectors;
+ bool msix_vectors_from_pcie;
+ u32 orig_autoc;
+ bool arc_subsystem_valid;
+ u32 orig_autoc2;
+ bool orig_link_settings_stored;
+ bool autotry_restart;
+ u8 flags;
};
struct ixgbe_phy_info {
- struct ixgbe_phy_operations ops;
- enum ixgbe_phy_type type;
- u32 addr;
- u32 id;
- enum ixgbe_sfp_type sfp_type;
- bool sfp_setup_needed;
- u32 revision;
- enum ixgbe_media_type media_type;
- bool reset_disable;
- ixgbe_autoneg_advertised autoneg_advertised;
- enum ixgbe_smart_speed smart_speed;
- bool smart_speed_active;
- bool multispeed_fiber;
- bool reset_if_overtemp;
+ struct ixgbe_phy_operations ops;
+ enum ixgbe_phy_type type;
+ u32 addr;
+ u32 id;
+ enum ixgbe_sfp_type sfp_type;
+ bool sfp_setup_needed;
+ u32 revision;
+ enum ixgbe_media_type media_type;
+ bool reset_disable;
+ ixgbe_autoneg_advertised autoneg_advertised;
+ enum ixgbe_smart_speed smart_speed;
+ bool smart_speed_active;
+ bool multispeed_fiber;
+ bool reset_if_overtemp;
};
#include "ixgbe_mbx.h"
@@ -2789,62 +3086,66 @@ struct ixgbe_mbx_info {
};
struct ixgbe_hw {
- u8 *hw_addr;
- void *back;
- struct ixgbe_mac_info mac;
- struct ixgbe_addr_filter_info addr_ctrl;
- struct ixgbe_fc_info fc;
- struct ixgbe_phy_info phy;
- struct ixgbe_eeprom_info eeprom;
- struct ixgbe_bus_info bus;
- struct ixgbe_mbx_info mbx;
- u16 device_id;
- u16 vendor_id;
- u16 subsystem_device_id;
- u16 subsystem_vendor_id;
- u8 revision_id;
- bool adapter_stopped;
+ u8 *hw_addr;
+ void *back;
+ struct ixgbe_mac_info mac;
+ struct ixgbe_addr_filter_info addr_ctrl;
+ struct ixgbe_fc_info fc;
+ struct ixgbe_phy_info phy;
+ struct ixgbe_eeprom_info eeprom;
+ struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ bool adapter_stopped;
+ bool force_full_reset;
};
#define ixgbe_call_func(hw, func, params, error) \
- (func != NULL) ? func params : error
+ (func != NULL) ? func params : error
/* Error Codes */
-#define IXGBE_SUCCESS 0
-#define IXGBE_ERR_EEPROM -1
-#define IXGBE_ERR_EEPROM_CHECKSUM -2
-#define IXGBE_ERR_PHY -3
-#define IXGBE_ERR_CONFIG -4
-#define IXGBE_ERR_PARAM -5
-#define IXGBE_ERR_MAC_TYPE -6
-#define IXGBE_ERR_UNKNOWN_PHY -7
-#define IXGBE_ERR_LINK_SETUP -8
-#define IXGBE_ERR_ADAPTER_STOPPED -9
-#define IXGBE_ERR_INVALID_MAC_ADDR -10
-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
-#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
-#define IXGBE_ERR_RESET_FAILED -15
-#define IXGBE_ERR_SWFW_SYNC -16
-#define IXGBE_ERR_PHY_ADDR_INVALID -17
-#define IXGBE_ERR_I2C -18
-#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
-#define IXGBE_ERR_SFP_NOT_PRESENT -20
-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
-#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
-#define IXGBE_ERR_FDIR_REINIT_FAILED -23
-#define IXGBE_ERR_EEPROM_VERSION -24
-#define IXGBE_ERR_NO_SPACE -25
-#define IXGBE_ERR_OVERTEMP -26
-#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
-#define IXGBE_ERR_FC_NOT_SUPPORTED -28
-#define IXGBE_ERR_FLOW_CONTROL -29
-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
-#define IXGBE_ERR_PBA_SECTION -31
-#define IXGBE_ERR_INVALID_ARGUMENT -32
-#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+#define IXGBE_SUCCESS 0
+#define IXGBE_ERR_EEPROM -1
+#define IXGBE_ERR_EEPROM_CHECKSUM -2
+#define IXGBE_ERR_PHY -3
+#define IXGBE_ERR_CONFIG -4
+#define IXGBE_ERR_PARAM -5
+#define IXGBE_ERR_MAC_TYPE -6
+#define IXGBE_ERR_UNKNOWN_PHY -7
+#define IXGBE_ERR_LINK_SETUP -8
+#define IXGBE_ERR_ADAPTER_STOPPED -9
+#define IXGBE_ERR_INVALID_MAC_ADDR -10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
+#define IXGBE_ERR_RESET_FAILED -15
+#define IXGBE_ERR_SWFW_SYNC -16
+#define IXGBE_ERR_PHY_ADDR_INVALID -17
+#define IXGBE_ERR_I2C -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
+#define IXGBE_ERR_SFP_NOT_PRESENT -20
+#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
+#define IXGBE_ERR_FDIR_REINIT_FAILED -23
+#define IXGBE_ERR_EEPROM_VERSION -24
+#define IXGBE_ERR_NO_SPACE -25
+#define IXGBE_ERR_OVERTEMP -26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED -28
+#define IXGBE_ERR_FLOW_CONTROL -29
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define IXGBE_ERR_PBA_SECTION -31
+#define IXGBE_ERR_INVALID_ARGUMENT -32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
+#define IXGBE_ERR_OUT_OF_MEM -34
+
+#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#endif /* _IXGBE_TYPE_H_ */
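Editor's note: the ixgbe_call_func macro above is how the shared code guards against ops slots left NULL for a given MAC type. A minimal sketch of the dispatch pattern (wrapper name illustrative; ixgbe_api.c wraps ops entries in this style):

    s32 example_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min,
                               u8 build, u8 sub)
    {
            /* Falls back to IXGBE_NOT_IMPLEMENTED when the current MAC
             * type does not provide set_fw_drv_ver (e.g. a VF ops table). */
            return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver,
                                   (hw, maj, min, build, sub),
                                   IXGBE_NOT_IMPLEMENTED);
    }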
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index 2717899..a3147aa 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -37,25 +37,6 @@
#include "ixgbe_type.h"
#include "ixgbe_vf.h"
-s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
-s32 ixgbe_stop_hw_vf(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
-s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
-s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool autoneg_wait_to_complete);
-s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
-s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr);
-s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
-
#ifndef IXGBE_VFWRITE_REG
#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
#endif
@@ -82,7 +63,7 @@ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
hw->mac.ops.clear_hw_cntrs = NULL;
hw->mac.ops.get_media_type = NULL;
hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
- hw->mac.ops.stop_adapter = ixgbe_stop_hw_vf;
+ hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
hw->mac.ops.get_bus_info = NULL;
/* Link */
@@ -92,6 +73,7 @@ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
/* RAR, Multicast, VLAN */
hw->mac.ops.set_rar = ixgbe_set_rar_vf;
+ hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
hw->mac.ops.init_rx_addrs = NULL;
hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
hw->mac.ops.enable_mc = NULL;
@@ -161,11 +143,12 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
hw->mac.ops.stop_adapter(hw);
DEBUGOUT("Issuing a function level reset to MAC\n");
- ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL);
- IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, (ctrl | IXGBE_CTRL_RST));
- IXGBE_WRITE_FLUSH(hw);
- usec_delay(1);
+ ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ msec_delay(50);
/* we cannot reset while the RSTI / RSTD bits are asserted */
while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
@@ -182,9 +165,11 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
msec_delay(10);
- /* set our "perm_addr" based on info provided by PF */
- /* also set up the mc_filter_type which is piggy backed
- * on the mac address in word 3 */
+ /*
+ * set our "perm_addr" based on info provided by PF
+ * also set up the mc_filter_type which is piggy backed
+ * on the mac address in word 3
+ */
ret_val = mbx->ops.read_posted(hw, msgbuf,
IXGBE_VF_PERMADDR_MSG_LEN, 0);
if (!ret_val) {
@@ -204,7 +189,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
}
/**
- * ixgbe_stop_hw_vf - Generic stop Tx/Rx units
+ * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
* Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
@@ -212,9 +197,8 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
-s32 ixgbe_stop_hw_vf(struct ixgbe_hw *hw)
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
{
- u32 number_of_queues;
u32 reg_val;
u16 i;
@@ -224,34 +208,27 @@ s32 ixgbe_stop_hw_vf(struct ixgbe_hw *hw)
*/
hw->adapter_stopped = TRUE;
- /* Disable the receive unit by stopped each queue */
- number_of_queues = hw->mac.max_rx_queues;
- for (i = 0; i < number_of_queues; i++) {
- reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
- if (reg_val & IXGBE_RXDCTL_ENABLE) {
- reg_val &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
- }
- }
-
- IXGBE_WRITE_FLUSH(hw);
-
/* Clear interrupt mask to stop from interrupts being generated */
IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
- /* Clear any pending interrupts */
+ /* Clear any pending interrupts, flush previous writes */
IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
/* Disable the transmit unit. Each queue must be disabled. */
- number_of_queues = hw->mac.max_tx_queues;
- for (i = 0; i < number_of_queues; i++) {
- reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFTXDCTL(i));
- if (reg_val & IXGBE_TXDCTL_ENABLE) {
- reg_val &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
- }
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
}
+ /* flush all queue disables */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(2);
+
return IXGBE_SUCCESS;
}
@@ -304,15 +281,13 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
* @enable_addr: set flag that address is active
**/
s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr)
+ u32 enable_addr)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
- UNREFERENCED_PARAMETER(vmdq);
- UNREFERENCED_PARAMETER(enable_addr);
- UNREFERENCED_PARAMETER(index);
+ UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
memset(msgbuf, 0, 12);
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
@@ -342,7 +317,8 @@ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
* Updates the Multicast Table Array.
**/
s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr next)
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
@@ -351,6 +327,8 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 cnt, i;
u32 vmdq;
+ UNREFERENCED_1PARAMETER(clear);
+
DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
/* Each entry in the list uses 1 16 bit word. We have 30
@@ -388,14 +366,14 @@ s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
- UNREFERENCED_PARAMETER(vind);
+ UNREFERENCED_1PARAMETER(vind);
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- return(mbx->ops.write_posted(hw, msgbuf, 2, 0));
+ return mbx->ops.write_posted(hw, msgbuf, 2, 0);
}
/**
@@ -406,7 +384,7 @@ s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
**/
u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
{
- UNREFERENCED_PARAMETER(hw);
+ UNREFERENCED_1PARAMETER(hw);
return IXGBE_VF_MAX_TX_QUEUES;
}
@@ -418,7 +396,7 @@ u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
**/
u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
{
- UNREFERENCED_PARAMETER(hw);
+ UNREFERENCED_1PARAMETER(hw);
return IXGBE_VF_MAX_RX_QUEUES;
}
@@ -436,6 +414,38 @@ s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
return IXGBE_SUCCESS;
}
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ /*
+ * If index is one then this is the start of a new list and needs
+ * indication to the PF so it can do its own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
+ */
+ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+ if (addr)
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ if (!ret_val)
+ if (msgbuf[0] == (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
+ ret_val = IXGBE_ERR_OUT_OF_MEM;
+
+ return ret_val;
+}
+
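Editor's note: since an index of zero with no address asks the PF to flush this VF's entire macvlan list, a caller never has to build that mailbox message by hand. A minimal sketch under that reading (helper name hypothetical):

    static s32 example_clear_vf_macvlans(struct ixgbe_hw *hw)
    {
            /* index 0 + NULL address: the PF clears all of this VF's
             * macvlans and no new list is started. */
            return ixgbevf_set_uc_addr_vf(hw, 0, NULL);
    }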
/**
* ixgbe_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
@@ -446,13 +456,10 @@ s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
* Set the link speed in the AUTOC register and restarts link.
**/
s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
{
- UNREFERENCED_PARAMETER(hw);
- UNREFERENCED_PARAMETER(speed);
- UNREFERENCED_PARAMETER(autoneg);
- UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ UNREFERENCED_4PARAMETER(hw, speed, autoneg, autoneg_wait_to_complete);
return IXGBE_SUCCESS;
}
@@ -466,10 +473,10 @@ s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
* Reads the links register to determine if link is up and the current speed
**/
s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool autoneg_wait_to_complete)
+ bool *link_up, bool autoneg_wait_to_complete)
{
u32 links_reg;
- UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
if (!(hw->mbx.ops.check_for_rst(hw, 0))) {
*link_up = FALSE;
diff --git a/sys/dev/ixgbe/ixgbe_vf.h b/sys/dev/ixgbe/ixgbe_vf.h
index d0c4b34..5c77ba4 100644
--- a/sys/dev/ixgbe/ixgbe_vf.h
+++ b/sys/dev/ixgbe/ixgbe_vf.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,52 +35,52 @@
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
-#define IXGBE_VF_IRQ_CLEAR_MASK 7
-#define IXGBE_VF_MAX_TX_QUEUES 8
-#define IXGBE_VF_MAX_RX_QUEUES 8
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 8
+#define IXGBE_VF_MAX_RX_QUEUES 8
-#define IXGBE_VFCTRL 0x00000
-#define IXGBE_VFSTATUS 0x00008
-#define IXGBE_VFLINKS 0x00010
-#define IXGBE_VFFRTIMER 0x00048
-#define IXGBE_VFRXMEMWRAP 0x03190
-#define IXGBE_VTEICR 0x00100
-#define IXGBE_VTEICS 0x00104
-#define IXGBE_VTEIMS 0x00108
-#define IXGBE_VTEIMC 0x0010C
-#define IXGBE_VTEIAC 0x00110
-#define IXGBE_VTEIAM 0x00114
-#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
-#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
-#define IXGBE_VTIVAR_MISC 0x00140
-#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
/* define IXGBE_VFPBACL still says TBD in EAS */
-#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
-#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
-#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
-#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
-#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
-#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
-#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
-#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
-#define IXGBE_VFPSRTYPE 0x00300
-#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
-#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
-#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
-#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
-#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
-#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
-#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
-#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
-#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
-#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
-#define IXGBE_VFGPRC 0x0101C
-#define IXGBE_VFGPTC 0x0201C
-#define IXGBE_VFGORC_LSB 0x01020
-#define IXGBE_VFGORC_MSB 0x01024
-#define IXGBE_VFGOTC_LSB 0x02020
-#define IXGBE_VFGOTC_MSB 0x02024
-#define IXGBE_VFMPRC 0x01034
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
struct ixgbevf_hw_stats {
@@ -109,5 +109,23 @@ struct ixgbevf_hw_stats {
u64 saved_reset_vfmprc;
};
-#endif /* __IXGBE_VF_H__ */
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool autoneg_wait_to_complete);
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr,
+ bool clear);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+#endif /* __IXGBE_VF_H__ */
diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c
new file mode 100755
index 0000000..cab014a
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_x540.c
@@ -0,0 +1,971 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2012, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+/**
+ * ixgbe_init_ops_X540 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for X540.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X540");
+
+ ret_val = ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+
+ /* EEPROM */
+ eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
+ eeprom->ops.read = &ixgbe_read_eerd_X540;
+ eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
+ eeprom->ops.write = &ixgbe_write_eewr_X540;
+ eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
+ eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
+ eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
+ eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
+
+ /* PHY */
+ phy->ops.init = &ixgbe_init_phy_ops_generic;
+ phy->ops.reset = NULL;
+
+ /* MAC */
+ mac->ops.reset_hw = &ixgbe_reset_hw_X540;
+ mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
+ mac->ops.get_media_type = &ixgbe_get_media_type_X540;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_X540;
+ mac->ops.read_analog_reg8 = NULL;
+ mac->ops.write_analog_reg8 = NULL;
+ mac->ops.start_hw = &ixgbe_start_hw_X540;
+ mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
+ mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
+ mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+ mac->rar_highwater = 1;
+ mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
+ mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+
+ /* Link */
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_generic;
+ mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+ mac->ops.check_link = &ixgbe_check_mac_link_generic;
+
+ mac->mcft_size = 128;
+ mac->vft_size = 128;
+ mac->num_rar_entries = 128;
+ mac->rx_pb_size = 384;
+ mac->max_tx_queues = 128;
+ mac->max_rx_queues = 128;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ /*
+ * FWSM register
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+ IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
+
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+ /* LEDs */
+ mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
+ mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_X540 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: TRUE when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_media_type_X540 - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return ixgbe_media_type_copper;
+}
+
+/**
+ * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: TRUE if autonegotiation enabled
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ **/
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ DEBUGFUNC("ixgbe_setup_mac_link_X540");
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_reset_hw_X540 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ **/
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 ctrl, i;
+
+ DEBUGFUNC("ixgbe_reset_hw_X540");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+mac_reset_top:
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("Reset polling failed to complete.\n");
+ }
+ msec_delay(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function
+ * and the generation-specific start_hw function.
+ * Then performs revision-specific operations, if any.
+ **/
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_start_hw_X540");
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ DEBUGFUNC("ixgbe_init_eeprom_params_X540");
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
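Editor's note: the EEC size field above is a biased power-of-two exponent, so the decode stays a one-liner. A standalone sketch of the same arithmetic (assuming the usual bias of IXGBE_EEPROM_WORD_SIZE_SHIFT; with a bias of 6, a size field of 2 decodes to 1 << 8 = 256 words):

    static u16 example_decode_eeprom_words(u32 eec)
    {
            u16 size_field = (u16)((eec & IXGBE_EEC_SIZE) >>
                                   IXGBE_EEC_SIZE_SHIFT);

            /* Same decode as ixgbe_init_eeprom_params_X540 above. */
            return 1 << (size_field + IXGBE_EEPROM_WORD_SIZE_SHIFT);
    }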
+
+/**
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_read_eerd_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS)
+ status = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads one or more 16 bit words from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS)
+ status = ixgbe_read_eerd_buffer_generic(hw, offset,
+ words, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_eewr_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS)
+ status = ixgbe_write_eewr_generic(hw, offset, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes one or more 16 bit words to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS)
+ status = ixgbe_write_eewr_buffer_generic(hw, offset,
+ words, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ *
+ * This function does not use synchronization for EERD and EEWR. It can
+ * be used internally by functions that utilize ixgbe_acquire_swfw_sync_X540.
+ *
+ * @hw: pointer to hardware structure
+ **/
+u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ /*
+ * Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores here. Instead use
+ * ixgbe_read_eerd_generic
+ */
+
+ DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
+
+ /* Include 0x0-0x3F in the checksum */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (ixgbe_read_eerd_generic(hw, i, &word) != IXGBE_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+
+ /*
+ * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, i, &pointer) != IXGBE_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, pointer, &length) !=
+ IXGBE_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (pointer + length) >= hw->eeprom.word_size)
+ continue;
+
+ for (j = pointer+1; j <= pointer+length; j++) {
+ if (ixgbe_read_eerd_generic(hw, j, &word) !=
+ IXGBE_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return checksum;
+}
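Editor's note: the closing subtraction defines the stored checksum as whatever brings the covered words up to IXGBE_EEPROM_SUM, so validation only needs to re-sum and compare. A minimal sketch of that invariant (buffer and names hypothetical; 16-bit wraparound is intentional):

    static bool example_checksum_ok(const u16 *covered, u16 n, u16 stored)
    {
            u16 sum = 0;
            u16 i;

            for (i = 0; i < n; i++)
                    sum += covered[i];

            /* calc_checksum returned IXGBE_EEPROM_SUM - sum, so adding the
             * stored word back should land exactly on IXGBE_EEPROM_SUM. */
            return (u16)(sum + stored) == (u16)IXGBE_EEPROM_SUM;
    }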
+
+/**
+ * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ goto out;
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ /*
+ * Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+
+ /*
+ * Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+out:
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing the EEPROM to shadow RAM using the EEWR register, software
+ * calculates the checksum, updates the EEPROM, and instructs the hardware
+ * to update the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT("EEPROM read failed\n");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ /*
+ * Do not use hw->eeprom.ops.write because we do not want to
+ * take the synchronization semaphores twice here.
+ */
+ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+
+ if (status == IXGBE_SUCCESS)
+ status = ixgbe_update_flash_X540(hw);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
+ * EEPROM from shadow RAM to the flash device.
+ **/
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+ u32 flup;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ DEBUGFUNC("ixgbe_update_flash_X540");
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_ERR_EEPROM) {
+ DEBUGOUT("Flash update time out\n");
+ goto out;
+ }
+
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+
+ if (hw->revision_id == 0) {
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ if (flup & IXGBE_EEC_SEC1VAL) {
+ flup |= IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ }
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ * flash update is done.
+ **/
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
+
+ for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (reg & IXGBE_EEC_FLUDONE) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 5;
+ u32 hwmask = 0;
+ u32 timeout = 200;
+ u32 i;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
+
+ if (swmask == IXGBE_GSSR_EEP_SM)
+ hwmask = IXGBE_GSSR_FLASH_SM;
+
+ /* SW only mask doesn't have FW bit pair */
+ if (swmask == IXGBE_GSSR_SW_MNG_SM)
+ fwmask = 0;
+
+ for (i = 0; i < timeout; i++) {
+ /*
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+ goto out;
+ } else {
+ /*
+ * Firmware currently using resource (fwmask), hardware
+ * currently using resource (hwmask), or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+ }
+ }
+
+ /* Failed to get SW only semaphore */
+ if (swmask == IXGBE_GSSR_SW_MNG_SM) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ /* If the resource is not released by the FW/HW the SW can assume that
+ * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
+ * of the requested resource(s) while ignoring the corresponding FW/HW
+ * bits in the SW_FW_SYNC register.
+ */
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (swfw_sync & (fwmask | hwmask)) {
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync_X540");
+
+ ixgbe_get_swfw_sync_semaphore(hw);
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+}
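Editor's note: the EERD/EEWR wrappers earlier in this file show the intended caller pattern for this acquire/release pair; in sketch form, with the EEPROM resource mask:

    /* Acquire the shared resource, use it, release it. */
    if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
        IXGBE_SUCCESS) {
            /* ... access the EEPROM via the generic EERD/EEWR helpers ... */
            hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
    }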
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ usec_delay(50);
+ }
+
+ /* Now get the semaphore between SW/FW through the REGSMP bit */
+ if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swsm & IXGBE_SWFW_REGSMP))
+ break;
+
+ usec_delay(50);
+ }
+
+ /*
+ * Release semaphores and return error if SW NVM semaphore
+ * was not granted because we don't have access to the EEPROM
+ */
+ if (i >= timeout) {
+ DEBUGOUT("REGSMP Software NVM semaphore not "
+ "granted.\n");
+ ixgbe_release_swfw_sync_semaphore(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ } else {
+ DEBUGOUT("Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync_semaphore");
+
+ /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm &= ~IXGBE_SWFW_REGSMP;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_blink_led_start_X540 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ DEBUGFUNC("ixgbe_blink_led_start_X540");
+
+ /*
+ * Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+ * This will be reversed when we stop the blinking.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
+ if (link_up == FALSE) {
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ }
+ /* Set the LED to LINK_UP + BLINK. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+
+ DEBUGFUNC("ixgbe_blink_led_stop_X540");
+
+ /* Restore the LED to its default value. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ ledctl_reg &= ~IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+
+ /* Unforce link and speed in the MAC. */
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
diff --git a/sys/dev/ixgbe/ixgbe_x540.h b/sys/dev/ixgbe/ixgbe_x540.h
new file mode 100755
index 0000000..29cf8bb
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_x540.h
@@ -0,0 +1,65 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2012, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool link_up_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
+u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+#endif /* _IXGBE_X540_H_ */
diff --git a/sys/dev/ixgbe/ixv.c b/sys/dev/ixgbe/ixv.c
index 1a8b507..a0594b9 100644
--- a/sys/dev/ixgbe/ixv.c
+++ b/sys/dev/ixgbe/ixv.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2011, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixv_driver_version[] = "1.0.1";
+char ixv_driver_version[] = "1.1.2";
/*********************************************************************
* PCI Device ID Table
@@ -57,6 +57,7 @@ char ixv_driver_version[] = "1.0.1";
static ixv_vendor_info_t ixv_vendor_info_array[] =
{
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -386,7 +387,7 @@ ixv_attach(device_t dev)
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.pause_time = IXV_FC_PAUSE;
hw->fc.low_water = IXV_FC_LO;
- hw->fc.high_water = IXV_FC_HI;
+ hw->fc.high_water[0] = IXV_FC_HI;
hw->fc.send_xon = TRUE;
error = ixgbe_init_hw(hw);
@@ -1023,6 +1024,17 @@ ixv_msix_que(void *arg)
IXV_TX_LOCK(txr);
more_tx = ixv_txeof(txr);
+ /*
+ ** Make certain that if the stack
+ ** has anything queued the task gets
+ ** scheduled to handle it.
+ */
+#if __FreeBSD_version < 800000
+ if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
+#else
+ if (!drbr_empty(adapter->ifp, txr->br))
+#endif
+ more_tx = 1;
IXV_TX_UNLOCK(txr);
more_rx = ixv_rxeof(que, adapter->rx_process_limit);
@@ -1195,7 +1207,7 @@ ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
struct mbuf *m_head;
bus_dma_segment_t segs[32];
bus_dmamap_t map;
- struct ixv_tx_buf *txbuf;
+ struct ixv_tx_buf *txbuf, *txbuf_mapped;
union ixgbe_adv_tx_desc *txd = NULL;
m_head = *m_headp;
@@ -1214,6 +1226,7 @@ ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
*/
first = txr->next_avail_desc;
txbuf = &txr->tx_buffers[first];
+ txbuf_mapped = txbuf;
map = txbuf->map;
/*
@@ -1383,7 +1396,7 @@ ixv_set_multi(struct adapter *adapter)
update_ptr = mta;
ixgbe_update_mc_addr_list(&adapter->hw,
- update_ptr, mcnt, ixv_mc_array_itr);
+ update_ptr, mcnt, ixv_mc_array_itr, TRUE);
return;
}
@@ -2702,11 +2715,14 @@ ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
bus_dma_segment_t pseg[1];
struct ixv_rx_buf *rxbuf;
struct mbuf *mh, *mp;
- int i, nsegs, error, cleaned;
-
- i = rxr->next_to_refresh;
- cleaned = -1; /* Signify no completions */
- while (i != limit) {
+ int i, j, nsegs, error;
+ bool refreshed = FALSE;
+
+ i = j = rxr->next_to_refresh;
+ /* Get the control variable, one beyond refresh point */
+ if (++j == adapter->num_rx_desc)
+ j = 0;
+ while (j != limit) {
rxbuf = &rxr->rx_buffers[i];
if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
mh = m_gethdr(M_DONTWAIT, MT_DATA);
@@ -2737,34 +2753,36 @@ ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
M_PKTHDR, adapter->rx_mbuf_sz);
if (mp == NULL)
goto update;
- mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("GET BUF: dmamap load"
- " failure - %d\n", error);
- m_free(mp);
- goto update;
- }
- rxbuf->m_pack = mp;
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].read.pkt_addr =
- htole64(pseg[0].ds_addr);
- }
+ } else
+ mp = rxbuf->m_pack;
- cleaned = i;
+ mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+ rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ printf("GET BUF: dmamap load"
+ " failure - %d\n", error);
+ m_free(mp);
+ rxbuf->m_pack = NULL;
+ goto update;
+ }
+ rxbuf->m_pack = mp;
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_PREREAD);
+ rxr->rx_base[i].read.pkt_addr =
+ htole64(pseg[0].ds_addr);
+
+ refreshed = TRUE;
+ rxr->next_to_refresh = i = j;
/* Calculate next index */
- if (++i == adapter->num_rx_desc)
- i = 0;
- /* This is the work marker for refresh */
- rxr->next_to_refresh = i;
+ if (++j == adapter->num_rx_desc)
+ j = 0;
}
update:
- if (cleaned != -1) /* If we refreshed some, bump tail */
+ if (refreshed) /* update tail index */
IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_VFRDT(rxr->me), cleaned);
+ IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
return;
}
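Editor's note: the rewrite above replaces the old cleaned marker with a control variable j that runs one slot ahead of i, so next_to_refresh only ever advances onto slots that were actually reloaded. A worked trace (ring size hypothetical):

    /*
     * With num_rx_desc = 8, next_to_refresh = 6 and limit = 2: i starts
     * at 6 and j one beyond at 7.  Slots 6, 7 and 0 are reloaded while j
     * wraps 7 -> 0 -> 1 -> 2; the loop stops when j == limit, leaving
     * next_to_refresh at 1, the first slot the next call will refresh.
     */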
@@ -2974,6 +2992,7 @@ skip_head:
rxr->lro_enabled = FALSE;
rxr->rx_split_packets = 0;
rxr->rx_bytes = 0;
+ rxr->discard = FALSE;
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -3233,31 +3252,41 @@ ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
return;
}
+ IXV_RX_UNLOCK(rxr);
(*ifp->if_input)(ifp, m);
+ IXV_RX_LOCK(rxr);
}
static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
- struct adapter *adapter = rxr->adapter;
struct ixv_rx_buf *rbuf;
- struct mbuf *mh, *mp;
rbuf = &rxr->rx_buffers[i];
- if (rbuf->fmp != NULL) /* Partial chain ? */
- m_freem(rbuf->fmp);
- mh = rbuf->m_head;
- mp = rbuf->m_pack;
+ if (rbuf->fmp != NULL) {/* Partial chain ? */
+ rbuf->fmp->m_flags |= M_PKTHDR;
+ m_freem(rbuf->fmp);
+ rbuf->fmp = NULL;
+ }
- /* Reuse loaded DMA map and just update mbuf chain */
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
+ /*
+ ** With advanced descriptors the writeback
+ ** clobbers the buffer addrs, so it's easier
+ ** to just free the existing mbufs and take
+ ** the normal refresh path to get new buffers
+ ** and mapping.
+ */
+ if (rbuf->m_head) {
+ m_free(rbuf->m_head);
+ rbuf->m_head = NULL;
+ }
+
+ if (rbuf->m_pack) {
+ m_free(rbuf->m_pack);
+ rbuf->m_pack = NULL;
+ }
- mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
- mp->m_data = mp->m_ext.ext_buf;
- mp->m_next = NULL;
return;
}
@@ -3396,7 +3425,8 @@ ixv_rxeof(struct ix_queue *que, int count)
} else {
/* Singlet, prepare to send */
sendmp = mh;
- if (staterr & IXGBE_RXD_STAT_VP) {
+ if ((adapter->num_vlans) &&
+ (staterr & IXGBE_RXD_STAT_VP)) {
sendmp->m_pkthdr.ether_vtag = vtag;
sendmp->m_flags |= M_VLANTAG;
}
@@ -3470,10 +3500,8 @@ next_desc:
}
/* Refresh any remaining buf structs */
- if (processed != 0) {
+ if (ixv_rx_unrefreshed(rxr))
ixv_refresh_mbufs(rxr, i);
- processed = 0;
- }
rxr->next_to_check = i;
@@ -3611,12 +3639,14 @@ ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
+ IXV_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
ixv_shadow_vfta[index] |= (1 << bit);
++adapter->num_vlans;
/* Re-init to load the changes */
- ixv_init(adapter);
+ ixv_init_locked(adapter);
+ IXV_CORE_UNLOCK(adapter);
}
/*
@@ -3636,12 +3666,14 @@ ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
+ IXV_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
ixv_shadow_vfta[index] &= ~(1 << bit);
--adapter->num_vlans;
/* Re-init to load the changes */
- ixv_init(adapter);
+ ixv_init_locked(adapter);
+ IXV_CORE_UNLOCK(adapter);
}
static void
diff --git a/sys/dev/ixgbe/ixv.h b/sys/dev/ixgbe/ixv.h
index 32a682e..096819c3 100644
--- a/sys/dev/ixgbe/ixv.h
+++ b/sys/dev/ixgbe/ixv.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -415,4 +415,20 @@ drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
return (!buf_ring_empty(br));
}
#endif
+
+/*
+** Find the number of unrefreshed RX descriptors
+*/
+static inline u16
+ixv_rx_unrefreshed(struct rx_ring *rxr)
+{
+ struct adapter *adapter = rxr->adapter;
+
+ if (rxr->next_to_check > rxr->next_to_refresh)
+ return (rxr->next_to_check - rxr->next_to_refresh - 1);
+ else
+ return ((adapter->num_rx_desc + rxr->next_to_check) -
+ rxr->next_to_refresh - 1);
+}
+
#endif /* _IXV_H_ */
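
The arithmetic in ixv_rx_unrefreshed() is easy to misread, so here is a standalone model with two worked cases; the subtraction of one keeps the refresh tail from ever catching the check head (ring size and indices are illustrative):

#include <stdio.h>

static unsigned short
rx_unrefreshed(int num_rx_desc, unsigned short next_to_check,
    unsigned short next_to_refresh)
{
        if (next_to_check > next_to_refresh)
                return (next_to_check - next_to_refresh - 1);
        return ((num_rx_desc + next_to_check) - next_to_refresh - 1);
}

int
main(void)
{
        /* wrapped case: check=10, refresh=250 on a 256-entry ring */
        printf("%u\n", rx_unrefreshed(256, 10, 250));   /* prints 15 */
        /* unwrapped case: check=100, refresh=40 */
        printf("%u\n", rx_unrefreshed(256, 100, 40));   /* prints 59 */
        return (0);
}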
diff --git a/sys/dev/mps/mpi/mpi2.h b/sys/dev/mps/mpi/mpi2.h
index 6d883bc..b9e46ad 100644
--- a/sys/dev/mps/mpi/mpi2.h
+++ b/sys/dev/mps/mpi/mpi2.h
@@ -1,6 +1,35 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2.h
@@ -9,7 +38,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.14
+ * mpi2.h Version: 02.00.18
*
* Version History
* ---------------
@@ -58,6 +87,15 @@
* Added MSI-x index mask and shift for Reply Post Host
* Index register.
* Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
* --------------------------------------------------------------------------
*/
@@ -83,7 +121,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x0E)
+#define MPI2_HEADER_VERSION_UNIT (0x12)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
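
The mask/shift pair lets host code recover the header-set unit version from a packed 16-bit version word. A small standalone check, with the defines copied from this header:

#include <stdio.h>

#define MPI2_HEADER_VERSION_UNIT        (0x12)
#define MPI2_HEADER_VERSION_DEV         (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK   (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT  (8)

int
main(void)
{
        unsigned short ver = (MPI2_HEADER_VERSION_UNIT <<
            MPI2_HEADER_VERSION_UNIT_SHIFT) | MPI2_HEADER_VERSION_DEV;
        unsigned unit = (ver & MPI2_HEADER_VERSION_UNIT_MASK) >>
            MPI2_HEADER_VERSION_UNIT_SHIFT;

        printf("unit = 0x%02X\n", unit);        /* 0x12 for this header set */
        return (0);
}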
@@ -476,8 +514,6 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
/*****************************************************************************
*
* Message Functions
-* 0x80 -> 0x8F reserved for private message use per product
-*
*
*****************************************************************************/
@@ -508,6 +544,9 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */
#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator */
#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) /* Host Based Discovery Action */
+#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) /* Power Management Control */
+#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) /* beginning of product-specific range */
+#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) /* end of product-specific range */
@@ -922,6 +961,9 @@ typedef struct _MPI2_MPI_SGE_UNION
#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00)
#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04)
+#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST)
+#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC)
+
/* Address Size */
#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
@@ -1046,11 +1088,11 @@ typedef struct _MPI2_IEEE_SGE_UNION
/* Data Location Address Space */
#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
-#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
-#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) /* IEEE Simple Element only */
+#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) /* IEEE Simple Element only */
#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
-#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
-
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) /* IEEE Simple Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (0x03) /* IEEE Chain Element only */
/****************************************************************************
* IEEE SGE operation Macros
diff --git a/sys/dev/mps/mpi/mpi2_cnfg.h b/sys/dev/mps/mpi/mpi2_cnfg.h
index 78f26f1..ef3334f 100644
--- a/sys/dev/mps/mpi/mpi2_cnfg.h
+++ b/sys/dev/mps/mpi/mpi2_cnfg.h
@@ -1,13 +1,42 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_cnfg.h
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.13
+ * mpi2_cnfg.h Version: 02.00.17
*
* Version History
* ---------------
@@ -110,6 +139,31 @@
* Added Ethernet configuration pages.
* 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
* Added SAS PHY Page 4 structure and defines.
+ * 02-10-10 02.00.14 Modified the comments for the configuration page
+ * structures that contain an array of data. The host
+ * should use the "count" field in the page data (e.g. the
+ * NumPhys field) to determine the number of valid elements
+ * in the array.
+ * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added PowerManagementCapabilities to IO Unit Page 7.
+ * Added PortWidthModGroup field to
+ * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ * define.
+ * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
* --------------------------------------------------------------------------
*/
@@ -193,6 +247,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
+#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
/*****************************************************************************
@@ -322,7 +377,7 @@ typedef struct _MPI2_CONFIG_REQUEST
#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07)
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
/* Config Reply Message */
@@ -368,14 +423,19 @@ typedef struct _MPI2_CONFIG_REPLY
#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
+#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E)
+
#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
-#define MPI2_MFGPAGE_DEVID_SAS2208_7 (0x0086)
-#define MPI2_MFGPAGE_DEVID_SAS2208_8 (0x0087)
+#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086)
+#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087)
+#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E)
+
+
/* Manufacturing Page 0 */
@@ -541,7 +601,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_4
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength or NumPhys at runtime.
+ * one and check the value returned for NumPhys at runtime.
*/
#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1)
@@ -590,23 +650,31 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO
U32 Pinout; /* 0x00 */
U8 Connector[16]; /* 0x04 */
U8 Location; /* 0x14 */
- U8 Reserved1; /* 0x15 */
+ U8 ReceptacleID; /* 0x15 */
U16 Slot; /* 0x16 */
U32 Reserved2; /* 0x18 */
} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t;
/* defines for the Pinout field */
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L4 (0x00080000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L3 (0x00040000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L2 (0x00020000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L1 (0x00010000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L4 (0x00000800)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L3 (0x00000400)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L2 (0x00000200)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L1 (0x00000100)
-#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x00000002)
-#define MPI2_MANPAGE7_PINOUT_CONNECTION_UNKNOWN (0x00000001)
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
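
With the reworked encoding, the Pinout field packs a lane number in bits 15:8 and a connector type in bits 7:0. A short standalone decode, using defines copied from above and a hypothetical sample value:

#include <stdio.h>

#define MPI2_MANPAGE7_PINOUT_LANE_MASK  (0x0000FF00)
#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
#define MPI2_MANPAGE7_PINOUT_TYPE_MASK  (0x000000FF)
#define MPI2_MANPAGE7_PINOUT_SFF_8087   (0x05)

int
main(void)
{
        /* hypothetical value: lane 2 of an SFF-8087 connector */
        unsigned int pinout = (2 << MPI2_MANPAGE7_PINOUT_LANE_SHIFT) |
            MPI2_MANPAGE7_PINOUT_SFF_8087;
        unsigned lane = (pinout & MPI2_MANPAGE7_PINOUT_LANE_MASK) >>
            MPI2_MANPAGE7_PINOUT_LANE_SHIFT;
        unsigned type = pinout & MPI2_MANPAGE7_PINOUT_TYPE_MASK;

        printf("lane %u, type 0x%02X\n", lane, type);
        return (0);
}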
/* defines for the Location field */
#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
@@ -619,7 +687,7 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check NumPhys at runtime.
+ * one and check the value returned for NumPhys at runtime.
*/
#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1)
@@ -640,7 +708,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7,
Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t;
-#define MPI2_MANUFACTURING7_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
/* defines for the Flags field */
#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
@@ -717,6 +785,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
/* IO Unit Page 1 Flags defines */
#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
+#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400)
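
The new _SHIFT define gives host code a clean way to extract the two-bit SATA write cache setting from the IO Unit Page 1 Flags word. A standalone illustration, with the defines copied from above and a sample flags value:

#include <stdio.h>

#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE          (0x00000600)
#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT         (9)
#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE       (0x00000200)

int
main(void)
{
        unsigned int flags = MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE;
        unsigned setting = (flags & MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE) >>
            MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT;

        /* 0 = enabled, 1 = disabled, 2 = unchanged */
        printf("write cache setting = %u\n", setting);  /* prints 1 */
        return (0);
}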
@@ -724,15 +793,13 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
-#define MPI2_IOUNITPAGE1_MULTI_PATHING (0x00000002)
-#define MPI2_IOUNITPAGE1_SINGLE_PATHING (0x00000000)
/* IO Unit Page 3 */
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
+ * one and check the value returned for GPIOCount at runtime.
*/
#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
@@ -761,7 +828,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3
/*
* Upper layer code (drivers, utilities, etc.) should leave this define set to
- * one and check Header.PageLength or NumDmaEngines at runtime.
+ * one and check the value returned for NumDmaEngines at runtime.
*/
#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1)
@@ -826,15 +893,17 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7
U8 PCIeWidth; /* 0x06 */
U8 PCIeSpeed; /* 0x07 */
U32 ProcessorState; /* 0x08 */
- U32 Reserved2; /* 0x0C */
+ U32 PowerManagementCapabilities; /* 0x0C */
U16 IOCTemperature; /* 0x10 */
U8 IOCTemperatureUnits; /* 0x12 */
U8 IOCSpeed; /* 0x13 */
- U32 Reserved3; /* 0x14 */
+ U16 BoardTemperature; /* 0x14 */
+ U8 BoardTemperatureUnits; /* 0x16 */
+ U8 Reserved3; /* 0x17 */
} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
-#define MPI2_IOUNITPAGE7_PAGEVERSION (0x00)
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02)
/* defines for IO Unit Page 7 PCIeWidth field */
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
@@ -855,6 +924,13 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7
#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01)
#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02)
+/* defines for IO Unit Page 7 PowerManagementCapabilities field */
+#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004)
+
/* defines for IO Unit Page 7 IOCTemperatureUnits field */
#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
@@ -866,6 +942,11 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7
#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04)
#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08)
+/* defines for IO Unit Page 7 BoardTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
+
/****************************************************************************
@@ -1198,7 +1279,7 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_3
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength or NumPhys at runtime.
+ * one and check the value returned for NumPhys at runtime.
*/
#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
@@ -1272,7 +1353,7 @@ typedef struct _MPI2_RAIDVOL0_SETTINGS
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
+ * one and check the value returned for NumPhysDisks at runtime.
*/
#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
@@ -1329,6 +1410,7 @@ typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0
#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000)
#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000)
#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080)
#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040)
#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020)
#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000)
@@ -1451,11 +1533,15 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0
#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03)
#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04)
#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06)
#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF)
/* PhysDiskAttributes defines */
+#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C)
#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08)
#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04)
+
+#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03)
#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02)
#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01)
@@ -1474,7 +1560,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength or NumPhysDiskPaths at runtime.
+ * one and check the value returned for NumPhysDiskPaths at runtime.
*/
#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1)
@@ -1527,6 +1613,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08)
#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
@@ -1553,6 +1640,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000)
#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27)
#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000)
#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000)
#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000)
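
Similarly, the new shift define turns the two-bit power condition field of PhyInfo into a small integer. A standalone check with defines copied from above:

#include <stdio.h>

#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK   (0x18000000)
#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION  (27)
#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER          (0x10000000)

int
main(void)
{
        unsigned int phy_info = MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER; /* sample */
        unsigned cond = (phy_info & MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK) >>
            MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION;

        printf("power condition = %u\n", cond); /* 0 active, 1 partial, 2 slumber */
        return (0);
}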
@@ -1636,7 +1724,7 @@ typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.ExtPageLength or NumPhys at runtime.
+ * one and check the value returned for NumPhys at runtime.
*/
#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
@@ -1707,7 +1795,7 @@ typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.ExtPageLength or NumPhys at runtime.
+ * one and check the value returned for NumPhys at runtime.
*/
#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
@@ -1798,7 +1886,7 @@ typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * four and check Header.ExtPageLength or NumPhys at runtime.
+ * four and check the value returned for NumPhys at runtime.
*/
#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
@@ -1837,7 +1925,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4
typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
{
U8 ControlFlags; /* 0x00 */
- U8 Reserved1; /* 0x01 */
+ U8 PortWidthModGroup; /* 0x01 */
U16 InactivityTimerExponent; /* 0x02 */
U8 SATAPartialTimeout; /* 0x04 */
U8 Reserved2; /* 0x05 */
@@ -1857,6 +1945,9 @@ typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+/* defines for PortWidthModGroup field */
+#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF)
+
/* defines for InactivityTimerExponent field */
#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
@@ -1878,7 +1969,7 @@ typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.ExtPageLength or NumPhys at runtime.
+ * one and check the value returned for NumPhys at runtime.
*/
#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
@@ -1896,7 +1987,137 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5
MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t;
-#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x00)
+#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01)
+
+
+/* SAS IO Unit Page 6 */
+
+typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
+{
+ U8 CurrentStatus; /* 0x00 */
+ U8 CurrentModulation; /* 0x01 */
+ U8 CurrentUtilization; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ MPI2_POINTER PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ Mpi2SasIOUnit6PortWidthModGroupStatus_t,
+ MPI2_POINTER pMpi2SasIOUnit6PortWidthModGroupStatus_t;
+
+/* defines for CurrentStatus field */
+#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00)
+#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01)
+#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02)
+#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03)
+#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04)
+#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07)
+
+/* defines for CurrentModulation field */
+#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00)
+#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01)
+#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02)
+#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX
+#define MPI2_SAS_IOUNIT6_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+ U8 NumGroups; /* 0x10 */
+ U8 Reserved3; /* 0x11 */
+ U16 Reserved4; /* 0x12 */
+ MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
+ PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /* 0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ Mpi2SasIOUnitPage6_t, MPI2_POINTER pMpi2SasIOUnitPage6_t;
+
+#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00)
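
SAS IO Unit Page 6 follows the usual MPI2 variable-length convention: the structure is compiled with a one-element array and the host sizes its read buffer from the NumGroups value the IOC returns. A hedged sketch of the sizing arithmetic, with simplified local stand-ins for the structures above (field layout is approximated for illustration):

#include <stddef.h>
#include <stdio.h>

struct group_status {                   /* stand-in for the 8-byte group entry */
        unsigned char s[8];
};

struct page6 {                          /* stand-in for MPI2_CONFIG_PAGE_SASIOUNIT_6 */
        unsigned char header_and_reserved[0x10];
        unsigned char num_groups;
        unsigned char r3;
        unsigned short r4;
        struct group_status groups[1];  /* sized by NumGroups at runtime */
};

int
main(void)
{
        unsigned num_groups = 4;        /* hypothetical value read from the IOC */
        size_t sz = sizeof(struct page6) +
            (num_groups - 1) * sizeof(struct group_status);

        printf("allocate %zu bytes for %u groups\n", sz, num_groups);
        return (0);
}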
+
+
+/* SAS IO Unit Page 7 */
+
+typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+{
+ U8 Flags; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U8 Threshold75Pct; /* 0x04 */
+ U8 Threshold50Pct; /* 0x05 */
+ U8 Threshold25Pct; /* 0x06 */
+ U8 Reserved3; /* 0x07 */
+} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ MPI2_POINTER PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ Mpi2SasIOUnit7PortWidthModGroupSettings_t,
+ MPI2_POINTER pMpi2SasIOUnit7PortWidthModGroupSettings_t;
+
+/* defines for Flags field */
+#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
+#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 SamplingInterval; /* 0x08 */
+ U8 WindowLength; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U32 Reserved2; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U8 NumGroups; /* 0x14 */
+ U8 Reserved4; /* 0x15 */
+ U16 Reserved5; /* 0x16 */
+ MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+ PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX]; /* 0x18 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ Mpi2SasIOUnitPage7_t, MPI2_POINTER pMpi2SasIOUnitPage7_t;
+
+#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00)
+
+
+/* SAS IO Unit Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 PowerManagementCapabilities; /* 0x0C */
+ U32 Reserved2; /* 0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ Mpi2SasIOUnitPage8_t, MPI2_POINTER pMpi2SasIOUnitPage8_t;
+
+#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
+
+/* defines for PowerManagementCapabilities field */
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
@@ -2187,7 +2408,7 @@ typedef struct _MPI2_SASPHY2_PHY_EVENT
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.ExtPageLength or NumPhyEvents at runtime.
+ * one and check the value returned for NumPhyEvents at runtime.
*/
#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
@@ -2280,7 +2501,7 @@ typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.ExtPageLength or NumPhyEvents at runtime.
+ * one and check the value returned for NumPhyEvents at runtime.
*/
#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
@@ -2392,7 +2613,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.ExtPageLength or NumPhys at runtime.
+ * one and check the value returned for NumLogEntries at runtime.
*/
#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
@@ -2442,7 +2663,7 @@ typedef struct _MPI2_CONFIG_PAGE_LOG_0
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.ExtPageLength or NumPhys at runtime.
+ * one and check the value returned for NumElements at runtime.
*/
#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
@@ -2642,5 +2863,25 @@ typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1
#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
+/****************************************************************************
+* Extended Manufacturing Config Pages
+****************************************************************************/
+
+/*
+ * Generic structure to use for product-specific extended manufacturing pages
+ * (currently Extended Manufacturing Page 40 through Extended Manufacturing
+ * Page 60).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 ProductSpecificInfo; /* 0x08 */
+} MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ Mpi2ExtManufacturingPagePS_t, MPI2_POINTER pMpi2ExtManufacturingPagePS_t;
+
+/* PageVersion should be provided by product-specific code */
+
#endif
diff --git a/sys/dev/mps/mpi/mpi2_hbd.h b/sys/dev/mps/mpi/mpi2_hbd.h
index d14e352..e31fc5e 100644
--- a/sys/dev/mps/mpi/mpi2_hbd.h
+++ b/sys/dev/mps/mpi/mpi2_hbd.h
@@ -1,13 +1,42 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2009 LSI Corporation.
+ * Copyright (c) 2009-2011 LSI Corporation.
*
*
* Name: mpi2_hbd.h
* Title: MPI Host Based Discovery messages and structures
* Creation Date: October 21, 2009
*
- * mpi2_hbd.h Version: 02.00.00
+ * mpi2_hbd.h Version: 02.00.01
*
* Version History
* ---------------
@@ -15,6 +44,8 @@
* Date Version Description
* -------- -------- ------------------------------------------------------
* 10-28-09 02.00.00 Initial version.
+ * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from
+ * HBD Action request, replaced by AdditionalInfo field.
* --------------------------------------------------------------------------
*/
@@ -48,10 +79,7 @@ typedef struct _MPI2_HBD_ACTION_REQUEST
U8 Port; /* 0x25 */
U8 MaxConnections; /* 0x26 */
U8 MaxRate; /* 0x27 */
- U8 PortGroups; /* 0x28 */
- U8 DmaGroup; /* 0x29 */
- U8 ControlGroup; /* 0x2A */
- U8 Reserved6; /* 0x2B */
+ U32 AdditionalInfo; /* 0x28 */
U16 InitialAWT; /* 0x2C */
U16 Reserved7; /* 0x2E */
U32 Reserved8; /* 0x30 */
diff --git a/sys/dev/mps/mpi/mpi2_history.txt b/sys/dev/mps/mpi/mpi2_history.txt
index d70df0d..01dc3b6e 100644
--- a/sys/dev/mps/mpi/mpi2_history.txt
+++ b/sys/dev/mps/mpi/mpi2_history.txt
@@ -1,29 +1,58 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
==============================
Fusion-MPT MPI 2.0 Header File Change History
==============================
- Copyright (c) 2000-2009 LSI Corporation.
+ Copyright (c) 2000-2011 LSI Corporation.
---------------------------------------
- Header Set Release Version: 02.00.14
- Header Set Release Date: 10-28-09
+ Header Set Release Version: 02.00.18
+ Header Set Release Date: 11-10-10
---------------------------------------
Filename Current version Prior version
---------- --------------- -------------
- mpi2.h 02.00.14 02.00.13
- mpi2_cnfg.h 02.00.13 02.00.12
- mpi2_init.h 02.00.08 02.00.07
- mpi2_ioc.h 02.00.13 02.00.12
- mpi2_raid.h 02.00.04 02.00.04
- mpi2_sas.h 02.00.03 02.00.02
- mpi2_targ.h 02.00.03 02.00.03
- mpi2_tool.h 02.00.04 02.00.04
+ mpi2.h 02.00.18 02.00.17
+ mpi2_cnfg.h 02.00.17 02.00.16
+ mpi2_init.h 02.00.11 02.00.10
+ mpi2_ioc.h 02.00.16 02.00.15
+ mpi2_raid.h 02.00.05 02.00.05
+ mpi2_sas.h 02.00.05 02.00.05
+ mpi2_targ.h 02.00.04 02.00.04
+ mpi2_tool.h 02.00.06 02.00.06
mpi2_type.h 02.00.00 02.00.00
mpi2_ra.h 02.00.00 02.00.00
- mpi2_hbd.h 02.00.00
- mpi2_history.txt 02.00.14 02.00.13
+ mpi2_hbd.h 02.00.01 02.00.01
+ mpi2_history.txt 02.00.18 02.00.17
* Date Version Description
@@ -72,6 +101,15 @@ mpi2.h
* Added MSI-x index mask and shift for Reply Post Host
* Index register.
* Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
* --------------------------------------------------------------------------
mpi2_cnfg.h
@@ -171,6 +209,31 @@ mpi2_cnfg.h
* Added Ethernet configuration pages.
* 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
* Added SAS PHY Page 4 structure and defines.
+ * 02-10-10 02.00.14 Modified the comments for the configuration page
+ * structures that contain an array of data. The host
+ * should use the "count" field in the page data (e.g. the
+ * NumPhys field) to determine the number of valid elements
+ * in the array.
+ * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added PowerManagementCapabilities to IO Unit Page 7.
+ * Added PortWidthModGroup field to
+ * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ * define.
+ * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
* --------------------------------------------------------------------------
mpi2_init.h
@@ -192,6 +255,9 @@ mpi2_init.h
* both SCSI IO Error Reply and SCSI Task Management Reply.
* Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
* Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
* --------------------------------------------------------------------------
mpi2_ioc.h
@@ -280,6 +346,12 @@ mpi2_ioc.h
* (MPI2_FW_HEADER_PID_).
* Modified values for SAS ProductID Family
* (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
* --------------------------------------------------------------------------
mpi2_raid.h
@@ -292,6 +364,7 @@ mpi2_raid.h
* can be sized by the build environment.
* 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
* VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
* --------------------------------------------------------------------------
mpi2_sas.h
@@ -302,6 +375,8 @@ mpi2_sas.h
* Request.
* 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
* to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ * 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
* --------------------------------------------------------------------------
mpi2_targ.h
@@ -313,6 +388,7 @@ mpi2_targ.h
* MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST.
* Target Status Send Request only takes a single SGE for
* response data.
+ * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure.
* --------------------------------------------------------------------------
mpi2_tool.h
@@ -325,6 +401,9 @@ mpi2_tool.h
* and reply messages.
* Added MPI2_DIAG_BUF_TYPE_EXTENDED.
* Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
* --------------------------------------------------------------------------
mpi2_type.h
@@ -337,24 +416,40 @@ mpi2_ra.h
mpi2_hbd.h
* 10-28-09 02.00.00 Initial version.
+ * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from
+ * HBD Action request, replaced by AdditionalInfo field.
* --------------------------------------------------------------------------
mpi2_history.txt Parts list history
-Filename 02.00.14 02.00.13 02.00.12
----------- -------- -------- --------
-mpi2.h 02.00.14 02.00.13 02.00.12
-mpi2_cnfg.h 02.00.13 02.00.12 02.00.11
-mpi2_init.h 02.00.08 02.00.07 02.00.07
-mpi2_ioc.h 02.00.13 02.00.12 02.00.11
-mpi2_raid.h 02.00.04 02.00.04 02.00.03
-mpi2_sas.h 02.00.03 02.00.02 02.00.02
-mpi2_targ.h 02.00.03 02.00.03 02.00.03
-mpi2_tool.h 02.00.04 02.00.04 02.00.03
-mpi2_type.h 02.00.00 02.00.00 02.00.00
-mpi2_ra.h 02.00.00 02.00.00 02.00.00
-mpi2_hbd.h 02.00.00
+Filename 02.00.18
+---------- --------
+mpi2.h 02.00.18
+mpi2_cnfg.h 02.00.17
+mpi2_init.h 02.00.11
+mpi2_ioc.h 02.00.16
+mpi2_raid.h 02.00.05
+mpi2_sas.h 02.00.05
+mpi2_targ.h 02.00.04
+mpi2_tool.h 02.00.06
+mpi2_type.h 02.00.00
+mpi2_ra.h 02.00.00
+mpi2_hbd.h 02.00.01
+
+Filename 02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12
+---------- -------- -------- -------- -------- -------- --------
+mpi2.h 02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12
+mpi2_cnfg.h 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11
+mpi2_init.h 02.00.10 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07
+mpi2_ioc.h 02.00.15 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11
+mpi2_raid.h 02.00.05 02.00.05 02.00.04 02.00.04 02.00.04 02.00.03
+mpi2_sas.h 02.00.05 02.00.04 02.00.03 02.00.03 02.00.02 02.00.02
+mpi2_targ.h 02.00.04 02.00.04 02.00.04 02.00.03 02.00.03 02.00.03
+mpi2_tool.h 02.00.06 02.00.05 02.00.04 02.00.04 02.00.04 02.00.03
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h 02.00.01 02.00.00 02.00.00 02.00.00
Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
---------- -------- -------- -------- -------- -------- --------
diff --git a/sys/dev/mps/mpi/mpi2_init.h b/sys/dev/mps/mpi/mpi2_init.h
index 8d2b1f9..ca4a685 100644
--- a/sys/dev/mps/mpi/mpi2_init.h
+++ b/sys/dev/mps/mpi/mpi2_init.h
@@ -1,13 +1,42 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_init.h
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.08
+ * mpi2_init.h Version: 02.00.11
*
* Version History
* ---------------
@@ -32,6 +61,9 @@
* both SCSI IO Error Reply and SCSI Task Management Reply.
* Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
* Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
* --------------------------------------------------------------------------
*/
@@ -58,20 +90,6 @@ typedef struct
} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
-/* TBD: I don't think this is needed for MPI2/Gen2 */
-#if 0
-typedef struct
-{
- U8 CDB[16]; /* 0x00 */
- U32 DataLength; /* 0x10 */
- U32 PrimaryReferenceTag; /* 0x14 */
- U16 PrimaryApplicationTag; /* 0x18 */
- U16 PrimaryApplicationTagMask; /* 0x1A */
- U32 TransferLength; /* 0x1C */
-} MPI2_SCSI_IO32_CDB_EEDP16, MPI2_POINTER PTR_MPI2_SCSI_IO32_CDB_EEDP16,
- Mpi2ScsiIo32CdbEedp16_t, MPI2_POINTER pMpi2ScsiIo32CdbEedp16_t;
-#endif
-
typedef union
{
U8 CDB32[32];
@@ -112,7 +130,13 @@ typedef struct _MPI2_SCSI_IO_REQUEST
U8 LUN[8]; /* 0x34 */
U32 Control; /* 0x3C */
MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+
+#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */
+ MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
MPI2_SGE_IO_UNION SGL; /* 0x60 */
+
} MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST,
Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t;
@@ -146,6 +170,9 @@ typedef struct _MPI2_SCSI_IO_REQUEST
#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
+/* number of SGLOffset fields */
+#define MPI2_SCSIIO_NUM_SGLOFFSETS (4)
+
/* SCSI IO IoFlags bits */
/* Large CDB Address Space */
diff --git a/sys/dev/mps/mpi/mpi2_ioc.h b/sys/dev/mps/mpi/mpi2_ioc.h
index 24a5662..ca19a5c 100644
--- a/sys/dev/mps/mpi/mpi2_ioc.h
+++ b/sys/dev/mps/mpi/mpi2_ioc.h
@@ -1,13 +1,42 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.13
+ * mpi2_ioc.h Version: 02.00.16
*
* Version History
* ---------------
@@ -99,6 +128,12 @@
* (MPI2_FW_HEADER_PID_).
* Modified values for SAS ProductID Family
* (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
* --------------------------------------------------------------------------
*/
@@ -454,7 +489,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
#define MPI2_EVENT_STATE_CHANGE (0x0002)
#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005)
#define MPI2_EVENT_EVENT_CHANGE (0x000A)
-#define MPI2_EVENT_TASK_SET_FULL (0x000E)
+#define MPI2_EVENT_TASK_SET_FULL (0x000E) /* obsolete */
#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014)
#define MPI2_EVENT_SAS_DISCOVERY (0x0016)
@@ -470,6 +505,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
+#define MPI2_EVENT_SAS_QUIESCE (0x0025)
/* Log Entry Added Event data */
@@ -515,6 +551,7 @@ typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
MPI2_POINTER pMpi2EventDataHardResetReceived_t;
/* Task Set Full Event data */
+/* this event is obsolete */
typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL
{
@@ -829,6 +866,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST
#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
@@ -896,6 +934,23 @@ typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER
/* use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags field */
+/* SAS Quiesce Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE
+{
+ U8 ReasonCode; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Reserved3; /* 0x04 */
+} MPI2_EVENT_DATA_SAS_QUIESCE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_QUIESCE,
+ Mpi2EventDataSasQuiesce_t, MPI2_POINTER pMpi2EventDataSasQuiesce_t;
+
+/* SAS Quiesce Event data ReasonCode values */
+#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02)
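
A driver receiving the new SAS Quiesce event dispatches on the ReasonCode. A minimal standalone sketch (the handler name and the printed actions are hypothetical, not from the mps driver):

#include <stdio.h>

#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED    (0x01)
#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED  (0x02)

static void
handle_sas_quiesce(unsigned char reason_code)
{
        switch (reason_code) {
        case MPI2_EVENT_SAS_QUIESCE_RC_STARTED:
                printf("SAS quiesce started: hold off new I/O\n");
                break;
        case MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED:
                printf("SAS quiesce completed: resume I/O\n");
                break;
        default:
                printf("unknown quiesce reason 0x%02x\n", reason_code);
        }
}

int
main(void)
{
        handle_sas_quiesce(MPI2_EVENT_SAS_QUIESCE_RC_STARTED);
        return (0);
}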
+
+
/* Host Based Discovery Phy Event data */
typedef struct _MPI2_EVENT_HBD_PHY_SAS
@@ -1009,7 +1064,9 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST
#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
/* FWDownload TransactionContext Element */
typedef struct _MPI2_FW_DOWNLOAD_TCSGE
@@ -1186,7 +1243,6 @@ typedef struct _MPI2_FW_IMAGE_HEADER
#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
-#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
@@ -1410,5 +1466,101 @@ typedef struct _MPI2_INIT_IMAGE_FOOTER
#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
+/****************************************************************************
+* PowerManagementControl message
+****************************************************************************/
+
+/* PowerManagementControl Request message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST
+{
+ U8 Feature; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Parameter1; /* 0x0C */
+ U8 Parameter2; /* 0x0D */
+ U8 Parameter3; /* 0x0E */
+ U8 Parameter4; /* 0x0F */
+ U32 Reserved5; /* 0x10 */
+ U32 Reserved6; /* 0x14 */
+} MPI2_PWR_MGMT_CONTROL_REQUEST, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REQUEST,
+ Mpi2PwrMgmtControlRequest_t, MPI2_POINTER pMpi2PwrMgmtControlRequest_t;
+
+/* defines for the Feature field */
+#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
+#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03)
+#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */
+/* Parameter1 contains a PHY number */
+/* Parameter2 indicates power condition action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01)
+#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02)
+#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03)
+/* Parameter3 and Parameter4 are reserved */
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION Feature */
+/* Parameter1 contains SAS port width modulation group number */
+/* Parameter2 indicates IOC action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01)
+#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02)
+#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03)
+/* Parameter3 indicates desired modulation level using these defines */
+#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00)
+#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01)
+#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02)
+#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03)
+/* Parameter4 is reserved */
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
+/* Parameter1 indicates desired PCIe link speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00)
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01)
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02)
+/* Parameter2 indicates desired PCIe link width using these defines */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01)
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02)
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04)
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08)
+/* Parameter3 and Parameter4 are reserved */
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
+/* Parameter1 indicates desired IOC hardware clock speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01)
+#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02)
+#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08)
+/* Parameter2, Parameter3, and Parameter4 are reserved */
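
Tying the Feature and Parameter defines together: a hedged sketch of filling a PowerManagementControl request to drop the IOC to half speed. The struct here is a simplified local stand-in for MPI2_PWR_MGMT_CONTROL_REQUEST, and actually posting the message is adapter-specific and not shown:

#include <stdio.h>
#include <string.h>

#define MPI2_FUNCTION_PWR_MGMT_CONTROL        (0x30)    /* from mpi2.h */
#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED     (0x04)
#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02)

struct pwr_mgmt_control_request {       /* local stand-in, fields abbreviated */
        unsigned char feature, reserved1, chain_offset, function;
        unsigned char reserved2[4];
        unsigned char vp_id, vf_id;
        unsigned short reserved4;
        unsigned char param1, param2, param3, param4;
        unsigned int reserved5, reserved6;
};

int
main(void)
{
        struct pwr_mgmt_control_request req;

        memset(&req, 0, sizeof(req));
        req.function = MPI2_FUNCTION_PWR_MGMT_CONTROL;
        req.feature = MPI2_PM_CONTROL_FEATURE_IOC_SPEED;
        req.param1 = MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED; /* run at half speed */
        /* Parameter2..4 stay zero: reserved for this feature */
        printf("request built: feature 0x%02x param1 0x%02x\n",
            req.feature, req.param1);
        return (0);
}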
+
+
+/* PowerManagementControl Reply message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY
+{
+ U8 Feature; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_PWR_MGMT_CONTROL_REPLY, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
+ Mpi2PwrMgmtControlReply_t, MPI2_POINTER pMpi2PwrMgmtControlReply_t;
+
+
#endif
diff --git a/sys/dev/mps/mpi/mpi2_ra.h b/sys/dev/mps/mpi/mpi2_ra.h
index 18b0b3d..0f01226 100644
--- a/sys/dev/mps/mpi/mpi2_ra.h
+++ b/sys/dev/mps/mpi/mpi2_ra.h
@@ -1,6 +1,35 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2009 LSI Corporation.
+ * Copyright (c) 2011 LSI Corporation.
*
*
* Name: mpi2_ra.h
diff --git a/sys/dev/mps/mpi/mpi2_raid.h b/sys/dev/mps/mpi/mpi2_raid.h
index f653028..557468a 100644
--- a/sys/dev/mps/mpi/mpi2_raid.h
+++ b/sys/dev/mps/mpi/mpi2_raid.h
@@ -1,13 +1,42 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2000-2008 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_raid.h
* Title: MPI Integrated RAID messages and structures
* Creation Date: April 26, 2007
*
- * mpi2_raid.h Version: 02.00.04
+ * mpi2_raid.h Version: 02.00.05
*
* Version History
* ---------------
@@ -23,6 +52,7 @@
* can be sized by the build environment.
* 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
* VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
* --------------------------------------------------------------------------
*/
@@ -261,6 +291,7 @@ typedef struct _MPI2_RAID_VOL_INDICATOR
#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002)
#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
+#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
/* RAID Action Reply ActionData union */
diff --git a/sys/dev/mps/mpi/mpi2_sas.h b/sys/dev/mps/mpi/mpi2_sas.h
index ef64a730..1f3341f 100644
--- a/sys/dev/mps/mpi/mpi2_sas.h
+++ b/sys/dev/mps/mpi/mpi2_sas.h
@@ -1,13 +1,42 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2000-2007 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_sas.h
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
- * mpi2.h Version: 02.00.03
+ * mpi2_sas.h Version: 02.00.05
*
* Version History
* ---------------
@@ -21,6 +50,8 @@
* Request.
* 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
* to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ * 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
* --------------------------------------------------------------------------
*/
@@ -111,7 +142,7 @@ typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST
/* values for PassthroughFlags field */
#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
/* SMP Passthrough Reply Message */
@@ -163,7 +194,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
U32 Reserved4; /* 0x14 */
U32 DataLength; /* 0x18 */
U8 CommandFIS[20]; /* 0x1C */
- MPI2_SGE_IO_UNION SGL; /* 0x20 */
+ MPI2_SGE_IO_UNION SGL; /* 0x30 */
} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
@@ -175,7 +206,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
/* SATA Passthrough Reply Message */
@@ -246,6 +277,8 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST
#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
/* values for the PrimFlags field */
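
The NCQ opcodes added above are issued through the existing SAS IO Unit Control request. A hedged sketch follows; the Operation and DevHandle field names and the MPI2_FUNCTION_SAS_IO_UNIT_CONTROL opcode are assumptions from the MPI 2.0 specification, since neither appears in this hunk.

    /*
     * Sketch only: enable or disable NCQ for the SATA device identified
     * by 'handle'.  Field names are assumptions from the MPI 2.0 spec.
     */
    static void
    sas_iounit_set_ncq(MPI2_SAS_IOUNIT_CONTROL_REQUEST *req,
        uint16_t handle, int enable)
    {
        bzero(req, sizeof(*req));
        req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
        req->Operation = enable ? MPI2_SAS_OP_DEV_ENABLE_NCQ :
            MPI2_SAS_OP_DEV_DISABLE_NCQ;
        req->DevHandle = handle;
    }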
diff --git a/sys/dev/mps/mpi/mpi2_targ.h b/sys/dev/mps/mpi/mpi2_targ.h
index 50f38d0..fcd694c 100644
--- a/sys/dev/mps/mpi/mpi2_targ.h
+++ b/sys/dev/mps/mpi/mpi2_targ.h
@@ -1,13 +1,42 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2000-2008 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_targ.h
* Title: MPI Target mode messages and structures
* Creation Date: September 8, 2006
*
- * mpi2_targ.h Version: 02.00.03
+ * mpi2_targ.h Version: 02.00.04
*
* Version History
* ---------------
@@ -22,6 +51,7 @@
* MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST.
* Target Status Send Request only takes a single SGE for
* response data.
+ * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure.
* --------------------------------------------------------------------------
*/
@@ -343,6 +373,7 @@ typedef struct _MPI2_TARGET_STATUS_SEND_REQUEST
typedef struct _MPI2_TARGET_SSP_RSP_IU
{
U32 Reserved0[6]; /* reserved for SSP header */ /* 0x00 */
+
/* start of RESPONSE information unit */
U32 Reserved1; /* 0x18 */
U32 Reserved2; /* 0x1C */
@@ -352,6 +383,8 @@ typedef struct _MPI2_TARGET_SSP_RSP_IU
U32 Reserved4; /* 0x24 */
U32 SenseDataLength; /* 0x28 */
U32 ResponseDataLength; /* 0x2C */
+
+ /* start of Response or Sense Data (size may vary dynamically) */
U8 ResponseSenseData[4]; /* 0x30 */
} MPI2_TARGET_SSP_RSP_IU, MPI2_POINTER PTR_MPI2_TARGET_SSP_RSP_IU,
Mpi2TargetSspRspIu_t, MPI2_POINTER pMpi2TargetSspRspIu_t;
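
Because ResponseSenseData is declared with a fixed four bytes while the new comment notes that its size varies, the real length of an IU has to be computed from SenseDataLength and ResponseDataLength. A small sketch of that arithmetic (byte-order conversion omitted; at most one of the two lengths is normally non-zero):

    /* Sketch only: total bytes occupied by a target SSP response IU. */
    static size_t
    ssp_rsp_iu_size(const MPI2_TARGET_SSP_RSP_IU *iu)
    {
        return (offsetof(MPI2_TARGET_SSP_RSP_IU, ResponseSenseData) +
            iu->SenseDataLength + iu->ResponseDataLength);
    }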
diff --git a/sys/dev/mps/mpi/mpi2_tool.h b/sys/dev/mps/mpi/mpi2_tool.h
index f782507..16c0ffc 100644
--- a/sys/dev/mps/mpi/mpi2_tool.h
+++ b/sys/dev/mps/mpi/mpi2_tool.h
@@ -1,13 +1,42 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_tool.h
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.04
+ * mpi2_tool.h Version: 02.00.06
*
* Version History
* ---------------
@@ -23,6 +52,9 @@
* and reply messages.
* Added MPI2_DIAG_BUF_TYPE_EXTENDED.
* Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
* --------------------------------------------------------------------------
*/
@@ -38,6 +70,7 @@
/* defines for the Tools */
#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
+#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
@@ -121,6 +154,46 @@ typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST
/****************************************************************************
+* Toolbox Diagnostic Data Upload request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 SGLFlags; /* 0x0C */
+ U8 Reserved5; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U32 Flags; /* 0x10 */
+ U32 DataLength; /* 0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /* 0x18 */
+} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ Mpi2ToolboxDiagDataUploadRequest_t,
+ MPI2_POINTER pMpi2ToolboxDiagDataUploadRequest_t;
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER
+{
+ U32 DiagDataLength; /* 0x00 */
+ U8 FormatCode; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+} MPI2_DIAG_DATA_UPLOAD_HEADER, MPI2_POINTER PTR_MPI2_DIAG_DATA_UPLOAD_HEADER,
+ Mpi2DiagDataUploadHeader_t, MPI2_POINTER pMpi2DiagDataUploadHeader_t;
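
For context, a hedged sketch of filling the fixed part of the upload request defined above. MPI2_FUNCTION_TOOLBOX is assumed from the MPI 2.0 specification, and the SGL describing the host buffer would be attached by the driver's usual SGE machinery.

    /* Sketch only: prepare a Diagnostic Data Upload toolbox request. */
    static void
    toolbox_diag_upload_init(MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST *req,
        uint32_t buf_len)
    {
        bzero(req, sizeof(*req));
        req->Function = MPI2_FUNCTION_TOOLBOX;  /* assumed opcode */
        req->Tool = MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL;
        req->DataLength = buf_len;              /* bytes available for upload */
    }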
+
+
+/****************************************************************************
* Toolbox ISTWI Read Write Tool
****************************************************************************/
@@ -164,7 +237,7 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST
#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11)
#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12)
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
/* Toolbox ISTWI Read Write Tool reply message */
@@ -251,7 +324,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST
Mpi2ToolboxDiagnosticCliRequest_t,
MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t;
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
/* Toolbox Diagnostic CLI Tool reply message */
@@ -319,6 +392,10 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
/* count of the number of buffer types */
#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+/* values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
+
/****************************************************************************
* Diagnostic Buffer Post reply
diff --git a/sys/dev/mps/mpi/mpi2_type.h b/sys/dev/mps/mpi/mpi2_type.h
index 9effe68..fa4ecd9 100644
--- a/sys/dev/mps/mpi/mpi2_type.h
+++ b/sys/dev/mps/mpi/mpi2_type.h
@@ -1,6 +1,35 @@
-/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
/*
- * Copyright (c) 2000-2007 LSI Corporation.
+ * Copyright (c) 2000-2011 LSI Corporation.
*
*
* Name: mpi2_type.h
diff --git a/sys/dev/mps/mps.c b/sys/dev/mps/mps.c
index ed70c43..d3d068b 100644
--- a/sys/dev/mps/mps.c
+++ b/sys/dev/mps/mps.c
@@ -22,6 +22,36 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
+ *
+ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
*/
#include <sys/cdefs.h>
@@ -29,6 +59,7 @@ __FBSDID("$FreeBSD$");
/* Communications core for LSI MPT2 */
+/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
@@ -43,29 +74,44 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
#include <sys/endian.h>
+#include <sys/eventhandler.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
+#include <dev/pci/pcivar.h>
+
#include <cam/scsi/scsi_all.h>
#include <dev/mps/mpi/mpi2_type.h>
#include <dev/mps/mpi/mpi2.h>
#include <dev/mps/mpi/mpi2_ioc.h>
+#include <dev/mps/mpi/mpi2_sas.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
+#include <dev/mps/mpi/mpi2_init.h>
+#include <dev/mps/mpi/mpi2_tool.h>
+#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
#include <dev/mps/mps_table.h>
+static int mps_diag_reset(struct mps_softc *sc);
+static int mps_init_queues(struct mps_softc *sc);
+static int mps_message_unit_reset(struct mps_softc *sc);
+static int mps_transition_operational(struct mps_softc *sc);
static void mps_startup(void *arg);
-static void mps_startup_complete(struct mps_softc *sc, struct mps_command *cm);
static int mps_send_iocinit(struct mps_softc *sc);
static int mps_attach_log(struct mps_softc *sc);
static __inline void mps_complete_command(struct mps_command *cm);
-static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *reply);
+static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
+ MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm);
static void mps_periodic(void *);
+static int mps_reregister_events(struct mps_softc *sc);
+static void mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm);
SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD, 0, "MPS Driver Parameters");
@@ -78,7 +124,7 @@ MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory");
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
static int
-mps_hard_reset(struct mps_softc *sc)
+mps_diag_reset(struct mps_softc *sc)
{
uint32_t reg;
int i, error, tries = 0;
@@ -129,7 +175,7 @@ mps_hard_reset(struct mps_softc *sc)
}
static int
-mps_soft_reset(struct mps_softc *sc)
+mps_message_unit_reset(struct mps_softc *sc)
{
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
@@ -160,7 +206,7 @@ mps_transition_ready(struct mps_softc *sc)
* resetting it.
*/
if (reg & MPI2_DOORBELL_USED) {
- mps_hard_reset(sc);
+ mps_diag_reset(sc);
DELAY(50000);
continue;
}
@@ -181,10 +227,10 @@ mps_transition_ready(struct mps_softc *sc)
} else if (state == MPI2_IOC_STATE_FAULT) {
mps_dprint(sc, MPS_INFO, "IOC in fault state 0x%x\n",
state & MPI2_DOORBELL_FAULT_CODE_MASK);
- mps_hard_reset(sc);
+ mps_diag_reset(sc);
} else if (state == MPI2_IOC_STATE_OPERATIONAL) {
/* Need to take ownership */
- mps_soft_reset(sc);
+ mps_message_unit_reset(sc);
} else if (state == MPI2_IOC_STATE_RESET) {
/* Wait a bit, IOC might be in transition */
mps_dprint(sc, MPS_FAULT,
@@ -220,14 +266,108 @@ mps_transition_operational(struct mps_softc *sc)
state = reg & MPI2_IOC_STATE_MASK;
if (state != MPI2_IOC_STATE_READY) {
- if ((error = mps_transition_ready(sc)) != 0)
+ if ((error = mps_transition_ready(sc)) != 0) {
+ mps_dprint(sc, MPS_FAULT,
+ "%s failed to transition ready\n", __func__);
return (error);
+ }
}
error = mps_send_iocinit(sc);
return (error);
}
+/*
+ * XXX Some of this should probably move to mps.c
+ *
+ * The terms diag reset and hard reset are used interchangeably in the MPI
+ * docs to mean resetting the controller chip. In this code diag reset
+ * cleans everything up, and the hard reset function just sends the reset
+ * sequence to the chip. This should probably be refactored so that every
+ * subsystem gets a reset notification of some sort, and can clean up
+ * appropriately.
+ */
+int
+mps_reinit(struct mps_softc *sc)
+{
+ int error;
+ uint32_t db;
+
+ mps_printf(sc, "%s sc %p\n", __func__, sc);
+
+ mtx_assert(&sc->mps_mtx, MA_OWNED);
+
+ if (sc->mps_flags & MPS_FLAGS_DIAGRESET) {
+ mps_printf(sc, "%s reset already in progress\n", __func__);
+ return 0;
+ }
+
+ /* make sure the completion callbacks can recognize they're getting
+ * a NULL cm_reply due to a reset.
+ */
+ sc->mps_flags |= MPS_FLAGS_DIAGRESET;
+
+ mps_printf(sc, "%s mask interrupts\n", __func__);
+ mps_mask_intr(sc);
+
+ error = mps_diag_reset(sc);
+ if (error != 0) {
+ panic("%s hard reset failed with error %d\n",
+ __func__, error);
+ }
+
+ /* Restore the PCI state, including the MSI-X registers */
+ mps_pci_restore(sc);
+
+ /* Give the I/O subsystem special priority to get itself prepared */
+ mpssas_handle_reinit(sc);
+
+ /* reinitialize queues after the reset */
+ bzero(sc->free_queue, sc->fqdepth * 4);
+ mps_init_queues(sc);
+
+ /* get the chip out of the reset state */
+ error = mps_transition_operational(sc);
+ if (error != 0)
+ panic("%s transition operational failed with error %d\n",
+ __func__, error);
+
+ /* Reinitialize the reply queue. This is delicate because this
+ * function is typically invoked by task mgmt completion callbacks,
+ * which are called by the interrupt thread. We need to make sure
+ * the interrupt handler loop will exit when we return to it, and
+ * that it will recognize the indexes we've changed.
+ */
+ sc->replypostindex = 0;
+ mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
+ mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex);
+
+ db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
+ mps_printf(sc, "%s doorbell 0x%08x\n", __func__, db);
+
+ mps_printf(sc, "%s unmask interrupts post %u free %u\n", __func__,
+ sc->replypostindex, sc->replyfreeindex);
+
+ mps_unmask_intr(sc);
+
+ mps_printf(sc, "%s restarting post %u free %u\n", __func__,
+ sc->replypostindex, sc->replyfreeindex);
+
+ /* restart will reload the event masks clobbered by the reset, and
+ * then enable the port.
+ */
+ mps_reregister_events(sc);
+
+ /* the end of discovery will release the simq, so we're done. */
+ mps_printf(sc, "%s finished sc %p post %u free %u\n",
+ __func__, sc,
+ sc->replypostindex, sc->replyfreeindex);
+
+ sc->mps_flags &= ~MPS_FLAGS_DIAGRESET;
+
+ return 0;
+}
+
/* Wait for the chip to ACK a word that we've put into its FIFO */
static int
mps_wait_db_ack(struct mps_softc *sc)
@@ -382,51 +522,25 @@ mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
return (0);
}
-void
+static void
mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm)
{
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+ mps_dprint(sc, MPS_TRACE, "%s SMID %u cm %p ccb %p\n", __func__,
+ cm->cm_desc.Default.SMID, cm, cm->cm_ccb);
if (sc->mps_flags & MPS_FLAGS_ATTACH_DONE)
mtx_assert(&sc->mps_mtx, MA_OWNED);
- if ((cm->cm_desc.Default.SMID < 1)
- || (cm->cm_desc.Default.SMID >= sc->num_reqs)) {
- mps_printf(sc, "%s: invalid SMID %d, desc %#x %#x\n",
- __func__, cm->cm_desc.Default.SMID,
- cm->cm_desc.Words.High, cm->cm_desc.Words.Low);
- }
+ if (++sc->io_cmds_active > sc->io_cmds_highwater)
+ sc->io_cmds_highwater++;
+
mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
cm->cm_desc.Words.Low);
mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
cm->cm_desc.Words.High);
}
-int
-mps_request_polled(struct mps_softc *sc, struct mps_command *cm)
-{
- int error, timeout = 0;
-
- error = 0;
-
- cm->cm_flags |= MPS_CM_FLAGS_POLLED;
- cm->cm_complete = NULL;
- mps_map_command(sc, cm);
-
- while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) {
- mps_intr(sc);
- DELAY(50 * 1000);
- if (timeout++ > 1000) {
- mps_dprint(sc, MPS_FAULT, "polling failed\n");
- error = ETIMEDOUT;
- break;
- }
- }
-
- return (error);
-}
-
/*
* Just the FACTS, ma'am.
*/
@@ -469,9 +583,19 @@ mps_get_portfacts(struct mps_softc *sc, MPI2_PORT_FACTS_REPLY *facts, int port)
cm->cm_data = NULL;
error = mps_request_polled(sc, cm);
reply = (MPI2_PORT_FACTS_REPLY *)cm->cm_reply;
- if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
+ if (reply == NULL) {
+ mps_printf(sc, "%s NULL reply\n", __func__);
+ goto done;
+ }
+ if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) {
+ mps_printf(sc,
+ "%s error %d iocstatus 0x%x iocloginfo 0x%x type 0x%x\n",
+ __func__, error, reply->IOCStatus, reply->IOCLogInfo,
+ reply->PortType);
error = ENXIO;
+ }
bcopy(reply, facts, sizeof(MPI2_PORT_FACTS_REPLY));
+done:
mps_free_command(sc, cm);
return (error);
@@ -522,35 +646,6 @@ mps_send_iocinit(struct mps_softc *sc)
return (error);
}
-static int
-mps_send_portenable(struct mps_softc *sc)
-{
- MPI2_PORT_ENABLE_REQUEST *request;
- struct mps_command *cm;
-
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
-
- if ((cm = mps_alloc_command(sc)) == NULL)
- return (EBUSY);
- request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
- request->Function = MPI2_FUNCTION_PORT_ENABLE;
- request->MsgFlags = 0;
- request->VP_ID = 0;
- cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
- cm->cm_complete = mps_startup_complete;
-
- mps_enqueue_request(sc, cm);
- return (0);
-}
-
-static int
-mps_send_mur(struct mps_softc *sc)
-{
-
- /* Placeholder */
- return (0);
-}
-
void
mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
@@ -685,7 +780,7 @@ mps_alloc_requests(struct mps_softc *sc)
bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
mps_memaddr_cb, &sc->req_busaddr, 0);
- rsize = sc->facts->IOCRequestFrameSize * MPS_CHAIN_FRAMES * 4;
+ rsize = sc->facts->IOCRequestFrameSize * sc->max_chains * 4;
if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
@@ -733,9 +828,9 @@ mps_alloc_requests(struct mps_softc *sc)
bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
mps_memaddr_cb, &sc->sense_busaddr, 0);
- sc->chains = malloc(sizeof(struct mps_chain) * MPS_CHAIN_FRAMES,
- M_MPT2, M_WAITOK | M_ZERO);
- for (i = 0; i < MPS_CHAIN_FRAMES; i++) {
+ sc->chains = malloc(sizeof(struct mps_chain) * sc->max_chains, M_MPT2,
+ M_WAITOK | M_ZERO);
+ for (i = 0; i < sc->max_chains; i++) {
chain = &sc->chains[i];
chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames +
i * sc->facts->IOCRequestFrameSize * 4);
@@ -759,7 +854,7 @@ mps_alloc_requests(struct mps_softc *sc)
busdma_lock_mutex, /* lockfunc */
&sc->mps_mtx, /* lockarg */
&sc->buffer_dmat)) {
- device_printf(sc->mps_dev, "Cannot allocate sense DMA tag\n");
+ device_printf(sc->mps_dev, "Cannot allocate buffer DMA tag\n");
return (ENOMEM);
}
@@ -780,12 +875,16 @@ mps_alloc_requests(struct mps_softc *sc)
cm->cm_desc.Default.SMID = i;
cm->cm_sc = sc;
TAILQ_INIT(&cm->cm_chain_list);
- callout_init(&cm->cm_callout, 1 /*MPSAFE*/);
+ callout_init_mtx(&cm->cm_callout, &sc->mps_mtx, 0);
/* XXX Is a failure here a critical problem? */
if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0)
- mps_free_command(sc, cm);
+ if (i <= sc->facts->HighPriorityCredit)
+ mps_free_high_priority_command(sc, cm);
+ else
+ mps_free_command(sc, cm);
else {
+ panic("failed to allocate command %d\n", i);
sc->num_reqs = i;
break;
}
@@ -819,28 +918,53 @@ mps_init_queues(struct mps_softc *sc)
return (0);
}
-int
-mps_attach(struct mps_softc *sc)
+/* Get the driver parameter tunables. Lowest priority are the driver defaults.
+ * Next are the global settings, if they exist. Highest are the per-unit
+ * settings, if they exist.
+ */
+static void
+mps_get_tunables(struct mps_softc *sc)
{
- int i, error;
- char tmpstr[80], tmpstr2[80];
+ char tmpstr[80];
+
+ /* XXX default to some debugging for now */
+ sc->mps_debug = MPS_FAULT;
+ sc->disable_msix = 0;
+ sc->disable_msi = 0;
+ sc->max_chains = MPS_CHAIN_FRAMES;
/*
- * Grab any tunable-set debug level so that tracing works as early
- * as possible.
+ * Grab the global variables.
*/
- snprintf(tmpstr, sizeof(tmpstr), "hw.mps.%d.debug_level",
+ TUNABLE_INT_FETCH("hw.mps.debug_level", &sc->mps_debug);
+ TUNABLE_INT_FETCH("hw.mps.disable_msix", &sc->disable_msix);
+ TUNABLE_INT_FETCH("hw.mps.disable_msi", &sc->disable_msi);
+ TUNABLE_INT_FETCH("hw.mps.max_chains", &sc->max_chains);
+
+ /* Grab the unit-instance variables */
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.debug_level",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->mps_debug);
- snprintf(tmpstr, sizeof(tmpstr), "hw.mps.%d.allow_multiple_tm_cmds",
+
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msix",
device_get_unit(sc->mps_dev));
- TUNABLE_INT_FETCH(tmpstr, &sc->allow_multiple_tm_cmds);
+ TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msi",
+ device_get_unit(sc->mps_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);
- mtx_init(&sc->mps_mtx, "MPT2SAS lock", NULL, MTX_DEF);
- callout_init_mtx(&sc->periodic, &sc->mps_mtx, 0);
- TAILQ_INIT(&sc->event_list);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_chains",
+ device_get_unit(sc->mps_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);
+}
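
As a concrete illustration of the precedence above (a usage example, not part of the patch), the same knob can be set globally or per unit in /boot/loader.conf, with the per-unit form winning:

    hw.mps.max_chains=4096      # global: applies to every mps(4) instance
    dev.mps.0.max_chains=8192   # per-unit: overrides the global for mps0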
+
+static void
+mps_setup_sysctl(struct mps_softc *sc)
+{
+ struct sysctl_ctx_list *sysctl_ctx = NULL;
+ struct sysctl_oid *sysctl_tree = NULL;
+ char tmpstr[80], tmpstr2[80];
/*
* Setup the sysctl variable so the user can change the debug level
@@ -850,44 +974,85 @@ mps_attach(struct mps_softc *sc)
device_get_unit(sc->mps_dev));
snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev));
- sysctl_ctx_init(&sc->sysctl_ctx);
- sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
- SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2, CTLFLAG_RD,
- 0, tmpstr);
- if (sc->sysctl_tree == NULL)
- return (ENOMEM);
+ sysctl_ctx = device_get_sysctl_ctx(sc->mps_dev);
+ if (sysctl_ctx != NULL)
+ sysctl_tree = device_get_sysctl_tree(sc->mps_dev);
- SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ if (sysctl_tree == NULL) {
+ sysctl_ctx_init(&sc->sysctl_ctx);
+ sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
+ SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2,
+ CTLFLAG_RD, 0, tmpstr);
+ if (sc->sysctl_tree == NULL)
+ return;
+ sysctl_ctx = &sc->sysctl_ctx;
+ sysctl_tree = sc->sysctl_tree;
+ }
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mps_debug, 0,
"mps debug level");
- SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
- OID_AUTO, "allow_multiple_tm_cmds", CTLFLAG_RW,
- &sc->allow_multiple_tm_cmds, 0,
- "allow multiple simultaneous task management cmds");
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
+ "Disable the use of MSI-X interrupts");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0,
+ "Disable the use of MSI interrupts");
+
+ SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "firmware_version", CTLFLAG_RW, &sc->fw_version,
+ strlen(sc->fw_version), "firmware version");
+
+ SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "driver_version", CTLFLAG_RW, MPS_DRIVER_VERSION,
+ strlen(MPS_DRIVER_VERSION), "driver version");
- SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "io_cmds_active", CTLFLAG_RD,
&sc->io_cmds_active, 0, "number of currently active commands");
- SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
&sc->io_cmds_highwater, 0, "maximum active commands seen");
- SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "chain_free", CTLFLAG_RD,
&sc->chain_free, 0, "number of free chain elements");
- SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
&sc->chain_free_lowwater, 0,"lowest number of free chain elements");
- SYSCTL_ADD_UQUAD(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "max_chains", CTLFLAG_RD,
+ &sc->max_chains, 0,"maximum chain frames that will be allocated");
+
+#if __FreeBSD_version >= 900030
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
&sc->chain_alloc_fail, "chain allocation failures");
+#endif //FreeBSD_version >= 900030
+}
- if ((error = mps_transition_ready(sc)) != 0)
+int
+mps_attach(struct mps_softc *sc)
+{
+ int i, error;
+
+ mps_get_tunables(sc);
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ mtx_init(&sc->mps_mtx, "MPT2SAS lock", NULL, MTX_DEF);
+ callout_init_mtx(&sc->periodic, &sc->mps_mtx, 0);
+ TAILQ_INIT(&sc->event_list);
+
+ if ((error = mps_transition_ready(sc)) != 0) {
+ mps_printf(sc, "%s failed to transition ready\n", __func__);
return (error);
+ }
sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2,
M_ZERO|M_NOWAIT);
@@ -896,11 +1061,15 @@ mps_attach(struct mps_softc *sc)
mps_print_iocfacts(sc, sc->facts);
- mps_printf(sc, "Firmware: %02d.%02d.%02d.%02d\n",
+ snprintf(sc->fw_version, sizeof(sc->fw_version),
+ "%02d.%02d.%02d.%02d",
sc->facts->FWVersion.Struct.Major,
sc->facts->FWVersion.Struct.Minor,
sc->facts->FWVersion.Struct.Unit,
sc->facts->FWVersion.Struct.Dev);
+
+ mps_printf(sc, "Firmware: %s, Driver: %s\n", sc->fw_version,
+ MPS_DRIVER_VERSION);
mps_printf(sc, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
"\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
"\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
@@ -914,34 +1083,66 @@ mps_attach(struct mps_softc *sc)
*/
if ((sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) {
- mps_hard_reset(sc);
+ mps_diag_reset(sc);
if ((error = mps_transition_ready(sc)) != 0)
return (error);
}
/*
+ * Set flag if IR Firmware is loaded.
+ */
+ if (sc->facts->IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
+ sc->ir_firmware = 1;
+
+ /*
+ * Check if controller supports FW diag buffers and set flag to enable
+ * each type.
+ */
+ if (sc->facts->IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
+ sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].enabled =
+ TRUE;
+ if (sc->facts->IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
+ sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].enabled =
+ TRUE;
+ if (sc->facts->IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
+ sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].enabled =
+ TRUE;
+
+ /*
+ * Set flag if EEDP is supported and if TLR is supported.
+ */
+ if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
+ sc->eedp_enabled = TRUE;
+ if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
+ sc->control_TLR = TRUE;
+
+ /*
* Size the queues. Since the reply queues always need one free entry,
* we'll just deduct one reply message here.
*/
sc->num_reqs = MIN(MPS_REQ_FRAMES, sc->facts->RequestCredit);
sc->num_replies = MIN(MPS_REPLY_FRAMES + MPS_EVT_REPLY_FRAMES,
sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
- mps_dprint(sc, MPS_INFO, "num_reqs %d, num_replies %d\n", sc->num_reqs,
- sc->num_replies);
TAILQ_INIT(&sc->req_list);
+ TAILQ_INIT(&sc->high_priority_req_list);
TAILQ_INIT(&sc->chain_list);
TAILQ_INIT(&sc->tm_list);
- TAILQ_INIT(&sc->io_list);
if (((error = mps_alloc_queues(sc)) != 0) ||
((error = mps_alloc_replies(sc)) != 0) ||
((error = mps_alloc_requests(sc)) != 0)) {
+ mps_printf(sc, "%s failed to alloc\n", __func__);
mps_free(sc);
return (error);
}
if (((error = mps_init_queues(sc)) != 0) ||
((error = mps_transition_operational(sc)) != 0)) {
+ mps_printf(sc, "%s failed to transition operational\n", __func__);
mps_free(sc);
return (error);
}
@@ -964,6 +1165,8 @@ mps_attach(struct mps_softc *sc)
sc->facts->NumberOfPorts, M_MPT2, M_ZERO|M_WAITOK);
for (i = 0; i < sc->facts->NumberOfPorts; i++) {
if ((error = mps_get_portfacts(sc, &sc->pfacts[i], i)) != 0) {
+ mps_printf(sc, "%s failed to get portfacts for port %d\n",
+ __func__, i);
mps_free(sc);
return (error);
}
@@ -982,10 +1185,17 @@ mps_attach(struct mps_softc *sc)
}
if ((error = mps_pci_setup_interrupts(sc)) != 0) {
+ mps_printf(sc, "%s failed to setup interrupts\n", __func__);
mps_free(sc);
return (error);
}
+ /*
+ * The static page function currently reads only IOC page 8; others
+ * can be added in the future.
+ */
+ mps_base_static_config_pages(sc);
+
/* Start the periodic watchdog check on the IOC Doorbell */
mps_periodic(sc);
@@ -1001,11 +1211,24 @@ mps_attach(struct mps_softc *sc)
error = EINVAL;
}
+ /*
+ * Allow IR to shutdown gracefully when shutdown occurs.
+ */
+ sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
+ mpssas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);
+
+ if (sc->shutdown_eh == NULL)
+ mps_dprint(sc, MPS_FAULT, "shutdown event registration "
+ "failed\n");
+
+ mps_setup_sysctl(sc);
+
sc->mps_flags |= MPS_FLAGS_ATTACH_DONE;
return (error);
}
+/* Run through any late-start handlers. */
static void
mps_startup(void *arg)
{
@@ -1015,7 +1238,9 @@ mps_startup(void *arg)
mps_lock(sc);
mps_unmask_intr(sc);
- mps_send_portenable(sc);
+ /* initialize device mapping tables */
+ mps_mapping_initialize(sc);
+ mpssas_startup(sc);
mps_unlock(sc);
}
@@ -1033,36 +1258,14 @@ mps_periodic(void *arg)
db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
device_printf(sc->mps_dev, "IOC Fault 0x%08x, Resetting\n", db);
- /* XXX Need to broaden this to re-initialize the chip */
- mps_hard_reset(sc);
- db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
- if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- device_printf(sc->mps_dev, "Second IOC Fault 0x%08x, "
- "Giving up!\n", db);
- return;
- }
+
+ mps_reinit(sc);
}
callout_reset(&sc->periodic, MPS_PERIODIC_DELAY * hz, mps_periodic, sc);
}
static void
-mps_startup_complete(struct mps_softc *sc, struct mps_command *cm)
-{
- MPI2_PORT_ENABLE_REPLY *reply;
-
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
-
- reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
- if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
- mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
-
- mps_free_command(sc, cm);
- config_intrhook_disestablish(&sc->mps_ich);
-
-}
-
-static void
mps_log_evt_handler(struct mps_softc *sc, uintptr_t data,
MPI2_EVENT_NOTIFICATION_REPLY *event)
{
@@ -1134,7 +1337,7 @@ mps_free(struct mps_softc *sc)
/* Put the IOC back in the READY state. */
mps_lock(sc);
- if ((error = mps_send_mur(sc)) != 0) {
+ if ((error = mps_transition_ready(sc)) != 0) {
mps_unlock(sc);
return (error);
}
@@ -1197,6 +1400,12 @@ mps_free(struct mps_softc *sc)
if (sc->sysctl_tree != NULL)
sysctl_ctx_free(&sc->sysctl_ctx);
+ mps_mapping_free_memory(sc);
+
+ /* Deregister the shutdown function */
+ if (sc->shutdown_eh != NULL)
+ EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
+
mtx_destroy(&sc->mps_mtx);
return (0);
@@ -1208,14 +1417,26 @@ mps_complete_command(struct mps_command *cm)
if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
- if (cm->cm_complete != NULL)
+ if (cm->cm_complete != NULL) {
+ mps_dprint(cm->cm_sc, MPS_TRACE,
+ "%s cm %p calling cm_complete %p data %p reply %p\n",
+ __func__, cm, cm->cm_complete, cm->cm_complete_data,
+ cm->cm_reply);
cm->cm_complete(cm->cm_sc, cm);
+ }
if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
mps_dprint(cm->cm_sc, MPS_TRACE, "%s: waking up %p\n",
__func__, cm);
wakeup(cm);
}
+
+ if (cm->cm_sc->io_cmds_active != 0) {
+ cm->cm_sc->io_cmds_active--;
+ } else {
+ mps_dprint(cm->cm_sc, MPS_INFO, "Warning: io_cmds_active is "
+ "out of sync - resynching to 0\n");
+ }
}
void
@@ -1251,6 +1472,7 @@ mps_intr_msi(void *data)
struct mps_softc *sc;
sc = (struct mps_softc *)data;
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
mps_lock(sc);
mps_intr_locked(data);
mps_unlock(sc);
@@ -1268,20 +1490,35 @@ mps_intr_locked(void *data)
struct mps_command *cm = NULL;
uint8_t flags;
u_int pq;
+ MPI2_DIAG_RELEASE_REPLY *rel_rep;
+ mps_fw_diagnostic_buffer_t *pBuffer;
sc = (struct mps_softc *)data;
pq = sc->replypostindex;
+ mps_dprint(sc, MPS_TRACE,
+ "%s sc %p starting with replypostindex %u\n",
+ __func__, sc, sc->replypostindex);
for ( ;; ) {
cm = NULL;
- desc = &sc->post_queue[pq];
+ desc = &sc->post_queue[sc->replypostindex];
flags = desc->Default.ReplyFlags &
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
|| (desc->Words.High == 0xffffffff))
break;
+ /* increment the replypostindex now, so that event handlers
+ * and cm completion handlers which decide to do a diag
+ * reset can zero it without it getting incremented again
+ * afterwards, and we break out of this loop on the next
+ * iteration since the reply post queue has been cleared to
+ * 0xFF and all descriptors look unused (which they are).
+ */
+ if (++sc->replypostindex >= sc->pqdepth)
+ sc->replypostindex = 0;
+
switch (flags) {
case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
cm = &sc->commands[desc->SCSIIOSuccess.SMID];
@@ -1323,8 +1560,32 @@ mps_intr_locked(void *data)
panic("Reply address out of range");
}
if (desc->AddressReply.SMID == 0) {
- mps_dispatch_event(sc, baddr,
- (MPI2_EVENT_NOTIFICATION_REPLY *) reply);
+ if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
+ MPI2_FUNCTION_DIAG_BUFFER_POST) {
+ /*
+ * If SMID is 0 for Diag Buffer Post,
+ * this implies that the reply is due to
+ * a release function with a status that
+ * the buffer has been released. Set
+ * the buffer flags accordingly.
+ */
+ rel_rep =
+ (MPI2_DIAG_RELEASE_REPLY *)reply;
+ if (rel_rep->IOCStatus ==
+ MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
+ {
+ pBuffer =
+ &sc->fw_diag_buffer_list[
+ rel_rep->BufferType];
+ pBuffer->valid_data = TRUE;
+ pBuffer->owned_by_firmware =
+ FALSE;
+ pBuffer->immediate = FALSE;
+ }
+ } else
+ mps_dispatch_event(sc, baddr,
+ (MPI2_EVENT_NOTIFICATION_REPLY *)
+ reply);
} else {
cm = &sc->commands[desc->AddressReply.SMID];
cm->cm_reply = reply;
@@ -1349,14 +1610,13 @@ mps_intr_locked(void *data)
desc->Words.Low = 0xffffffff;
desc->Words.High = 0xffffffff;
- if (++pq >= sc->pqdepth)
- pq = 0;
}
if (pq != sc->replypostindex) {
- mps_dprint(sc, MPS_INFO, "writing postindex %d\n", pq);
- mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, pq);
- sc->replypostindex = pq;
+ mps_dprint(sc, MPS_TRACE,
+ "%s sc %p writing postindex %d\n",
+ __func__, sc, sc->replypostindex);
+ mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex);
}
return;
@@ -1379,6 +1639,28 @@ mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
if (handled == 0)
device_printf(sc->mps_dev, "Unhandled event 0x%x\n", event);
+
+ /*
+ * This is the only place that the event/reply should be freed.
+ * Anything wanting to hold onto the event data should have
+ * already copied it into their own storage.
+ */
+ mps_free_reply(sc, data);
+}
+
+static void
+mps_reregister_events_complete(struct mps_softc *sc, struct mps_command *cm)
+{
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if (cm->cm_reply)
+ mps_print_event(sc,
+ (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
+
+ mps_free_command(sc, cm);
+
+ /* next, send a port enable */
+ mpssas_startup(sc);
}
/*
@@ -1445,14 +1727,60 @@ mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle,
error = mps_request_polled(sc, cm);
reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
- if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
+ if ((reply == NULL) ||
+ (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
error = ENXIO;
mps_print_event(sc, reply);
+ mps_dprint(sc, MPS_TRACE, "%s finished error %d\n", __func__, error);
mps_free_command(sc, cm);
return (error);
}
+static int
+mps_reregister_events(struct mps_softc *sc)
+{
+ MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
+ struct mps_command *cm;
+ struct mps_event_handle *eh;
+ int error, i;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ /* first, reregister events */
+
+ memset(sc->event_mask, 0xff, 16);
+
+ TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
+ for (i = 0; i < 16; i++)
+ sc->event_mask[i] &= ~eh->mask[i];
+ }
+
+ if ((cm = mps_alloc_command(sc)) == NULL)
+ return (EBUSY);
+ evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
+ evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ evtreq->MsgFlags = 0;
+ evtreq->SASBroadcastPrimitiveMasks = 0;
+#ifdef MPS_DEBUG_ALL_EVENTS
+ {
+ u_char fullmask[16];
+ memset(fullmask, 0x00, 16);
+ bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
+ }
+#else
+ bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
+#endif
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ cm->cm_complete = mps_reregister_events_complete;
+
+ error = mps_map_command(sc, cm);
+
+ mps_dprint(sc, MPS_TRACE, "%s finished with error %d\n", __func__, error);
+ return (error);
+}
+
int
mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle)
{
@@ -1511,6 +1839,7 @@ mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft)
MPI2_SGE_TRANSACTION_UNION *tc = sgep;
MPI2_SGE_SIMPLE64 *sge = sgep;
int error, type;
+ uint32_t saved_buf_len, saved_address_low, saved_address_high;
type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK);
@@ -1609,12 +1938,48 @@ mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft)
if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
/*
- * Last element of the last segment of the entire
- * buffer.
+ * If this is a bi-directional request, need to account for that
+ * here. Save the pre-filled sge values. These will be used
+ * either for the 2nd SGL or for a single direction SGL. If
+ * cm_out_len is non-zero, this is a bi-directional request, so
+ * fill in the OUT SGL first, then the IN SGL, otherwise just
+ * fill in the IN SGL. Note that at this time, when filling in
+ * 2 SGL's for a bi-directional request, they both use the same
+ * DMA buffer (same cm command).
*/
- sge->FlagsLength |= ((MPI2_SGE_FLAGS_LAST_ELEMENT |
+ saved_buf_len = sge->FlagsLength & 0x00FFFFFF;
+ saved_address_low = sge->Address.Low;
+ saved_address_high = sge->Address.High;
+ if (cm->cm_out_len) {
+ sge->FlagsLength = cm->cm_out_len |
+ ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_HOST_TO_IOC |
+ MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
+ MPI2_SGE_FLAGS_SHIFT);
+ cm->cm_sglsize -= len;
+ bcopy(sgep, cm->cm_sge, len);
+ cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge
+ + len);
+ }
+ sge->FlagsLength = saved_buf_len |
+ ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER |
- MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
+ MPI2_SGE_FLAGS_LAST_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_LIST |
+ MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
+ MPI2_SGE_FLAGS_SHIFT);
+ if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) {
+ sge->FlagsLength |=
+ ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
+ MPI2_SGE_FLAGS_SHIFT);
+ } else {
+ sge->FlagsLength |=
+ ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
+ MPI2_SGE_FLAGS_SHIFT);
+ }
+ sge->Address.Low = saved_address_low;
+ sge->Address.High = saved_address_high;
}
cm->cm_sglsize -= len;
@@ -1633,10 +1998,10 @@ mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags,
MPI2_SGE_SIMPLE64 sge;
/*
- * This driver always uses 64-bit address elements for
- * simplicity.
+ * This driver always uses 64-bit address elements for simplicity.
*/
- flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_ADDRESS_SIZE;
+ flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
mps_from_u64(pa, &sge.Address);
@@ -1664,8 +2029,8 @@ mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
}
/*
- * Set up DMA direction flags. Note that we don't support
- * bi-directional transfers, with the exception of SMP passthrough.
+ * Set up DMA direction flags. Bi-directional requests are also handled
+ * here. In that case, both direction flags will be set.
*/
sflags = 0;
if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) {
@@ -1691,14 +2056,13 @@ mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
sflags |= MPI2_SGE_FLAGS_DIRECTION |
MPI2_SGE_FLAGS_END_OF_BUFFER;
} else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) {
- sflags |= MPI2_SGE_FLAGS_DIRECTION;
+ sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
dir = BUS_DMASYNC_PREWRITE;
} else
dir = BUS_DMASYNC_PREREAD;
for (i = 0; i < nsegs; i++) {
- if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS)
- && (i != 0)) {
+ if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) && (i != 0)) {
sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
}
error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
@@ -1726,8 +2090,11 @@ mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
}
/*
+ * This is the routine to enqueue commands asynchronously.
* Note that the only error path here is from bus_dmamap_load(), which can
- * return EINPROGRESS if it is waiting for resources.
+ * return EINPROGRESS if it is waiting for resources. Other than this, it's
+ * assumed that if you have a command in-hand, then you have enough credits
+ * to use it.
*/
int
mps_map_command(struct mps_softc *sc, struct mps_command *cm)
@@ -1752,7 +2119,58 @@ mps_map_command(struct mps_softc *sc, struct mps_command *cm)
MPI2_SGE_FLAGS_SHIFT;
sge->Address = 0;
}
- mps_enqueue_request(sc, cm);
+ mps_enqueue_request(sc, cm);
+ }
+
+ return (error);
+}
+
+/*
+ * This is the routine to enqueue commands synchronously. An error of
+ * EINPROGRESS from mps_map_command() is ignored since the command will
+ * be executed and enqueued automatically. Other errors come from msleep().
+ */
+int
+mps_wait_command(struct mps_softc *sc, struct mps_command *cm, int timeout)
+{
+ int error;
+
+ mtx_assert(&sc->mps_mtx, MA_OWNED);
+
+ cm->cm_complete = NULL;
+ cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
+ error = mps_map_command(sc, cm);
+ if ((error != 0) && (error != EINPROGRESS))
+ return (error);
+ error = msleep(cm, &sc->mps_mtx, 0, "mpswait", timeout);
+ if (error == EWOULDBLOCK)
+ error = ETIMEDOUT;
+ return (error);
+}
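
A minimal usage sketch for the new synchronous path; the conversion of mps_read_config_page() later in this patch is the in-tree example. The helper below is hypothetical and assumes the caller already holds mps_mtx and has filled in cm_req:

    /*
     * Sketch only: fire a prepared command synchronously and reclaim it.
     * A timeout of 0 sleeps forever; 60 * hz surfaces ETIMEDOUT after a
     * minute.
     */
    static int
    issue_sync(struct mps_softc *sc, struct mps_command *cm)
    {
        int error;

        error = mps_wait_command(sc, cm, 60 * hz);
        if (error != 0)
            mps_dprint(sc, MPS_FAULT, "sync request failed %d\n", error);
        mps_free_command(sc, cm);
        return (error);
    }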
+
+/*
+ * This is the routine to enqueue a command synchronously and poll for
+ * completion. Its use should be rare.
+ */
+int
+mps_request_polled(struct mps_softc *sc, struct mps_command *cm)
+{
+ int error, timeout = 0;
+
+ error = 0;
+
+ cm->cm_flags |= MPS_CM_FLAGS_POLLED;
+ cm->cm_complete = NULL;
+ mps_map_command(sc, cm);
+
+ while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) {
+ mps_intr_locked(sc);
+ DELAY(50 * 1000);
+ if (timeout++ > 1000) {
+ mps_dprint(sc, MPS_FAULT, "polling failed\n");
+ error = ETIMEDOUT;
+ break;
+ }
}
return (error);
@@ -1816,11 +2234,13 @@ mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params)
cm->cm_complete = mps_config_complete;
return (mps_map_command(sc, cm));
} else {
- cm->cm_complete = NULL;
- cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
- if ((error = mps_map_command(sc, cm)) != 0)
+ error = mps_wait_command(sc, cm, 0);
+ if (error) {
+ mps_dprint(sc, MPS_FAULT,
+ "Error %d reading config page\n", error);
+ mps_free_command(sc, cm);
return (error);
- msleep(cm, &sc->mps_mtx, 0, "mpswait", 0);
+ }
mps_config_complete(sc, cm);
}
@@ -1853,10 +2273,14 @@ mps_config_complete(struct mps_softc *sc, struct mps_command *cm)
*/
if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
params->status = MPI2_IOCSTATUS_BUSY;
- goto bailout;
+ goto done;
}
reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (reply == NULL) {
+ params->status = MPI2_IOCSTATUS_BUSY;
+ goto done;
+ }
params->status = reply->IOCStatus;
if (params->hdr.Ext.ExtPageType != 0) {
params->hdr.Ext.ExtPageType = reply->ExtPageType;
@@ -1868,8 +2292,7 @@ mps_config_complete(struct mps_softc *sc, struct mps_command *cm)
params->hdr.Struct.PageVersion = reply->Header.PageVersion;
}
-bailout:
-
+done:
mps_free_command(sc, cm);
if (params->callback != NULL)
params->callback(sc, params);
diff --git a/sys/dev/mps/mps_config.c b/sys/dev/mps/mps_config.c
new file mode 100644
index 0000000..da1f3fd
--- /dev/null
+++ b/sys/dev/mps/mps_config.c
@@ -0,0 +1,1393 @@
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* TODO Move headers to mpsvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/sysctl.h>
+#include <sys/eventhandler.h>
+#include <sys/uio.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <dev/mps/mpi/mpi2_type.h>
+#include <dev/mps/mpi/mpi2.h>
+#include <dev/mps/mpi/mpi2_ioc.h>
+#include <dev/mps/mpi/mpi2_sas.h>
+#include <dev/mps/mpi/mpi2_cnfg.h>
+#include <dev/mps/mpi/mpi2_init.h>
+#include <dev/mps/mpi/mpi2_tool.h>
+#include <dev/mps/mps_ioctl.h>
+#include <dev/mps/mpsvar.h>
+
+/**
+ * mps_config_get_ioc_pg8 - obtain ioc page 8
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_get_ioc_pg8(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+ Mpi2IOCPage8_t *config_page)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mps_command *cm;
+ MPI2_CONFIG_PAGE_IOC_8 *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ request->Header.PageNumber = 8;
+ request->Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mps_free_command(sc, cm);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ request->Header.PageNumber = 8;
+ request->Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ request->Header.PageLength = mpi_reply->Header.PageLength;
+ cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length, (sizeof(Mpi2IOCPage8_t))));
+
+out:
+ free(page, M_MPT2);
+ if (cm)
+ mps_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mps_config_get_man_pg10 - obtain Manufacturing Page 10 data and set flags
+ * accordingly. Currently, the contents of this page do not need to be
+ * returned to the caller.
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_get_man_pg10(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mps_command *cm;
+ pMpi2ManufacturingPagePS_t page = NULL;
+ uint32_t *pPS_info;
+ uint8_t OEM_Value = 0;
+ int error = 0;
+ u16 ioc_status;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ request->Header.PageNumber = 10;
+ request->Header.PageVersion = MPI2_MANUFACTURING10_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mps_free_command(sc, cm);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ request->Header.PageNumber = 10;
+ request->Header.PageVersion = MPI2_MANUFACTURING10_PAGEVERSION;
+ request->Header.PageLength = mpi_reply->Header.PageLength;
+ cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(MPS_MAN_PAGE10_SIZE, M_MPT2, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+
+ /*
+	 * Default to always hiding the phys disks, then check the OEM ID;
+	 * if it is unknown, fail the request.
+ */
+ sc->WD_hide_expose = MPS_WD_HIDE_ALWAYS;
+ OEM_Value = (uint8_t)(page->ProductSpecificInfo & 0x000000FF);
+ if (OEM_Value != MPS_WD_LSI_OEM) {
+ mps_dprint(sc, MPS_FAULT, "Unknown OEM value for WarpDrive "
+ "(0x%x)\n", OEM_Value);
+ error = ENXIO;
+ goto out;
+ }
+
+ /*
+	 * Set the phys disks hide/expose value from the low byte of the
+	 * sixth dword of the product-specific area.
+ */
+ pPS_info = &page->ProductSpecificInfo;
+ sc->WD_hide_expose = (uint8_t)(pPS_info[5]);
+ sc->WD_hide_expose &= MPS_WD_HIDE_EXPOSE_MASK;
+ if ((sc->WD_hide_expose != MPS_WD_HIDE_ALWAYS) &&
+ (sc->WD_hide_expose != MPS_WD_EXPOSE_ALWAYS) &&
+ (sc->WD_hide_expose != MPS_WD_HIDE_IF_VOLUME)) {
+ mps_dprint(sc, MPS_FAULT, "Unknown value for WarpDrive "
+ "hide/expose: 0x%x\n", sc->WD_hide_expose);
+ error = ENXIO;
+ goto out;
+ }
+
+out:
+ free(page, M_MPT2);
+ if (cm)
+ mps_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mps_base_static_config_pages - static start of day config pages.
+ * @sc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mps_base_static_config_pages(struct mps_softc *sc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ int retry;
+
+ retry = 0;
+ while (mps_config_get_ioc_pg8(sc, &mpi_reply, &sc->ioc_pg8)) {
+ retry++;
+ if (retry > 5) {
+			/* FIXME: we need to handle this situation */
+ break;
+ }
+ }
+}
+
+/**
+ * mps_wd_config_pages - get info required to support WarpDrive. This needs to
+ * be called after discovery is complete to guarantee that IR info is there.
+ * @sc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mps_wd_config_pages(struct mps_softc *sc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ pMpi2RaidVolPage0_t raid_vol_pg0 = NULL;
+ Mpi2RaidPhysDiskPage0_t phys_disk_pg0;
+ pMpi2RaidVol0PhysDisk_t pRVPD;
+ uint32_t stripe_size, phys_disk_page_address;
+ uint16_t block_size;
+ uint8_t index, stripe_exp = 0, block_exp = 0;
+
+ /*
+ * Get the WD settings from manufacturing page 10 if using a WD HBA.
+ * This will be used to determine if phys disks should always be
+ * hidden, hidden only if part of a WD volume, or never hidden. Also,
+ * get the WD RAID Volume info and fail if volume does not exist or if
+ * volume does not meet the requirements for a WD volume. No retry
+ * here. Just default to HIDE ALWAYS if man Page10 fails, or clear WD
+ * Valid flag if Volume info fails.
+ */
+ sc->WD_valid_config = FALSE;
+ if (sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) {
+ if (mps_config_get_man_pg10(sc, &mpi_reply)) {
+ mps_dprint(sc, MPS_FAULT,
+ "mps_config_get_man_pg10 failed! Using 0 (Hide "
+ "Always) for WarpDrive hide/expose value.\n");
+ sc->WD_hide_expose = MPS_WD_HIDE_ALWAYS;
+ }
+
+ /*
+ * Get first RAID Volume Page0 using GET_NEXT_HANDLE.
+ */
+ raid_vol_pg0 = malloc(sizeof(Mpi2RaidVolPage0_t) +
+ (sizeof(Mpi2RaidVol0PhysDisk_t) * MPS_MAX_DISKS_IN_VOL),
+ M_MPT2, M_ZERO | M_NOWAIT);
+ if (!raid_vol_pg0) {
+ printf("%s: page alloc failed\n", __func__);
+ goto out;
+ }
+
+ if (mps_config_get_raid_volume_pg0(sc, &mpi_reply, raid_vol_pg0,
+ 0x0000FFFF)) {
+ mps_dprint(sc, MPS_INFO,
+ "mps_config_get_raid_volume_pg0 failed! Assuming "
+ "WarpDrive IT mode.\n");
+ goto out;
+ }
+
+ /*
+ * Check for valid WD configuration:
+ * volume type is RAID0
+ * number of phys disks in the volume is no more than 8
+ */
+ if ((raid_vol_pg0->VolumeType != MPI2_RAID_VOL_TYPE_RAID0) ||
+ (raid_vol_pg0->NumPhysDisks > 8)) {
+ mps_dprint(sc, MPS_FAULT,
+ "Invalid WarpDrive configuration. Direct Drive I/O "
+ "will not be used.\n");
+ goto out;
+ }
+
+ /*
+ * Save the WD RAID data to be used during WD I/O.
+ */
+ sc->DD_max_lba = le64toh((uint64_t)raid_vol_pg0->MaxLBA.High <<
+ 32 | (uint64_t)raid_vol_pg0->MaxLBA.Low);
+ sc->DD_num_phys_disks = raid_vol_pg0->NumPhysDisks;
+ sc->DD_dev_handle = raid_vol_pg0->DevHandle;
+ sc->DD_stripe_size = raid_vol_pg0->StripeSize;
+ sc->DD_block_size = raid_vol_pg0->BlockSize;
+
+ /*
+ * Find power of 2 of stripe size and set this as the exponent.
+ * Fail if stripe size is 0.
+ */
+ stripe_size = raid_vol_pg0->StripeSize;
+ for (index = 0; index < 32; index++) {
+ if (stripe_size & 1)
+ break;
+ stripe_exp++;
+ stripe_size >>= 1;
+ }
+ if (index == 32) {
+ mps_dprint(sc, MPS_FAULT,
+ "RAID Volume's stripe size is 0. Direct Drive I/O "
+ "will not be used.\n");
+ goto out;
+ }
+ sc->DD_stripe_exponent = stripe_exp;
+
+ /*
+ * Find power of 2 of block size and set this as the exponent.
+ * Fail if block size is 0.
+ */
+ block_size = raid_vol_pg0->BlockSize;
+ for (index = 0; index < 16; index++) {
+ if (block_size & 1)
+ break;
+ block_exp++;
+ block_size >>= 1;
+ }
+ if (index == 16) {
+ mps_dprint(sc, MPS_FAULT,
+ "RAID Volume's block size is 0. Direct Drive I/O "
+ "will not be used.\n");
+ goto out;
+ }
+ sc->DD_block_exponent = block_exp;
+
+ /*
+ * Loop through all of the volume's Phys Disks to map the phys
+		 * disk number into the column map. This is used during Direct
+ * Drive I/O to send the request to the correct SSD.
+ */
+ pRVPD = (pMpi2RaidVol0PhysDisk_t)&raid_vol_pg0->PhysDisk;
+ for (index = 0; index < raid_vol_pg0->NumPhysDisks; index++) {
+ sc->DD_column_map[pRVPD->PhysDiskMap].phys_disk_num =
+ pRVPD->PhysDiskNum;
+ pRVPD++;
+ }
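+
+		/*
+		 * Illustrative sketch (not the driver's actual I/O path,
+		 * which lives elsewhere): with power-of-2 stripe and block
+		 * sizes, the exponents let Direct Drive I/O locate the SSD
+		 * for a virtual LBA with shifts instead of 64-bit division.
+		 * For example, with a stripe size of 64 (exponent 6):
+		 *
+		 *	stripe_num = vlba >> sc->DD_stripe_exponent;
+		 *	handle = sc->DD_column_map[stripe_num %
+		 *	    sc->DD_num_phys_disks].dev_handle;
+		 */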
+
+ /*
+ * Get second RAID Volume Page0 using previous handle. This
+ * page should not exist. If it does, must not proceed with WD
+ * handling.
+ */
+ if (mps_config_get_raid_volume_pg0(sc, &mpi_reply,
+ raid_vol_pg0, (u32)raid_vol_pg0->DevHandle)) {
+ if (mpi_reply.IOCStatus !=
+ MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ mps_dprint(sc, MPS_FAULT,
+				    "Error getting second RAID Volume Page0! "
+				    "Direct Drive I/O will not be used.\n");
+ goto out;
+ }
+ } else {
+ mps_dprint(sc, MPS_FAULT,
+ "Multiple volumes! Direct Drive I/O will not be "
+ "used.\n");
+ goto out;
+ }
+
+ /*
+ * Get RAID Volume Phys Disk Page 0 for all SSDs in the volume.
+ */
+ for (index = 0; index < raid_vol_pg0->NumPhysDisks; index++) {
+ phys_disk_page_address =
+ MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM +
+ sc->DD_column_map[index].phys_disk_num;
+ if (mps_config_get_raid_pd_pg0(sc, &mpi_reply,
+ &phys_disk_pg0, phys_disk_page_address)) {
+ mps_dprint(sc, MPS_FAULT,
+ "mps_config_get_raid_pd_pg0 failed! Direct "
+ "Drive I/O will not be used.\n");
+ goto out;
+ }
+ if (phys_disk_pg0.DevHandle == 0xFFFF) {
+ mps_dprint(sc, MPS_FAULT,
+ "Invalid Phys Disk DevHandle! Direct Drive "
+ "I/O will not be used.\n");
+ goto out;
+ }
+ sc->DD_column_map[index].dev_handle =
+ phys_disk_pg0.DevHandle;
+ }
+ sc->WD_valid_config = TRUE;
+out:
+ if (raid_vol_pg0)
+ free(raid_vol_pg0, M_MPT2);
+ }
+}
+
+/**
+ * mps_config_get_dpm_pg0 - obtain driver persistent mapping page0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_get_dpm_pg0(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+ Mpi2DriverMappingPage0_t *config_page, u16 sz)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mps_command *cm;
+ Mpi2DriverMappingPage0_t *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ memset(config_page, 0, sz);
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
+ request->PageAddress = sc->max_dpm_entries <<
+ MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mps_free_command(sc, cm);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_NVRAM;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
+ request->PageAddress = sc->max_dpm_entries <<
+ MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
+ request->ExtPageLength = mpi_reply->ExtPageLength;
+ cm->cm_length = le16toh(request->ExtPageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length, sz));
+out:
+ free(page, M_MPT2);
+ if (cm)
+ mps_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mps_config_set_dpm_pg0 - write an entry in driver persistent mapping page0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @entry_idx: entry index in DPM Page0 to be modified
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_set_dpm_pg0(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+ Mpi2DriverMappingPage0_t *config_page, u16 entry_idx)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mps_command *cm;
+ MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
+ request->PageAddress = 1 << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
+ request->PageAddress |= htole16(entry_idx);
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mps_free_command(sc, cm);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
+ request->ExtPageLength = mpi_reply->ExtPageLength;
+ request->PageAddress = 1 << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
+ request->PageAddress |= htole16(entry_idx);
+ cm->cm_length = le16toh(mpi_reply->ExtPageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAOUT;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ bcopy(config_page, page, MIN(cm->cm_length,
+ (sizeof(Mpi2DriverMappingPage0_t))));
+ cm->cm_data = page;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: page written with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+out:
+ free(page, M_MPT2);
+ if (cm)
+ mps_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mps_config_get_sas_device_pg0 - obtain sas device page 0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_get_sas_device_pg0(struct mps_softc *sc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u16 handle)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mps_command *cm;
+ Mpi2SasDevicePage0_t *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mps_free_command(sc, cm);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
+ request->ExtPageLength = mpi_reply->ExtPageLength;
+ request->PageAddress = htole32(form | handle);
+ cm->cm_length = le16toh(mpi_reply->ExtPageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length,
+ sizeof(Mpi2SasDevicePage0_t)));
+out:
+ free(page, M_MPT2);
+ if (cm)
+ mps_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mps_config_get_bios_pg3 - obtain BIOS page 3
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_get_bios_pg3(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+ Mpi2BiosPage3_t *config_page)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mps_command *cm;
+ Mpi2BiosPage3_t *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ request->Header.PageNumber = 3;
+ request->Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mps_free_command(sc, cm);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ request->Header.PageNumber = 3;
+ request->Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
+ request->Header.PageLength = mpi_reply->Header.PageLength;
+ cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length, sizeof(Mpi2BiosPage3_t)));
+out:
+ free(page, M_MPT2);
+ if (cm)
+ mps_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mps_config_get_raid_volume_pg0 - obtain raid volume page 0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @page_address: form and handle value used to get page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_get_raid_volume_pg0(struct mps_softc *sc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 page_address)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mps_command *cm;
+ Mpi2RaidVolPage0_t *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mps_free_command(sc, cm);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ request->Header.PageNumber = 0;
+ request->Header.PageLength = mpi_reply->Header.PageLength;
+ request->Header.PageVersion = mpi_reply->Header.PageVersion;
+ request->PageAddress = page_address;
+ cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, cm->cm_length);
+out:
+ free(page, M_MPT2);
+ if (cm)
+ mps_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mps_config_get_raid_volume_pg1 - obtain raid volume page 1
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_get_raid_volume_pg1(struct mps_softc *sc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, u16 handle)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mps_command *cm;
+ Mpi2RaidVolPage1_t *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ request->Header.PageNumber = 1;
+ request->Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mps_free_command(sc, cm);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ request->Header.PageNumber = 1;
+ request->Header.PageLength = mpi_reply->Header.PageLength;
+ request->Header.PageVersion = mpi_reply->Header.PageVersion;
+ request->PageAddress = htole32(form | handle);
+ cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length,
+ sizeof(Mpi2RaidVolPage1_t)));
+out:
+ free(page, M_MPT2);
+ if (cm)
+ mps_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mps_config_get_volume_wwid - returns wwid given the volume handle
+ * @sc: per adapter object
+ * @volume_handle: volume handle
+ * @wwid: volume wwid
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_get_volume_wwid(struct mps_softc *sc, u16 volume_handle, u64 *wwid)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2RaidVolPage1_t raid_vol_pg1;
+
+ *wwid = 0;
+ if (!(mps_config_get_raid_volume_pg1(sc, &mpi_reply, &raid_vol_pg1,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, volume_handle))) {
+ *wwid = le64toh((u64)raid_vol_pg1.WWID.High << 32 |
+ raid_vol_pg1.WWID.Low);
+		return (0);
+	} else
+		return (-1);
+}
+
+/**
+ * mps_config_get_raid_pd_pg0 - obtain raid phys disk page 0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @page_address: form and handle value used to get page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mps_config_get_raid_pd_pg0(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+ Mpi2RaidPhysDiskPage0_t *config_page, u32 page_address)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mps_command *cm;
+ Mpi2RaidPhysDiskPage0_t *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mps_free_command(sc, cm);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ request->Header.PageNumber = 0;
+ request->Header.PageLength = mpi_reply->Header.PageLength;
+ request->Header.PageVersion = mpi_reply->Header.PageVersion;
+ request->PageAddress = page_address;
+ cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+
+ error = mps_request_polled(sc, cm);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length,
+ sizeof(Mpi2RaidPhysDiskPage0_t)));
+out:
+ free(page, M_MPT2);
+ if (cm)
+ mps_free_command(sc, cm);
+ return (error);
+}
diff --git a/sys/dev/mps/mps_ioctl.h b/sys/dev/mps/mps_ioctl.h
index 811b13e..cc28337 100644
--- a/sys/dev/mps/mps_ioctl.h
+++ b/sys/dev/mps/mps_ioctl.h
@@ -31,6 +31,35 @@
*
* $FreeBSD$
*/
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
#ifndef _MPS_IOCTL_H_
#define _MPS_IOCTL_H_
@@ -93,8 +122,237 @@ struct mps_usr_command {
uint32_t flags;
};
-#define MPSIO_MPS_COMMAND_FLAG_VERBOSE 0x01
-#define MPSIO_MPS_COMMAND_FLAG_DEBUG 0x02
+typedef struct mps_pci_bits
+{
+ union {
+ struct {
+ uint32_t DeviceNumber :5;
+ uint32_t FunctionNumber :3;
+ uint32_t BusNumber :24;
+ } bits;
+ uint32_t AsDWORD;
+ } u;
+ uint32_t PciSegmentId;
+} mps_pci_bits_t;
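+
+/*
+ * For example (illustrative values only): a device at PCI bus 3, device 0,
+ * function 1 would be encoded as u.bits.BusNumber = 3,
+ * u.bits.DeviceNumber = 0 and u.bits.FunctionNumber = 1, all packed into
+ * the single 32-bit u.AsDWORD value.
+ */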
+
+/*
+ * The following is the MPTIOCTL_GET_ADAPTER_DATA data structure. This data
+ * structure is laid out so that it is properly aligned for both 32-bit and
+ * 64-bit mode applications.
+ *
+ * Adapter Type - Value = 4 = SCSI Protocol through SAS-2 adapter
+ *
+ * MPI Port Number - The PCI Function number for this device
+ *
+ * PCI Device HW Id - The PCI device number for this device
+ *
+ */
+#define MPSIOCTL_ADAPTER_TYPE_SAS2 4
+#define MPSIOCTL_ADAPTER_TYPE_SAS2_SSS6200 5
+typedef struct mps_adapter_data
+{
+ uint32_t StructureLength;
+ uint32_t AdapterType;
+ uint32_t MpiPortNumber;
+ uint32_t PCIDeviceHwId;
+ uint32_t PCIDeviceHwRev;
+ uint32_t SubSystemId;
+ uint32_t SubsystemVendorId;
+ uint32_t Reserved1;
+ uint32_t MpiFirmwareVersion;
+ uint32_t BiosVersion;
+ uint8_t DriverVersion[32];
+ uint8_t Reserved2;
+ uint8_t ScsiId;
+ uint16_t Reserved3;
+ mps_pci_bits_t PciInformation;
+} mps_adapter_data_t;
+
+
+typedef struct mps_update_flash
+{
+ uint64_t PtrBuffer;
+ uint32_t ImageChecksum;
+ uint32_t ImageOffset;
+ uint32_t ImageSize;
+ uint32_t ImageType;
+} mps_update_flash_t;
+
+
+#define MPS_PASS_THRU_DIRECTION_NONE 0
+#define MPS_PASS_THRU_DIRECTION_READ 1
+#define MPS_PASS_THRU_DIRECTION_WRITE 2
+#define MPS_PASS_THRU_DIRECTION_BOTH 3
+
+typedef struct mps_pass_thru
+{
+ uint64_t PtrRequest;
+ uint64_t PtrReply;
+ uint64_t PtrData;
+ uint32_t RequestSize;
+ uint32_t ReplySize;
+ uint32_t DataSize;
+ uint32_t DataDirection;
+ uint64_t PtrDataOut;
+ uint32_t DataOutSize;
+ uint32_t Timeout;
+} mps_pass_thru_t;
+
+
+/*
+ * Event queue defines
+ */
+#define MPS_EVENT_QUEUE_SIZE (50) /* Max Events stored in driver */
+#define MPS_MAX_EVENT_DATA_LENGTH (48) /* Size of each event in Dwords */
+
+typedef struct mps_event_query
+{
+ uint16_t Entries;
+ uint16_t Reserved;
+ uint32_t Types[4];
+} mps_event_query_t;
+
+typedef struct mps_event_enable
+{
+ uint32_t Types[4];
+} mps_event_enable_t;
+
+/*
+ * Event record entry for ioctl.
+ */
+typedef struct mps_event_entry
+{
+ uint32_t Type;
+ uint32_t Number;
+ uint32_t Data[MPS_MAX_EVENT_DATA_LENGTH];
+} mps_event_entry_t;
+
+typedef struct mps_event_report
+{
+ uint32_t Size;
+ uint64_t PtrEvents;
+} mps_event_report_t;
+
+
+typedef struct mps_pci_info
+{
+ uint32_t BusNumber;
+ uint8_t DeviceNumber;
+ uint8_t FunctionNumber;
+ uint16_t InterruptVector;
+ uint8_t PciHeader[256];
+} mps_pci_info_t;
+
+
+typedef struct mps_diag_action
+{
+ uint32_t Action;
+ uint32_t Length;
+ uint64_t PtrDiagAction;
+ uint32_t ReturnCode;
+} mps_diag_action_t;
+
+#define MPS_FW_DIAGNOSTIC_UID_NOT_FOUND (0xFF)
+
+#define MPS_FW_DIAG_NEW (0x806E6577)
+
+#define MPS_FW_DIAG_TYPE_REGISTER (0x00000001)
+#define MPS_FW_DIAG_TYPE_UNREGISTER (0x00000002)
+#define MPS_FW_DIAG_TYPE_QUERY (0x00000003)
+#define MPS_FW_DIAG_TYPE_READ_BUFFER (0x00000004)
+#define MPS_FW_DIAG_TYPE_RELEASE (0x00000005)
+
+#define MPS_FW_DIAG_INVALID_UID (0x00000000)
+
+#define MPS_DIAG_SUCCESS 0
+#define MPS_DIAG_FAILURE 1
+
+#define MPS_FW_DIAG_ERROR_SUCCESS (0x00000000)
+#define MPS_FW_DIAG_ERROR_FAILURE (0x00000001)
+#define MPS_FW_DIAG_ERROR_INVALID_PARAMETER (0x00000002)
+#define MPS_FW_DIAG_ERROR_POST_FAILED (0x00000010)
+#define MPS_FW_DIAG_ERROR_INVALID_UID (0x00000011)
+#define MPS_FW_DIAG_ERROR_RELEASE_FAILED (0x00000012)
+#define MPS_FW_DIAG_ERROR_NO_BUFFER (0x00000013)
+#define MPS_FW_DIAG_ERROR_ALREADY_RELEASED (0x00000014)
+
+
+typedef struct mps_fw_diag_register
+{
+ uint8_t ExtendedType;
+ uint8_t BufferType;
+ uint16_t ApplicationFlags;
+ uint32_t DiagnosticFlags;
+ uint32_t ProductSpecific[23];
+ uint32_t RequestedBufferSize;
+ uint32_t UniqueId;
+} mps_fw_diag_register_t;
+
+typedef struct mps_fw_diag_unregister
+{
+ uint32_t UniqueId;
+} mps_fw_diag_unregister_t;
+
+#define MPS_FW_DIAG_FLAG_APP_OWNED (0x0001)
+#define MPS_FW_DIAG_FLAG_BUFFER_VALID (0x0002)
+#define MPS_FW_DIAG_FLAG_FW_BUFFER_ACCESS (0x0004)
+
+typedef struct mps_fw_diag_query
+{
+ uint8_t ExtendedType;
+ uint8_t BufferType;
+ uint16_t ApplicationFlags;
+ uint32_t DiagnosticFlags;
+ uint32_t ProductSpecific[23];
+ uint32_t TotalBufferSize;
+ uint32_t DriverAddedBufferSize;
+ uint32_t UniqueId;
+} mps_fw_diag_query_t;
+
+typedef struct mps_fw_diag_release
+{
+ uint32_t UniqueId;
+} mps_fw_diag_release_t;
+
+#define MPS_FW_DIAG_FLAG_REREGISTER (0x0001)
+#define MPS_FW_DIAG_FLAG_FORCE_RELEASE (0x0002)
+
+typedef struct mps_diag_read_buffer
+{
+ uint8_t Status;
+ uint8_t Reserved;
+ uint16_t Flags;
+ uint32_t StartingOffset;
+ uint32_t BytesToRead;
+ uint32_t UniqueId;
+ uint64_t PtrDataBuffer;
+} mps_diag_read_buffer_t;
+
+/*
+ * Register Access
+ */
+#define REG_IO_READ 1
+#define REG_IO_WRITE 2
+#define REG_MEM_READ 3
+#define REG_MEM_WRITE 4
+
+typedef struct mps_reg_access
+{
+ uint32_t Command;
+ uint32_t RegOffset;
+ uint32_t RegData;
+} mps_reg_access_t;
+
+typedef struct mps_btdh_mapping
+{
+ uint16_t TargetID;
+ uint16_t Bus;
+ uint16_t DevHandle;
+ uint16_t Reserved;
+} mps_btdh_mapping_t;
+
+#define MPSIO_MPS_COMMAND_FLAG_VERBOSE 0x01
+#define MPSIO_MPS_COMMAND_FLAG_DEBUG 0x02
#define MPSIO_READ_CFG_HEADER _IOWR('M', 200, struct mps_cfg_page_req)
#define MPSIO_READ_CFG_PAGE _IOWR('M', 201, struct mps_cfg_page_req)
#define MPSIO_READ_EXT_CFG_HEADER _IOWR('M', 202, struct mps_ext_cfg_page_req)
@@ -103,4 +361,27 @@ struct mps_usr_command {
#define MPSIO_RAID_ACTION _IOWR('M', 205, struct mps_raid_action)
#define MPSIO_MPS_COMMAND _IOWR('M', 210, struct mps_usr_command)
+#define MPTIOCTL ('I')
+#define MPTIOCTL_GET_ADAPTER_DATA _IOWR(MPTIOCTL, 1,\
+ struct mps_adapter_data)
+#define MPTIOCTL_UPDATE_FLASH _IOWR(MPTIOCTL, 2,\
+ struct mps_update_flash)
+#define MPTIOCTL_RESET_ADAPTER _IO(MPTIOCTL, 3)
+#define MPTIOCTL_PASS_THRU _IOWR(MPTIOCTL, 4,\
+ struct mps_pass_thru)
+#define MPTIOCTL_EVENT_QUERY _IOWR(MPTIOCTL, 5,\
+ struct mps_event_query)
+#define MPTIOCTL_EVENT_ENABLE _IOWR(MPTIOCTL, 6,\
+ struct mps_event_enable)
+#define MPTIOCTL_EVENT_REPORT _IOWR(MPTIOCTL, 7,\
+ struct mps_event_report)
+#define MPTIOCTL_GET_PCI_INFO _IOWR(MPTIOCTL, 8,\
+ struct mps_pci_info)
+#define MPTIOCTL_DIAG_ACTION _IOWR(MPTIOCTL, 9,\
+ struct mps_diag_action)
+#define MPTIOCTL_REG_ACCESS _IOWR(MPTIOCTL, 10,\
+ struct mps_reg_access)
+#define MPTIOCTL_BTDH_MAPPING _IOWR(MPTIOCTL, 11,\
+ struct mps_btdh_mapping)
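+
+/*
+ * Usage sketch (illustrative only; the device node name and error handling
+ * are assumptions, not part of this header): a management application
+ * would open the controller's device node and issue these ioctls directly,
+ * e.g.:
+ *
+ *	struct mps_adapter_data adapter_data;
+ *
+ *	memset(&adapter_data, 0, sizeof(adapter_data));
+ *	adapter_data.StructureLength = sizeof(adapter_data);
+ *	fd = open("/dev/mps0", O_RDWR);
+ *	if (fd >= 0 && ioctl(fd, MPTIOCTL_GET_ADAPTER_DATA,
+ *	    &adapter_data) == 0)
+ *		printf("MPI firmware version: 0x%08x\n",
+ *		    adapter_data.MpiFirmwareVersion);
+ */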
+
#endif /* !_MPS_IOCTL_H_ */
diff --git a/sys/dev/mps/mps_mapping.c b/sys/dev/mps/mps_mapping.c
new file mode 100644
index 0000000..e897dd3
--- /dev/null
+++ b/sys/dev/mps/mps_mapping.c
@@ -0,0 +1,2268 @@
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* TODO Move headers to mpsvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/sysctl.h>
+#include <sys/eventhandler.h>
+#include <sys/uio.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <dev/mps/mpi/mpi2_type.h>
+#include <dev/mps/mpi/mpi2.h>
+#include <dev/mps/mpi/mpi2_ioc.h>
+#include <dev/mps/mpi/mpi2_sas.h>
+#include <dev/mps/mpi/mpi2_cnfg.h>
+#include <dev/mps/mpi/mpi2_init.h>
+#include <dev/mps/mpi/mpi2_tool.h>
+#include <dev/mps/mps_ioctl.h>
+#include <dev/mps/mpsvar.h>
+#include <dev/mps/mps_mapping.h>
+
+/**
+ * _mapping_clear_map_entry - Clear a particular map table entry.
+ * @map_entry: map table entry
+ *
+ * Returns nothing.
+ */
+static inline void
+_mapping_clear_map_entry(struct dev_mapping_table *map_entry)
+{
+ map_entry->physical_id = 0;
+ map_entry->device_info = 0;
+ map_entry->phy_bits = 0;
+ map_entry->dpm_entry_num = MPS_DPM_BAD_IDX;
+ map_entry->dev_handle = 0;
+ map_entry->channel = -1;
+ map_entry->id = -1;
+ map_entry->missing_count = 0;
+ map_entry->init_complete = 0;
+ map_entry->TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
+}
+
+/**
+ * _mapping_clear_enc_entry - Clear a particular enclosure table entry.
+ * @enc_entry: enclosure table entry
+ *
+ * Returns nothing.
+ */
+static inline void
+_mapping_clear_enc_entry(struct enc_mapping_table *enc_entry)
+{
+ enc_entry->enclosure_id = 0;
+ enc_entry->start_index = MPS_MAPTABLE_BAD_IDX;
+ enc_entry->phy_bits = 0;
+ enc_entry->dpm_entry_num = MPS_DPM_BAD_IDX;
+ enc_entry->enc_handle = 0;
+ enc_entry->num_slots = 0;
+ enc_entry->start_slot = 0;
+ enc_entry->missing_count = 0;
+ enc_entry->removal_flag = 0;
+ enc_entry->skip_search = 0;
+ enc_entry->init_complete = 0;
+}
+
+/**
+ * _mapping_commit_enc_entry - write a particular enc entry in DPM page0.
+ * @sc: per adapter object
+ * @et_entry: enclosure table entry
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_mapping_commit_enc_entry(struct mps_softc *sc,
+ struct enc_mapping_table *et_entry)
+{
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ struct dev_mapping_table *mt_entry;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2DriverMappingPage0_t config_page;
+
+ if (!sc->is_dpm_enable)
+ return 0;
+
+ memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
+ memcpy(&config_page.Header, (u8 *) sc->dpm_pg0,
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += et_entry->dpm_entry_num;
+ dpm_entry->PhysicalIdentifier.Low =
+	    (0xFFFFFFFF & et_entry->enclosure_id);
+	dpm_entry->PhysicalIdentifier.High =
+	    (et_entry->enclosure_id >> 32);
+ mt_entry = &sc->mapping_table[et_entry->start_index];
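+	/*
+	 * The entry cached in dpm_pg0 is kept in host byte order; it is
+	 * converted to little-endian here for the write and converted back
+	 * below (on both the success and failure paths).
+	 */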
+ dpm_entry->DeviceIndex = htole16(mt_entry->id);
+ dpm_entry->MappingInformation = et_entry->num_slots;
+ dpm_entry->MappingInformation <<= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
+ dpm_entry->MappingInformation |= et_entry->missing_count;
+ dpm_entry->MappingInformation = htole16(dpm_entry->MappingInformation);
+ dpm_entry->PhysicalBitsMapping = htole32(et_entry->phy_bits);
+ dpm_entry->Reserved1 = 0;
+
+ memcpy(&config_page.Entry, (u8 *)dpm_entry,
+ sizeof(Mpi2DriverMap0Entry_t));
+ if (mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
+ et_entry->dpm_entry_num)) {
+ printf("%s: write of dpm entry %d for enclosure failed\n",
+ __func__, et_entry->dpm_entry_num);
+ dpm_entry->MappingInformation = le16toh(dpm_entry->
+ MappingInformation);
+ dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
+ dpm_entry->PhysicalBitsMapping =
+ le32toh(dpm_entry->PhysicalBitsMapping);
+ return -1;
+ }
+ dpm_entry->MappingInformation = le16toh(dpm_entry->
+ MappingInformation);
+ dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
+ dpm_entry->PhysicalBitsMapping =
+ le32toh(dpm_entry->PhysicalBitsMapping);
+ return 0;
+}
+
+/**
+ * _mapping_commit_map_entry - write a particular map table entry in DPM page0.
+ * @sc: per adapter object
+ * @mt_entry: map table entry
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_mapping_commit_map_entry(struct mps_softc *sc,
+ struct dev_mapping_table *mt_entry)
+{
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2DriverMappingPage0_t config_page;
+
+ if (!sc->is_dpm_enable)
+ return 0;
+
+ memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
+ memcpy(&config_page.Header, (u8 *)sc->dpm_pg0,
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *) sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry = dpm_entry + mt_entry->dpm_entry_num;
+ dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF &
+ mt_entry->physical_id);
+ dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32);
+ dpm_entry->DeviceIndex = htole16(mt_entry->id);
+ dpm_entry->MappingInformation = htole16(mt_entry->missing_count);
+ dpm_entry->PhysicalBitsMapping = 0;
+ dpm_entry->Reserved1 = 0;
+ memcpy(&config_page.Entry, (u8 *)dpm_entry,
+ sizeof(Mpi2DriverMap0Entry_t));
+ if (mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
+ mt_entry->dpm_entry_num)) {
+ printf("%s: write of dpm entry %d for device failed\n",
+ __func__, mt_entry->dpm_entry_num);
+ dpm_entry->MappingInformation = le16toh(dpm_entry->
+ MappingInformation);
+ dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
+ return -1;
+ }
+
+ dpm_entry->MappingInformation = le16toh(dpm_entry->MappingInformation);
+ dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
+ return 0;
+}
+
+/**
+ * _mapping_get_ir_maprange - get start and end index for IR map range.
+ * @sc: per adapter object
+ * @start_idx: place holder for start index
+ * @end_idx: place holder for end index
+ *
+ * IR volumes can be mapped either at the start or at the end of the mapping
+ * table. This function determines where IR volume mapping starts and ends
+ * in the device mapping table.
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_get_ir_maprange(struct mps_softc *sc, u32 *start_idx, u32 *end_idx)
+{
+ u16 volume_mapping_flags;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+ *start_idx = 0;
+ if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
+ *start_idx = 1;
+ } else
+ *start_idx = sc->max_devices - sc->max_volumes;
+ *end_idx = *start_idx + sc->max_volumes - 1;
+}
+
+/**
+ * _mapping_get_enc_idx_from_id - get enclosure index from enclosure ID
+ * @sc: per adapter object
+ * @enc_id: enclosure logical identifier
+ * @phy_bits: enclosure PHY bitmap
+ *
+ * Returns the index of enclosure entry on success or bad index.
+ */
+static u8
+_mapping_get_enc_idx_from_id(struct mps_softc *sc, u64 enc_id,
+ u64 phy_bits)
+{
+ struct enc_mapping_table *et_entry;
+ u8 enc_idx = 0;
+
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
+ et_entry = &sc->enclosure_table[enc_idx];
+ if ((et_entry->enclosure_id == le64toh(enc_id)) &&
+ (!et_entry->phy_bits || (et_entry->phy_bits &
+ le32toh(phy_bits))))
+ return enc_idx;
+ }
+ return MPS_ENCTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_enc_idx_from_handle - get enclosure index from handle
+ * @sc: per adapter object
+ * @handle: enclosure handle
+ *
+ * Returns the index of enclosure entry on success or bad index.
+ */
+static u8
+_mapping_get_enc_idx_from_handle(struct mps_softc *sc, u16 handle)
+{
+ struct enc_mapping_table *et_entry;
+ u8 enc_idx = 0;
+
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
+ et_entry = &sc->enclosure_table[enc_idx];
+ if (et_entry->missing_count)
+ continue;
+ if (et_entry->enc_handle == handle)
+ return enc_idx;
+ }
+ return MPS_ENCTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_high_missing_et_idx - get missing enclosure index
+ * @sc: per adapter object
+ *
+ * Searches through the enclosure table and identifies the enclosure entry
+ * with the highest missing count and returns its index
+ *
+ * Returns the index of enclosure entry on success or bad index.
+ */
+static u8
+_mapping_get_high_missing_et_idx(struct mps_softc *sc)
+{
+ struct enc_mapping_table *et_entry;
+ u8 high_missing_count = 0;
+ u8 enc_idx, high_idx = MPS_ENCTABLE_BAD_IDX;
+
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
+ et_entry = &sc->enclosure_table[enc_idx];
+ if ((et_entry->missing_count > high_missing_count) &&
+ !et_entry->skip_search) {
+ high_missing_count = et_entry->missing_count;
+ high_idx = enc_idx;
+ }
+ }
+ return high_idx;
+}
+
+/**
+ * _mapping_get_high_missing_mt_idx - get missing map table index
+ * @sc: per adapter object
+ *
+ * Searches through the map table and identifies the device entry
+ * with the highest missing count and returns its index
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_high_missing_mt_idx(struct mps_softc *sc)
+{
+ u32 map_idx, high_idx = MPS_MAPTABLE_BAD_IDX;
+ u8 high_missing_count = 0;
+ u32 start_idx, end_idx, start_idx_ir, end_idx_ir;
+ struct dev_mapping_table *mt_entry;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ start_idx = 0;
+ end_idx = sc->max_devices;
+ if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
+ start_idx = 1;
+ /* The IR map range is only valid when IR firmware is present. */
+ if (sc->ir_firmware) {
+ _mapping_get_ir_maprange(sc, &start_idx_ir, &end_idx_ir);
+ if (start_idx == start_idx_ir)
+ start_idx = end_idx_ir + 1;
+ else
+ end_idx = start_idx_ir;
+ }
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx < end_idx; map_idx++, mt_entry++) {
+ if (mt_entry->missing_count > high_missing_count) {
+ high_missing_count = mt_entry->missing_count;
+ high_idx = map_idx;
+ }
+ }
+ return high_idx;
+}
+
+/**
+ * _mapping_get_ir_mt_idx_from_wwid - get map table index from volume WWID
+ * @sc: per adapter object
+ * @wwid: world wide unique ID of the volume
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_ir_mt_idx_from_wwid(struct mps_softc *sc, u64 wwid)
+{
+ u32 start_idx, end_idx, map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ _mapping_get_ir_maprange(sc, &start_idx, &end_idx);
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
+ if (mt_entry->physical_id == wwid)
+ return map_idx;
+
+ return MPS_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_mt_idx_from_id - get map table index from a device ID
+ * @sc: per adapter object
+ * @dev_id: device identifier (SAS address)
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_mt_idx_from_id(struct mps_softc *sc, u64 dev_id)
+{
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->physical_id == dev_id)
+ return map_idx;
+ }
+ return MPS_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_ir_mt_idx_from_handle - get map table index from volume handle
+ * @sc: per adapter object
+ * @volHandle: volume device handle
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_ir_mt_idx_from_handle(struct mps_softc *sc, u16 volHandle)
+{
+ u32 start_idx, end_idx, map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ _mapping_get_ir_maprange(sc, &start_idx, &end_idx);
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
+ if (mt_entry->dev_handle == volHandle)
+ return map_idx;
+
+ return MPS_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_mt_idx_from_handle - get map table index from handle
+ * @sc: per adapter object
+ * @handle: device handle
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_mt_idx_from_handle(struct mps_softc *sc, u16 handle)
+{
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->dev_handle == handle)
+ return map_idx;
+ }
+ return MPS_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_free_ir_mt_idx - get first free index for a volume
+ * @sc: per adapter object
+ *
+ * Searches through the mapping table for a free index for a volume; if no
+ * index is free, falls back to the volume entry with the highest missing count
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_free_ir_mt_idx(struct mps_softc *sc)
+{
+ u8 high_missing_count = 0;
+ u32 start_idx, end_idx, map_idx;
+ u32 high_idx = MPS_MAPTABLE_BAD_IDX;
+ struct dev_mapping_table *mt_entry;
+
+ _mapping_get_ir_maprange(sc, &start_idx, &end_idx);
+
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
+ if (!(mt_entry->device_info & MPS_MAP_IN_USE))
+ return map_idx;
+
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) {
+ if (mt_entry->missing_count > high_missing_count) {
+ high_missing_count = mt_entry->missing_count;
+ high_idx = map_idx;
+ }
+ }
+ return high_idx;
+}
+
+/**
+ * _mapping_get_free_mt_idx - get first free index for a device
+ * @sc: per adapter object
+ * @start_idx: offset in the table to start search
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_free_mt_idx(struct mps_softc *sc, u32 start_idx)
+{
+ u32 map_idx, max_idx = sc->max_devices;
+ struct dev_mapping_table *mt_entry = &sc->mapping_table[start_idx];
+ u16 volume_mapping_flags;
+
+ volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (sc->ir_firmware && (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING))
+ max_idx -= sc->max_volumes;
+ for (map_idx = start_idx; map_idx < max_idx; map_idx++, mt_entry++)
+ if (!(mt_entry->device_info & (MPS_MAP_IN_USE |
+ MPS_DEV_RESERVED)))
+ return map_idx;
+
+ return MPS_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_dpm_idx_from_id - get DPM index from ID
+ * @sc: per adapter object
+ * @id: volume WWID or enclosure ID or device ID
+ * @phy_bits: PHY bitmap to match, or 0 to match any entry
+ *
+ * Returns the index of DPM entry on success or bad index.
+ */
+static u16
+_mapping_get_dpm_idx_from_id(struct mps_softc *sc, u64 id, u32 phy_bits)
+{
+ u16 entry_num;
+ uint64_t PhysicalIdentifier;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++,
+ dpm_entry++) {
+ /* Rebuild the 64-bit ID from each entry, not just the first. */
+ PhysicalIdentifier = dpm_entry->PhysicalIdentifier.High;
+ PhysicalIdentifier = (PhysicalIdentifier << 32) |
+ dpm_entry->PhysicalIdentifier.Low;
+ if ((id == PhysicalIdentifier) &&
+ (!phy_bits || !dpm_entry->PhysicalBitsMapping ||
+ (phy_bits & dpm_entry->PhysicalBitsMapping)))
+ return entry_num;
+ }
+
+ return MPS_DPM_BAD_IDX;
+}
+
+/**
+ * _mapping_get_free_dpm_idx - get first available DPM index
+ * @sc: per adapter object
+ *
+ * Returns the index of DPM entry on success or bad index.
+ */
+static u32
+_mapping_get_free_dpm_idx(struct mps_softc *sc)
+{
+ u16 entry_num;
+
+ for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) {
+ if (!sc->dpm_entry_used[entry_num])
+ return entry_num;
+ }
+ return MPS_DPM_BAD_IDX;
+}
+
+/**
+ * _mapping_update_ir_missing_cnt - Updates missing count for a volume
+ * @sc: per adapter object
+ * @map_idx: map table index of the volume
+ * @element: IR configuration change element
+ * @wwid: IR volume ID.
+ *
+ * Updates the missing count in the map table and in the DPM entry for a volume
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_update_ir_missing_cnt(struct mps_softc *sc, u32 map_idx,
+ Mpi2EventIrConfigElement_t *element, u64 wwid)
+{
+ struct dev_mapping_table *mt_entry;
+ u8 missing_cnt, reason = element->ReasonCode;
+ u16 dpm_idx;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+
+ if (!sc->is_dpm_enable)
+ return;
+ mt_entry = &sc->mapping_table[map_idx];
+ if (reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) {
+ mt_entry->missing_count = 0;
+ } else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) {
+ mt_entry->missing_count = 0;
+ mt_entry->init_complete = 0;
+ } else if ((reason == MPI2_EVENT_IR_CHANGE_RC_REMOVED) ||
+ (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED)) {
+ if (!mt_entry->init_complete) {
+ if (mt_entry->missing_count < MPS_MAX_MISSING_COUNT)
+ mt_entry->missing_count++;
+ else
+ mt_entry->init_complete = 1;
+ }
+ if (!mt_entry->missing_count)
+ mt_entry->missing_count++;
+ mt_entry->dev_handle = 0;
+ }
+
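+ /*
+ * Locate the DPM entry backing this volume: use the cached index if
+ * valid, otherwise search DPM page 0 by the volume's physical ID.
+ */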
+ dpm_idx = mt_entry->dpm_entry_num;
+ if (dpm_idx == MPS_DPM_BAD_IDX) {
+ if ((reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) ||
+ (reason == MPI2_EVENT_IR_CHANGE_RC_REMOVED))
+ dpm_idx = _mapping_get_dpm_idx_from_id(sc,
+ mt_entry->physical_id, 0);
+ else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED)
+ return;
+ }
+ if (dpm_idx != MPS_DPM_BAD_IDX) {
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += dpm_idx;
+ missing_cnt = dpm_entry->MappingInformation &
+ MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
+ if ((mt_entry->physical_id ==
+ (((u64)dpm_entry->PhysicalIdentifier.High << 32) |
+ dpm_entry->PhysicalIdentifier.Low)) && (missing_cnt ==
+ mt_entry->missing_count))
+ mt_entry->init_complete = 1;
+ } else {
+ dpm_idx = _mapping_get_free_dpm_idx(sc);
+ mt_entry->init_complete = 0;
+ }
+
+ if ((dpm_idx != MPS_DPM_BAD_IDX) && !mt_entry->init_complete) {
+ mt_entry->init_complete = 1;
+ mt_entry->dpm_entry_num = dpm_idx;
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += dpm_idx;
+ dpm_entry->PhysicalIdentifier.Low =
+ (0xFFFFFFFF & mt_entry->physical_id);
+ dpm_entry->PhysicalIdentifier.High =
+ (mt_entry->physical_id >> 32);
+ dpm_entry->DeviceIndex = map_idx;
+ dpm_entry->MappingInformation = mt_entry->missing_count;
+ dpm_entry->PhysicalBitsMapping = 0;
+ dpm_entry->Reserved1 = 0;
+ sc->dpm_flush_entry[dpm_idx] = 1;
+ sc->dpm_entry_used[dpm_idx] = 1;
+ } else if (dpm_idx == MPS_DPM_BAD_IDX) {
+ printf("%s: no space to add entry in DPM table\n", __func__);
+ mt_entry->init_complete = 1;
+ }
+}
+
+/**
+ * _mapping_add_to_removal_table - mark an entry for removal
+ * @sc: per adapter object
+ * @handle: handle of the enclosure/device/volume
+ * @dpm_idx: DPM entry number, or 0 if not applicable
+ *
+ * Adds the handle or the DPM entry number to the removal table.
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_add_to_removal_table(struct mps_softc *sc, u16 handle,
+ u16 dpm_idx)
+{
+ struct map_removal_table *remove_entry;
+ u32 i;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ remove_entry = sc->removal_table;
+
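+ /*
+ * Find the first free slot in the removal table and record either the
+ * DPM entry number (enclosure/slot mapping) or the device handle
+ * (device persistence mapping).
+ */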
+ for (i = 0; i < sc->max_devices; i++, remove_entry++) {
+ if (remove_entry->dev_handle || remove_entry->dpm_entry_num !=
+ MPS_DPM_BAD_IDX)
+ continue;
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
+ if (dpm_idx)
+ remove_entry->dpm_entry_num = dpm_idx;
+ if (remove_entry->dpm_entry_num == MPS_DPM_BAD_IDX)
+ remove_entry->dev_handle = handle;
+ } else if ((ioc_pg8_flags &
+ MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING)
+ remove_entry->dev_handle = handle;
+ break;
+ }
+}
+
+/**
+ * _mapping_update_missing_count - Update missing count for a device
+ * @sc: per adapter object
+ * @topo_change: Topology change event entry
+ *
+ * Searches through the topology change list; for any device found to be not
+ * responding, its associated map table entry and DPM entry are updated
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_update_missing_count(struct mps_softc *sc,
+ struct _map_topology_change *topo_change)
+{
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ u8 entry;
+ struct _map_phy_change *phy_change;
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+
+ for (entry = 0; entry < topo_change->num_entries; entry++) {
+ phy_change = &topo_change->phy_details[entry];
+ if (!phy_change->dev_handle || (phy_change->reason !=
+ MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
+ continue;
+ map_idx = _mapping_get_mt_idx_from_handle(sc, phy_change->
+ dev_handle);
+ phy_change->is_processed = 1;
+ if (map_idx == MPS_MAPTABLE_BAD_IDX) {
+ printf("%s: device is already removed from mapping "
+ "table\n", __func__);
+ continue;
+ }
+ mt_entry = &sc->mapping_table[map_idx];
+ if (!mt_entry->init_complete) {
+ if (mt_entry->missing_count < MPS_MAX_MISSING_COUNT)
+ mt_entry->missing_count++;
+ else
+ mt_entry->init_complete = 1;
+ }
+ if (!mt_entry->missing_count)
+ mt_entry->missing_count++;
+ _mapping_add_to_removal_table(sc, mt_entry->dev_handle, 0);
+ mt_entry->dev_handle = 0;
+
+ if (((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) &&
+ sc->is_dpm_enable && !mt_entry->init_complete &&
+ mt_entry->dpm_entry_num != MPS_DPM_BAD_IDX) {
+ dpm_entry =
+ (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += mt_entry->dpm_entry_num;
+ dpm_entry->MappingInformation = mt_entry->missing_count;
+ sc->dpm_flush_entry[mt_entry->dpm_entry_num] = 1;
+ }
+ mt_entry->init_complete = 1;
+ }
+}
+
+/**
+ * _mapping_find_enc_map_space - find map table entries for an enclosure
+ * @sc: per adapter object
+ * @et_entry: enclosure entry
+ *
+ * Searches through the mapping table and, if needed, defragments it to
+ * provide contiguous space in the map table for a particular enclosure entry
+ *
+ * Returns start index in map table or bad index.
+ */
+static u32
+_mapping_find_enc_map_space(struct mps_softc *sc,
+ struct enc_mapping_table *et_entry)
+{
+ u16 vol_mapping_flags;
+ u32 skip_count, end_of_table, map_idx, enc_idx;
+ u16 num_found;
+ u32 start_idx = MPS_MAPTABLE_BAD_IDX;
+ struct dev_mapping_table *mt_entry;
+ struct enc_mapping_table *enc_entry;
+ unsigned char done_flag = 0, found_space;
+ u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
+
+ skip_count = sc->num_rsvd_entries;
+ num_found = 0;
+
+ vol_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+
+ if (!sc->ir_firmware)
+ end_of_table = sc->max_devices;
+ else if (vol_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING)
+ end_of_table = sc->max_devices;
+ else
+ end_of_table = sc->max_devices - sc->max_volumes;
+
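+ /*
+ * Pass 1: look for a contiguous run of entries that already belong to
+ * this enclosure (matching enclosure ID and compatible phy bits).
+ */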
+ for (map_idx = (max_num_phy_ids + skip_count);
+ map_idx < end_of_table; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if ((et_entry->enclosure_id == mt_entry->physical_id) &&
+ (!mt_entry->phy_bits || (mt_entry->phy_bits &
+ et_entry->phy_bits))) {
+ num_found += 1;
+ if (num_found == et_entry->num_slots) {
+ start_idx = (map_idx - num_found) + 1;
+ return start_idx;
+ }
+ } else
+ num_found = 0;
+ }
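+ /* Pass 2: look for a contiguous run of unreserved entries. */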
+ num_found = 0;
+ for (map_idx = (max_num_phy_ids + skip_count);
+ map_idx < end_of_table; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (!(mt_entry->device_info & MPS_DEV_RESERVED)) {
+ num_found += 1;
+ if (num_found == et_entry->num_slots) {
+ start_idx = (map_idx - num_found) + 1;
+ return start_idx;
+ }
+ } else
+ num_found = 0;
+ }
+
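+ /*
+ * Pass 3: no free run was found, so evict the enclosure with the
+ * highest missing count and retry until space is reclaimed or no
+ * candidate enclosure remains.
+ */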
+ while (!done_flag) {
+ enc_idx = _mapping_get_high_missing_et_idx(sc);
+ if (enc_idx == MPS_ENCTABLE_BAD_IDX)
+ return MPS_MAPTABLE_BAD_IDX;
+ enc_entry = &sc->enclosure_table[enc_idx];
+ /*VSP FIXME*/
+ enc_entry->skip_search = 1;
+ mt_entry = &sc->mapping_table[enc_entry->start_index];
+ for (map_idx = enc_entry->start_index; map_idx <
+ (enc_entry->start_index + enc_entry->num_slots); map_idx++,
+ mt_entry++)
+ mt_entry->device_info &= ~MPS_DEV_RESERVED;
+ num_found = 0;
+ found_space = 0;
+ for (map_idx = (max_num_phy_ids +
+ skip_count); map_idx < end_of_table; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (!(mt_entry->device_info & MPS_DEV_RESERVED)) {
+ num_found += 1;
+ if (num_found == et_entry->num_slots) {
+ start_idx = (map_idx - num_found) + 1;
+ found_space = 1;
+ }
+ } else
+ num_found = 0;
+ }
+
+ if (!found_space)
+ continue;
+ for (map_idx = start_idx; map_idx < (start_idx + num_found);
+ map_idx++) {
+ enc_entry = sc->enclosure_table;
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries;
+ enc_idx++, enc_entry++) {
+ if (map_idx < enc_entry->start_index ||
+ map_idx > (enc_entry->start_index +
+ enc_entry->num_slots))
+ continue;
+ if (!enc_entry->removal_flag) {
+ enc_entry->removal_flag = 1;
+ _mapping_add_to_removal_table(sc, 0,
+ enc_entry->dpm_entry_num);
+ }
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->device_info &
+ MPS_MAP_IN_USE) {
+ _mapping_add_to_removal_table(sc,
+ mt_entry->dev_handle, 0);
+ _mapping_clear_map_entry(mt_entry);
+ }
+ if (map_idx == (enc_entry->start_index +
+ enc_entry->num_slots - 1))
+ _mapping_clear_enc_entry(et_entry);
+ }
+ }
+ enc_entry = sc->enclosure_table;
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries;
+ enc_idx++, enc_entry++) {
+ if (!enc_entry->removal_flag) {
+ mt_entry = &sc->mapping_table[enc_entry->
+ start_index];
+ for (map_idx = enc_entry->start_index; map_idx <
+ (enc_entry->start_index +
+ enc_entry->num_slots); map_idx++,
+ mt_entry++)
+ mt_entry->device_info |=
+ MPS_DEV_RESERVED;
+ et_entry->skip_search = 0;
+ }
+ }
+ done_flag = 1;
+ }
+ return start_idx;
+}
+
+/**
+ * _mapping_get_dev_info - get information about newly added devices
+ * @sc: per adapter object
+ * @topo_change: Topology change event entry
+ *
+ * Searches through the topology change event list, issues SAS device pg0
+ * requests for the newly added devices, and reserves entries in the tables.
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_get_dev_info(struct mps_softc *sc,
+ struct _map_topology_change *topo_change)
+{
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u8 entry, enc_idx, phy_idx;
+ u32 map_idx, index, device_info;
+ struct _map_phy_change *phy_change, *tmp_phy_change;
+ uint64_t sas_address;
+ struct enc_mapping_table *et_entry;
+ struct dev_mapping_table *mt_entry;
+ u8 add_code = MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED;
+ int rc;
+
+ for (entry = 0; entry < topo_change->num_entries; entry++) {
+ phy_change = &topo_change->phy_details[entry];
+ if (phy_change->is_processed || !phy_change->dev_handle ||
+ phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED)
+ continue;
+ if (mps_config_get_sas_device_pg0(sc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ phy_change->dev_handle)) {
+ phy_change->is_processed = 1;
+ continue;
+ }
+
+ device_info = le32toh(sas_device_pg0.DeviceInfo);
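+ /*
+ * For SATA end devices, use a hashed SAS address as the persistent
+ * device ID; if the hash cannot be computed, fall back to the SAS
+ * address reported in device page 0.
+ */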
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
+ if ((device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) &&
+ (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) {
+ rc = mpssas_get_sas_address_for_sata_disk(sc,
+ &sas_address, phy_change->dev_handle,
+ device_info);
+ if (rc) {
+ printf("%s: failed to compute the "
+ "hashed SAS Address for SATA "
+ "device with handle 0x%04x\n",
+ __func__, phy_change->dev_handle);
+ sas_address =
+ sas_device_pg0.SASAddress.High;
+ sas_address = (sas_address << 32) |
+ sas_device_pg0.SASAddress.Low;
+ }
+ mps_dprint(sc, MPS_INFO, "SAS Address for SATA "
+ "device = %jx\n", (uintmax_t)sas_address);
+ } else {
+ sas_address =
+ sas_device_pg0.SASAddress.High;
+ sas_address = (sas_address << 32) |
+ sas_device_pg0.SASAddress.Low;
+ }
+ } else {
+ sas_address = sas_device_pg0.SASAddress.High;
+ sas_address = (sas_address << 32) |
+ sas_device_pg0.SASAddress.Low;
+ }
+ phy_change->physical_id = sas_address;
+ phy_change->slot = le16toh(sas_device_pg0.Slot);
+ phy_change->device_info =
+ le32toh(sas_device_pg0.DeviceInfo);
+
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
+ enc_idx = _mapping_get_enc_idx_from_handle(sc,
+ topo_change->enc_handle);
+ if (enc_idx == MPS_ENCTABLE_BAD_IDX) {
+ phy_change->is_processed = 1;
+ printf("%s: failed to add the device with "
+ "handle 0x%04x because the enclosure is "
+ "not in the mapping table\n", __func__,
+ phy_change->dev_handle);
+ continue;
+ }
+ if (!((phy_change->device_info &
+ MPI2_SAS_DEVICE_INFO_END_DEVICE) &&
+ (phy_change->device_info &
+ (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
+ MPI2_SAS_DEVICE_INFO_STP_TARGET |
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))) {
+ phy_change->is_processed = 1;
+ continue;
+ }
+ et_entry = &sc->enclosure_table[enc_idx];
+ if (et_entry->start_index != MPS_MAPTABLE_BAD_IDX)
+ continue;
+ if (!topo_change->exp_handle) {
+ map_idx = sc->num_rsvd_entries;
+ et_entry->start_index = map_idx;
+ } else {
+ map_idx = _mapping_find_enc_map_space(sc,
+ et_entry);
+ et_entry->start_index = map_idx;
+ if (et_entry->start_index ==
+ MPS_MAPTABLE_BAD_IDX) {
+ phy_change->is_processed = 1;
+ for (phy_idx = 0; phy_idx <
+ topo_change->num_entries;
+ phy_idx++) {
+ tmp_phy_change =
+ &topo_change->phy_details
+ [phy_idx];
+ if (tmp_phy_change->reason ==
+ add_code)
+ tmp_phy_change->
+ is_processed = 1;
+ }
+ break;
+ }
+ }
+ mt_entry = &sc->mapping_table[map_idx];
+ for (index = map_idx; index < (et_entry->num_slots
+ + map_idx); index++, mt_entry++) {
+ mt_entry->device_info = MPS_DEV_RESERVED;
+ mt_entry->physical_id = et_entry->enclosure_id;
+ mt_entry->phy_bits = et_entry->phy_bits;
+ }
+ }
+ }
+}
+
+/**
+ * _mapping_set_mid_to_eid - set map table data from the enclosure table
+ * @sc: per adapter object
+ * @et_entry: enclosure entry
+ *
+ * Returns nothing.
+ */
+static inline void
+_mapping_set_mid_to_eid(struct mps_softc *sc,
+ struct enc_mapping_table *et_entry)
+{
+ struct dev_mapping_table *mt_entry;
+ u16 slots = et_entry->num_slots, map_idx;
+ u32 start_idx = et_entry->start_index;
+ if (start_idx != MPS_MAPTABLE_BAD_IDX) {
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = 0; map_idx < slots; map_idx++, mt_entry++)
+ mt_entry->physical_id = et_entry->enclosure_id;
+ }
+}
+
+/**
+ * _mapping_clear_removed_entries - mark the entries to be cleared
+ * @sc: per adapter object
+ *
+ * Searches through the removal table and marks the entries that need to be
+ * flushed to DPM, and also updates the map table and the enclosure table by
+ * clearing the corresponding entries.
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_clear_removed_entries(struct mps_softc *sc)
+{
+ u32 remove_idx;
+ struct map_removal_table *remove_entry;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ u8 done_flag = 0, num_entries, m, i;
+ struct enc_mapping_table *et_entry, *from, *to;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ if (sc->is_dpm_enable) {
+ remove_entry = sc->removal_table;
+ for (remove_idx = 0; remove_idx < sc->max_devices;
+ remove_idx++, remove_entry++) {
+ if (remove_entry->dpm_entry_num != MPS_DPM_BAD_IDX) {
+ dpm_entry = (Mpi2DriverMap0Entry_t *)
+ ((u8 *) sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += remove_entry->dpm_entry_num;
+ dpm_entry->PhysicalIdentifier.Low = 0;
+ dpm_entry->PhysicalIdentifier.High = 0;
+ dpm_entry->DeviceIndex = 0;
+ dpm_entry->MappingInformation = 0;
+ dpm_entry->PhysicalBitsMapping = 0;
+ sc->dpm_flush_entry[remove_entry->
+ dpm_entry_num] = 1;
+ sc->dpm_entry_used[remove_entry->dpm_entry_num]
+ = 0;
+ remove_entry->dpm_entry_num = MPS_DPM_BAD_IDX;
+ }
+ }
+ }
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
+ num_entries = sc->num_enc_table_entries;
+ while (!done_flag) {
+ done_flag = 1;
+ et_entry = sc->enclosure_table;
+ for (i = 0; i < num_entries; i++, et_entry++) {
+ if (!et_entry->enc_handle && et_entry->
+ init_complete) {
+ done_flag = 0;
+ if (i != (num_entries - 1)) {
+ from = &sc->enclosure_table
+ [i+1];
+ to = &sc->enclosure_table[i];
+ for (m = i; m < (num_entries -
+ 1); m++, from++, to++) {
+ _mapping_set_mid_to_eid
+ (sc, to);
+ *to = *from;
+ }
+ _mapping_clear_enc_entry(to);
+ sc->num_enc_table_entries--;
+ num_entries =
+ sc->num_enc_table_entries;
+ } else {
+ _mapping_clear_enc_entry
+ (et_entry);
+ sc->num_enc_table_entries--;
+ num_entries =
+ sc->num_enc_table_entries;
+ }
+ }
+ }
+ }
+ }
+}
+
+/**
+ * _mapping_add_new_device - add the new devices to the mapping table
+ * @sc: per adapter object
+ * @topo_change: Topology change event entry
+ *
+ * Searches through the topology change event list and updates the map table,
+ * the enclosure table, and the DPM pages for the newly added devices.
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_add_new_device(struct mps_softc *sc,
+ struct _map_topology_change *topo_change)
+{
+ u8 enc_idx, missing_cnt, is_removed = 0;
+ u16 dpm_idx;
+ u32 search_idx, map_idx;
+ u32 entry;
+ struct dev_mapping_table *mt_entry;
+ struct enc_mapping_table *et_entry;
+ struct _map_phy_change *phy_change;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ uint64_t temp64_var;
+ u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
+ u8 hdr_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER);
+ u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
+
+ for (entry = 0; entry < topo_change->num_entries; entry++) {
+ phy_change = &topo_change->phy_details[entry];
+ if (phy_change->is_processed)
+ continue;
+ if (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED ||
+ !phy_change->dev_handle) {
+ phy_change->is_processed = 1;
+ continue;
+ }
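+ /*
+ * Enclosure/slot mapping: the map table index is derived from
+ * the enclosure's start index plus the device's slot offset.
+ */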
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
+ enc_idx = _mapping_get_enc_idx_from_handle
+ (sc, topo_change->enc_handle);
+ if (enc_idx == MPS_ENCTABLE_BAD_IDX) {
+ phy_change->is_processed = 1;
+ printf("%s: failed to add the device with "
+ "handle 0x%04x because the enclosure is "
+ "not in the mapping table\n", __func__,
+ phy_change->dev_handle);
+ continue;
+ }
+ et_entry = &sc->enclosure_table[enc_idx];
+ if (et_entry->start_index == MPS_MAPTABLE_BAD_IDX) {
+ phy_change->is_processed = 1;
+ if (!sc->mt_full_retry) {
+ sc->mt_add_device_failed = 1;
+ continue;
+ }
+ printf("%s: failed to add the device with "
+ "handle 0x%04x because there is no free "
+ "space available in the mapping table\n",
+ __func__, phy_change->dev_handle);
+ continue;
+ }
+ map_idx = et_entry->start_index + phy_change->slot -
+ et_entry->start_slot;
+ mt_entry = &sc->mapping_table[map_idx];
+ mt_entry->physical_id = phy_change->physical_id;
+ mt_entry->channel = 0;
+ mt_entry->id = map_idx;
+ mt_entry->dev_handle = phy_change->dev_handle;
+ mt_entry->missing_count = 0;
+ mt_entry->dpm_entry_num = et_entry->dpm_entry_num;
+ mt_entry->device_info = phy_change->device_info |
+ (MPS_DEV_RESERVED | MPS_MAP_IN_USE);
+ if (sc->is_dpm_enable) {
+ dpm_idx = et_entry->dpm_entry_num;
+ if (dpm_idx == MPS_DPM_BAD_IDX)
+ dpm_idx = _mapping_get_dpm_idx_from_id
+ (sc, et_entry->enclosure_id,
+ et_entry->phy_bits);
+ if (dpm_idx == MPS_DPM_BAD_IDX) {
+ dpm_idx = _mapping_get_free_dpm_idx(sc);
+ if (dpm_idx != MPS_DPM_BAD_IDX) {
+ dpm_entry =
+ (Mpi2DriverMap0Entry_t *)
+ ((u8 *) sc->dpm_pg0 +
+ hdr_sz);
+ dpm_entry += dpm_idx;
+ dpm_entry->
+ PhysicalIdentifier.Low =
+ (0xFFFFFFFF &
+ et_entry->enclosure_id);
+ dpm_entry->
+ PhysicalIdentifier.High =
+ ( et_entry->enclosure_id
+ >> 32);
+ dpm_entry->DeviceIndex =
+ (U16)et_entry->start_index;
+ dpm_entry->MappingInformation =
+ et_entry->num_slots;
+ dpm_entry->MappingInformation
+ <<= map_shift;
+ dpm_entry->PhysicalBitsMapping
+ = et_entry->phy_bits;
+ et_entry->dpm_entry_num =
+ dpm_idx;
+ /* FIXME: Do we need to set dpm_idx in mt_entry too? */
+ sc->dpm_entry_used[dpm_idx] = 1;
+ sc->dpm_flush_entry[dpm_idx] =
+ 1;
+ phy_change->is_processed = 1;
+ } else {
+ phy_change->is_processed = 1;
+ printf("%s: failed to add the "
+ "device with handle 0x%04x "
+ "to persistent table "
+ "because there is no free "
+ "space available\n",
+ __func__,
+ phy_change->dev_handle);
+ }
+ } else {
+ et_entry->dpm_entry_num = dpm_idx;
+ mt_entry->dpm_entry_num = dpm_idx;
+ }
+ }
+ /* FIXME: Why not mt_entry too? */
+ et_entry->init_complete = 1;
+ } else if ((ioc_pg8_flags &
+ MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
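+ /*
+ * Device persistence mapping: reuse the entry already mapped
+ * to this SAS address if one exists; otherwise take a free
+ * entry, or as a last resort evict the entry with the highest
+ * missing count.
+ */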
+ map_idx = _mapping_get_mt_idx_from_id
+ (sc, phy_change->physical_id);
+ if (map_idx == MPS_MAPTABLE_BAD_IDX) {
+ search_idx = sc->num_rsvd_entries;
+ if (topo_change->exp_handle)
+ search_idx += max_num_phy_ids;
+ map_idx = _mapping_get_free_mt_idx(sc,
+ search_idx);
+ }
+ if (map_idx == MPS_MAPTABLE_BAD_IDX) {
+ map_idx = _mapping_get_high_missing_mt_idx(sc);
+ if (map_idx != MPS_MAPTABLE_BAD_IDX) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->dev_handle) {
+ _mapping_add_to_removal_table
+ (sc, mt_entry->dev_handle,
+ 0);
+ is_removed = 1;
+ }
+ mt_entry->init_complete = 0;
+ }
+ }
+ if (map_idx != MPS_MAPTABLE_BAD_IDX) {
+ mt_entry = &sc->mapping_table[map_idx];
+ mt_entry->physical_id = phy_change->physical_id;
+ mt_entry->channel = 0;
+ mt_entry->id = map_idx;
+ mt_entry->dev_handle = phy_change->dev_handle;
+ mt_entry->missing_count = 0;
+ mt_entry->device_info = phy_change->device_info
+ | (MPS_DEV_RESERVED | MPS_MAP_IN_USE);
+ } else {
+ phy_change->is_processed = 1;
+ if (!sc->mt_full_retry) {
+ sc->mt_add_device_failed = 1;
+ continue;
+ }
+ printf("%s: failed to add the device with "
+ "handle 0x%04x because there is no free "
+ "space available in the mapping table\n",
+ __func__, phy_change->dev_handle);
+ continue;
+ }
+ if (sc->is_dpm_enable) {
+ if (mt_entry->dpm_entry_num !=
+ MPS_DPM_BAD_IDX) {
+ dpm_idx = mt_entry->dpm_entry_num;
+ dpm_entry = (Mpi2DriverMap0Entry_t *)
+ ((u8 *)sc->dpm_pg0 + hdr_sz);
+ dpm_entry += dpm_idx;
+ missing_cnt = dpm_entry->
+ MappingInformation &
+ MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
+ temp64_var = dpm_entry->
+ PhysicalIdentifier.High;
+ temp64_var = (temp64_var << 32) |
+ dpm_entry->PhysicalIdentifier.Low;
+ if ((mt_entry->physical_id ==
+ temp64_var) && !missing_cnt)
+ mt_entry->init_complete = 1;
+ } else {
+ dpm_idx = _mapping_get_free_dpm_idx(sc);
+ mt_entry->init_complete = 0;
+ }
+ if (dpm_idx != MPS_DPM_BAD_IDX &&
+ !mt_entry->init_complete) {
+ mt_entry->init_complete = 1;
+ mt_entry->dpm_entry_num = dpm_idx;
+ dpm_entry = (Mpi2DriverMap0Entry_t *)
+ ((u8 *)sc->dpm_pg0 + hdr_sz);
+ dpm_entry += dpm_idx;
+ dpm_entry->PhysicalIdentifier.Low =
+ (0xFFFFFFFF &
+ mt_entry->physical_id);
+ dpm_entry->PhysicalIdentifier.High =
+ (mt_entry->physical_id >> 32);
+ dpm_entry->DeviceIndex = (U16) map_idx;
+ dpm_entry->MappingInformation = 0;
+ dpm_entry->PhysicalBitsMapping = 0;
+ sc->dpm_entry_used[dpm_idx] = 1;
+ sc->dpm_flush_entry[dpm_idx] = 1;
+ phy_change->is_processed = 1;
+ } else if (dpm_idx == MPS_DPM_BAD_IDX) {
+ phy_change->is_processed = 1;
+ printf("%s: failed to add the "
+ "device with handle 0x%04x "
+ "to persistent table "
+ "because there is no free "
+ "space available\n",
+ __func__,
+ phy_change->dev_handle);
+ }
+ }
+ mt_entry->init_complete = 1;
+ }
+
+ phy_change->is_processed = 1;
+ }
+ if (is_removed)
+ _mapping_clear_removed_entries(sc);
+}
+
+/**
+ * _mapping_flush_dpm_pages - flush the DPM pages to NVRAM
+ * @sc: per adapter object
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_flush_dpm_pages(struct mps_softc *sc)
+{
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2DriverMappingPage0_t config_page;
+ u16 entry_num;
+
+ for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) {
+ if (!sc->dpm_flush_entry[entry_num])
+ continue;
+ memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
+ memcpy(&config_page.Header, (u8 *)sc->dpm_pg0,
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += entry_num;
+ dpm_entry->MappingInformation = htole16(dpm_entry->
+ MappingInformation);
+ dpm_entry->DeviceIndex = htole16(dpm_entry->DeviceIndex);
+ dpm_entry->PhysicalBitsMapping = htole32(dpm_entry->
+ PhysicalBitsMapping);
+ memcpy(&config_page.Entry, (u8 *)dpm_entry,
+ sizeof(Mpi2DriverMap0Entry_t));
+ /* TODO: How to handle failed writes? */
+ if (mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
+ entry_num)) {
+ printf("%s: write of dpm entry %d for device failed\n",
+ __func__, entry_num);
+ } else
+ sc->dpm_flush_entry[entry_num] = 0;
+ dpm_entry->MappingInformation = le16toh(dpm_entry->
+ MappingInformation);
+ dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
+ dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry->
+ PhysicalBitsMapping);
+ }
+}
+
+/**
+ * mps_mapping_allocate_memory - allocate memory for the mapping tables
+ * @sc: per adapter object
+ *
+ * Allocates the memory for all the tables required for host mapping
+ *
+ * Returns 0 on success or non-zero on failure.
+ */
+int
+mps_mapping_allocate_memory(struct mps_softc *sc)
+{
+ uint32_t dpm_pg0_sz;
+
+ sc->mapping_table = malloc((sizeof(struct dev_mapping_table) *
+ sc->max_devices), M_MPT2, M_ZERO|M_NOWAIT);
+ if (!sc->mapping_table)
+ goto free_resources;
+
+ sc->removal_table = malloc((sizeof(struct map_removal_table) *
+ sc->max_devices), M_MPT2, M_ZERO|M_NOWAIT);
+ if (!sc->removal_table)
+ goto free_resources;
+
+ sc->enclosure_table = malloc((sizeof(struct enc_mapping_table) *
+ sc->max_enclosures), M_MPT2, M_ZERO|M_NOWAIT);
+ if (!sc->enclosure_table)
+ goto free_resources;
+
+ sc->dpm_entry_used = malloc((sizeof(u8) * sc->max_dpm_entries),
+ M_MPT2, M_ZERO|M_NOWAIT);
+ if (!sc->dpm_entry_used)
+ goto free_resources;
+
+ sc->dpm_flush_entry = malloc((sizeof(u8) * sc->max_dpm_entries),
+ M_MPT2, M_ZERO|M_NOWAIT);
+ if (!sc->dpm_flush_entry)
+ goto free_resources;
+
+ dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) +
+ (sc->max_dpm_entries * sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY));
+
+ sc->dpm_pg0 = malloc(dpm_pg0_sz, M_MPT2, M_ZERO|M_NOWAIT);
+ if (!sc->dpm_pg0) {
+ printf("%s: memory alloc failed for dpm page; disabling dpm\n",
+ __func__);
+ sc->is_dpm_enable = 0;
+ }
+
+ return 0;
+
+free_resources:
+ free(sc->mapping_table, M_MPT2);
+ free(sc->removal_table, M_MPT2);
+ free(sc->enclosure_table, M_MPT2);
+ free(sc->dpm_entry_used, M_MPT2);
+ free(sc->dpm_flush_entry, M_MPT2);
+ free(sc->dpm_pg0, M_MPT2);
+ printf("%s: device initialization failed due to failure in mapping "
+ "table memory allocation\n", __func__);
+ return -1;
+}
+
+/**
+ * mps_mapping_free_memory - free the memory allocated for the mapping tables
+ * @sc: per adapter object
+ *
+ * Returns nothing.
+ */
+void
+mps_mapping_free_memory(struct mps_softc *sc)
+{
+ free(sc->mapping_table, M_MPT2);
+ free(sc->removal_table, M_MPT2);
+ free(sc->enclosure_table, M_MPT2);
+ free(sc->dpm_entry_used, M_MPT2);
+ free(sc->dpm_flush_entry, M_MPT2);
+ free(sc->dpm_pg0, M_MPT2);
+}
+
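+/**
+ * _mapping_process_dpm_pg0 - rebuild mapping tables from DPM page 0
+ * @sc: per adapter object
+ *
+ * Walks the persistent DPM page 0 entries read from the controller and
+ * repopulates the in-core device mapping table and enclosure table.
+ *
+ * Returns nothing.
+ */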
+static void
+_mapping_process_dpm_pg0(struct mps_softc *sc)
+{
+ u8 missing_cnt, enc_idx;
+ u16 slot_id, entry_num, num_slots;
+ u32 map_idx, dev_idx, start_idx, end_idx;
+ struct dev_mapping_table *mt_entry;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
+ struct enc_mapping_table *et_entry;
+ u64 physical_id;
+ u32 phy_bits = 0;
+
+ if (sc->ir_firmware)
+ _mapping_get_ir_maprange(sc, &start_idx, &end_idx);
+
+ dpm_entry = (Mpi2DriverMap0Entry_t *) ((uint8_t *) sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++,
+ dpm_entry++) {
+ physical_id = dpm_entry->PhysicalIdentifier.High;
+ physical_id = (physical_id << 32) |
+ dpm_entry->PhysicalIdentifier.Low;
+ if (!physical_id) {
+ sc->dpm_entry_used[entry_num] = 0;
+ continue;
+ }
+ sc->dpm_entry_used[entry_num] = 1;
+ dpm_entry->MappingInformation = le16toh(dpm_entry->
+ MappingInformation);
+ missing_cnt = dpm_entry->MappingInformation &
+ MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
+ dev_idx = le16toh(dpm_entry->DeviceIndex);
+ phy_bits = le32toh(dpm_entry->PhysicalBitsMapping);
+ if (sc->ir_firmware && (dev_idx >= start_idx) &&
+ (dev_idx <= end_idx)) {
+ mt_entry = &sc->mapping_table[dev_idx];
+ mt_entry->physical_id = dpm_entry->PhysicalIdentifier.High;
+ mt_entry->physical_id = (mt_entry->physical_id << 32) |
+ dpm_entry->PhysicalIdentifier.Low;
+ mt_entry->channel = MPS_RAID_CHANNEL;
+ mt_entry->id = dev_idx;
+ mt_entry->missing_count = missing_cnt;
+ mt_entry->dpm_entry_num = entry_num;
+ mt_entry->device_info = MPS_DEV_RESERVED;
+ continue;
+ }
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
+ if (dev_idx < (sc->num_rsvd_entries +
+ max_num_phy_ids)) {
+ slot_id = 0;
+ if (ioc_pg8_flags &
+ MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1)
+ slot_id = 1;
+ num_slots = max_num_phy_ids;
+ } else {
+ slot_id = 0;
+ num_slots = dpm_entry->MappingInformation &
+ MPI2_DRVMAP0_MAPINFO_SLOT_MASK;
+ num_slots >>= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
+ }
+ enc_idx = sc->num_enc_table_entries;
+ if (enc_idx >= sc->max_enclosures) {
+ printf("%s: enclosure entries exceed max "
+ "enclosures of %d\n", __func__,
+ sc->max_enclosures);
+ break;
+ }
+ sc->num_enc_table_entries++;
+ et_entry = &sc->enclosure_table[enc_idx];
+ physical_id = dpm_entry->PhysicalIdentifier.High;
+ et_entry->enclosure_id = (physical_id << 32) |
+ dpm_entry->PhysicalIdentifier.Low;
+ et_entry->start_index = dev_idx;
+ et_entry->dpm_entry_num = entry_num;
+ et_entry->num_slots = num_slots;
+ et_entry->start_slot = slot_id;
+ et_entry->missing_count = missing_cnt;
+ et_entry->phy_bits = phy_bits;
+
+ mt_entry = &sc->mapping_table[dev_idx];
+ for (map_idx = dev_idx; map_idx < (dev_idx + num_slots);
+ map_idx++, mt_entry++) {
+ if (mt_entry->dpm_entry_num !=
+ MPS_DPM_BAD_IDX) {
+ printf("%s: conflict in mapping table "
+ "for enclosure %d\n", __func__,
+ enc_idx);
+ break;
+ }
+ physical_id = dpm_entry->PhysicalIdentifier.High;
+ mt_entry->physical_id = (physical_id << 32) |
+ dpm_entry->PhysicalIdentifier.Low;
+ mt_entry->phy_bits = phy_bits;
+ mt_entry->channel = 0;
+ mt_entry->id = dev_idx;
+ mt_entry->dpm_entry_num = entry_num;
+ mt_entry->missing_count = missing_cnt;
+ mt_entry->device_info = MPS_DEV_RESERVED;
+ }
+ } else if ((ioc_pg8_flags &
+ MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
+ map_idx = dev_idx;
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->dpm_entry_num != MPS_DPM_BAD_IDX) {
+ printf("%s: conflict in mapping table for "
+ "device %d\n", __func__, map_idx);
+ break;
+ }
+ physical_id = dpm_entry->PhysicalIdentifier.High;
+ mt_entry->physical_id = (physical_id << 32) |
+ dpm_entry->PhysicalIdentifier.Low;
+ mt_entry->phy_bits = phy_bits;
+ mt_entry->channel = 0;
+ mt_entry->id = dev_idx;
+ mt_entry->missing_count = missing_cnt;
+ mt_entry->dpm_entry_num = entry_num;
+ mt_entry->device_info = MPS_DEV_RESERVED;
+ }
+ } /* close the loop over the DPM table */
+}
+
+/**
+ * mps_mapping_check_devices - start-of-day check for device availability
+ * @sc: per adapter object
+ * @sleep_flag: Flag indicating whether this function can sleep or not
+ *
+ * Returns nothing.
+ */
+void
+mps_mapping_check_devices(struct mps_softc *sc, int sleep_flag)
+{
+ u32 i;
+/* u32 cntdn, i;
+ u32 timeout = 60;*/
+ struct dev_mapping_table *mt_entry;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ struct enc_mapping_table *et_entry;
+ u32 start_idx, end_idx;
+
+ /* We need to uncomment this when this function is called
+ * from port enable completion */
+#if 0
+ sc->track_mapping_events = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ if (!sc->pending_map_events)
+ break;
+ if (sleep_flag == CAN_SLEEP)
+ pause("mps_pause", (hz/1000));/* 1msec sleep */
+ else
+ DELAY(500); /* 500 useconds delay */
+ } while (--cntdn);
+
+
+ if (!cntdn)
+ printf("%s: there are %d"
+ " pending events after %d seconds of delay\n",
+ __func__, sc->pending_map_events, timeout);
+#endif
+ sc->pending_map_events = 0;
+
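+ /*
+ * Entries not marked init_complete were not seen during discovery:
+ * bump their missing count and, if a DPM entry exists, commit the
+ * new count to DPM page 0.
+ */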
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
+ et_entry = sc->enclosure_table;
+ for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) {
+ if (!et_entry->init_complete) {
+ if (et_entry->missing_count <
+ MPS_MAX_MISSING_COUNT) {
+ et_entry->missing_count++;
+ if (et_entry->dpm_entry_num !=
+ MPS_DPM_BAD_IDX)
+ _mapping_commit_enc_entry(sc,
+ et_entry);
+ }
+ et_entry->init_complete = 1;
+ }
+ }
+ if (!sc->ir_firmware)
+ return;
+ _mapping_get_ir_maprange(sc, &start_idx, &end_idx);
+ mt_entry = &sc->mapping_table[start_idx];
+ for (i = start_idx; i < (end_idx + 1); i++, mt_entry++) {
+ if (mt_entry->device_info & MPS_DEV_RESERVED
+ && !mt_entry->physical_id)
+ mt_entry->init_complete = 1;
+ else if (mt_entry->device_info & MPS_DEV_RESERVED) {
+ if (!mt_entry->init_complete) {
+ if (mt_entry->missing_count <
+ MPS_MAX_MISSING_COUNT) {
+ mt_entry->missing_count++;
+ if (mt_entry->dpm_entry_num !=
+ MPS_DPM_BAD_IDX)
+ _mapping_commit_map_entry(sc,
+ mt_entry);
+ }
+ mt_entry->init_complete = 1;
+ }
+ }
+ }
+ } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
+ mt_entry = sc->mapping_table;
+ for (i = 0; i < sc->max_devices; i++, mt_entry++) {
+ if (mt_entry->device_info & MPS_DEV_RESERVED
+ && !mt_entry->physical_id)
+ mt_entry->init_complete = 1;
+ else if (mt_entry->device_info & MPS_DEV_RESERVED) {
+ if (!mt_entry->init_complete) {
+ if (mt_entry->missing_count <
+ MPS_MAX_MISSING_COUNT) {
+ mt_entry->missing_count++;
+ if (mt_entry->dpm_entry_num !=
+ MPS_DPM_BAD_IDX)
+ _mapping_commit_map_entry(sc,
+ mt_entry);
+ }
+ mt_entry->init_complete = 1;
+ }
+ }
+ }
+ }
+}
+
+/**
+ * mps_mapping_is_reinit_required - check whether event replay is required
+ * @sc: per adapter object
+ *
+ * Checks the per-IOC flags and decides whether a reinit of events is required.
+ *
+ * Returns 1 if the IOC requires reinit, 0 if not.
+ */
+int
+mps_mapping_is_reinit_required(struct mps_softc *sc)
+{
+ if (!sc->mt_full_retry && sc->mt_add_device_failed) {
+ sc->mt_full_retry = 1;
+ sc->mt_add_device_failed = 0;
+ _mapping_flush_dpm_pages(sc);
+ return 1;
+ }
+ sc->mt_full_retry = 1;
+ return 0;
+}
+
+/**
+ * mps_mapping_initialize - initialize mapping tables
+ * @sc: per adapter object
+ *
+ * Reads the controller's persistent mapping tables into the internal data
+ * area.
+ *
+ * Returns 0 for success or non-zero for failure.
+ */
+int
+mps_mapping_initialize(struct mps_softc *sc)
+{
+ uint16_t volume_mapping_flags, dpm_pg0_sz;
+ uint32_t i;
+ Mpi2ConfigReply_t mpi_reply;
+ int error;
+ uint8_t retry_count;
+ uint16_t ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ /* The additional 1 accounts for the virtual enclosure
+ * created for the controller
+ */
+ sc->max_enclosures = sc->facts->MaxEnclosures + 1;
+ sc->max_expanders = sc->facts->MaxSasExpanders;
+ sc->max_volumes = sc->facts->MaxVolumes;
+ sc->max_devices = sc->facts->MaxTargets + sc->max_volumes;
+ sc->pending_map_events = 0;
+ sc->num_enc_table_entries = 0;
+ sc->num_rsvd_entries = 0;
+ sc->num_channels = 1;
+ sc->max_dpm_entries = le16toh(sc->ioc_pg8.MaxPersistentEntries);
+ sc->is_dpm_enable = (sc->max_dpm_entries) ? 1 : 0;
+ sc->track_mapping_events = 0;
+
+ if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING)
+ sc->is_dpm_enable = 0;
+
+ if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
+ sc->num_rsvd_entries = 1;
+
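+ /*
+ * In low volume mapping mode the IR volumes occupy the first entries
+ * of the mapping table, so reserve additional entries for them.
+ */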
+ volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (sc->ir_firmware && (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING))
+ sc->num_rsvd_entries += sc->max_volumes;
+
+ error = mps_mapping_allocate_memory(sc);
+ if (error)
+ return (error);
+
+ for (i = 0; i < sc->max_devices; i++)
+ _mapping_clear_map_entry(sc->mapping_table + i);
+
+ for (i = 0; i < sc->max_enclosures; i++)
+ _mapping_clear_enc_entry(sc->enclosure_table + i);
+
+ for (i = 0; i < sc->max_devices; i++) {
+ sc->removal_table[i].dev_handle = 0;
+ sc->removal_table[i].dpm_entry_num = MPS_DPM_BAD_IDX;
+ }
+
+ memset(sc->dpm_entry_used, 0, sc->max_dpm_entries);
+ memset(sc->dpm_flush_entry, 0, sc->max_dpm_entries);
+
+ if (sc->is_dpm_enable) {
+ dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) +
+ (sc->max_dpm_entries *
+ sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY));
+ retry_count = 0;
+
+retry_read_dpm:
+ if (mps_config_get_dpm_pg0(sc, &mpi_reply, sc->dpm_pg0,
+ dpm_pg0_sz)) {
+ printf("%s: dpm page read failed; disabling dpm\n",
+ __func__);
+ if (retry_count < 3) {
+ retry_count++;
+ goto retry_read_dpm;
+ }
+ sc->is_dpm_enable = 0;
+ }
+ }
+
+ if (sc->is_dpm_enable)
+ _mapping_process_dpm_pg0(sc);
+
+ sc->track_mapping_events = 1;
+ return 0;
+}
+
+/**
+ * mps_mapping_exit - clear mapping table and associated memory
+ * @sc: per adapter object
+ *
+ * Returns nothing.
+ */
+void
+mps_mapping_exit(struct mps_softc *sc)
+{
+ _mapping_flush_dpm_pages(sc);
+ mps_mapping_free_memory(sc);
+}
+
+/**
+ * mps_mapping_get_sas_id - assign a target id for sas device
+ * @sc: per adapter object
+ * @sas_address: sas address of the device
+ * @handle: device handle
+ *
+ * Returns valid ID on success or BAD_ID.
+ */
+unsigned int
+mps_mapping_get_sas_id(struct mps_softc *sc, uint64_t sas_address, u16 handle)
+{
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->dev_handle == handle && mt_entry->physical_id ==
+ sas_address)
+ return mt_entry->id;
+ }
+
+ return MPS_MAP_BAD_ID;
+}
+
+/**
+ * mps_mapping_get_sas_id_from_handle - find a target id in mapping table using
+ * only the dev handle. This is just a wrapper function for the local function
+ * _mapping_get_mt_idx_from_handle.
+ * @sc: per adapter object
+ * @handle: device handle
+ *
+ * Returns valid ID on success or BAD_ID.
+ */
+unsigned int
+mps_mapping_get_sas_id_from_handle(struct mps_softc *sc, u16 handle)
+{
+ return (_mapping_get_mt_idx_from_handle(sc, handle));
+}
+
+/**
+ * mps_mapping_get_raid_id - assign a target id for raid device
+ * @sc: per adapter object
+ * @wwid: world wide identifier for raid volume
+ * @handle: device handle
+ *
+ * Returns valid ID on success or BAD_ID.
+ */
+unsigned int
+mps_mapping_get_raid_id(struct mps_softc *sc, u64 wwid, u16 handle)
+{
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->dev_handle == handle && mt_entry->physical_id ==
+ wwid)
+ return mt_entry->id;
+ }
+
+ return MPS_MAP_BAD_ID;
+}
+
+/**
+ * mps_mapping_get_raid_id_from_handle - find raid device in mapping table
+ * using only the volume dev handle. This is just a wrapper function for the
+ * local function _mapping_get_ir_mt_idx_from_handle.
+ * @sc: per adapter object
+ * @volHandle: volume device handle
+ *
+ * Returns valid ID on success or BAD_ID.
+ */
+unsigned int
+mps_mapping_get_raid_id_from_handle(struct mps_softc *sc, u16 volHandle)
+{
+ return (_mapping_get_ir_mt_idx_from_handle(sc, volHandle));
+}
+
+/**
+ * mps_mapping_enclosure_dev_status_change_event - handle enclosure events
+ * @sc: per adapter object
+ * @event_data: event data payload
+ *
+ * Returns nothing.
+ */
+void
+mps_mapping_enclosure_dev_status_change_event(struct mps_softc *sc,
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data)
+{
+ u8 enc_idx, missing_count;
+ struct enc_mapping_table *et_entry;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
+ u8 update_phy_bits = 0;
+ u32 saved_phy_bits;
+ uint64_t temp64_var;
+
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) !=
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING)
+ goto out;
+
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_ADDED) {
+ if (!event_data->NumSlots) {
+ printf("%s: enclosure with handle = 0x%x reported 0 "
+ "slots\n", __func__,
+ le16toh(event_data->EnclosureHandle));
+ goto out;
+ }
+ temp64_var = event_data->EnclosureLogicalID.High;
+ temp64_var = (temp64_var << 32) |
+ event_data->EnclosureLogicalID.Low;
+ enc_idx = _mapping_get_enc_idx_from_id(sc, temp64_var,
+ event_data->PhyBits);
+ if (enc_idx != MPS_ENCTABLE_BAD_IDX) {
+ et_entry = &sc->enclosure_table[enc_idx];
+ if (et_entry->init_complete &&
+ !et_entry->missing_count) {
+ printf("%s: enclosure %d is already present "
+ "with handle = 0x%x\n",__func__, enc_idx,
+ et_entry->enc_handle);
+ goto out;
+ }
+ et_entry->enc_handle = le16toh(event_data->
+ EnclosureHandle);
+ et_entry->start_slot = le16toh(event_data->StartSlot);
+ saved_phy_bits = et_entry->phy_bits;
+ et_entry->phy_bits |= le32toh(event_data->PhyBits);
+ if (saved_phy_bits != et_entry->phy_bits)
+ update_phy_bits = 1;
+ if (et_entry->missing_count || update_phy_bits) {
+ et_entry->missing_count = 0;
+ if (sc->is_dpm_enable &&
+ et_entry->dpm_entry_num !=
+ MPS_DPM_BAD_IDX) {
+ dpm_entry += et_entry->dpm_entry_num;
+ missing_count =
+ (u8)(dpm_entry->MappingInformation &
+ MPI2_DRVMAP0_MAPINFO_MISSING_MASK);
+ if (!et_entry->init_complete && (
+ missing_count || update_phy_bits)) {
+ dpm_entry->MappingInformation
+ = et_entry->num_slots;
+ dpm_entry->MappingInformation
+ <<= map_shift;
+ dpm_entry->PhysicalBitsMapping
+ = et_entry->phy_bits;
+ sc->dpm_flush_entry[et_entry->
+ dpm_entry_num] = 1;
+ }
+ }
+ }
+ } else {
+ enc_idx = sc->num_enc_table_entries;
+ if (enc_idx >= sc->max_enclosures) {
+ printf("%s: enclosure can not be added; "
+ "mapping table is full\n", __func__);
+ goto out;
+ }
+ sc->num_enc_table_entries++;
+ et_entry = &sc->enclosure_table[enc_idx];
+ et_entry->enc_handle = le16toh(event_data->
+ EnclosureHandle);
+ et_entry->enclosure_id = event_data->
+ EnclosureLogicalID.High;
+ et_entry->enclosure_id = ( et_entry->enclosure_id <<
+ 32) | event_data->EnclosureLogicalID.Low;
+ et_entry->start_index = MPS_MAPTABLE_BAD_IDX;
+ et_entry->dpm_entry_num = MPS_DPM_BAD_IDX;
+ et_entry->num_slots = le16toh(event_data->NumSlots);
+ et_entry->start_slot = le16toh(event_data->StartSlot);
+ et_entry->phy_bits = le32toh(event_data->PhyBits);
+ }
+ et_entry->init_complete = 1;
+ } else if (event_data->ReasonCode ==
+ MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING) {
+ enc_idx = _mapping_get_enc_idx_from_handle(sc,
+ le16toh(event_data->EnclosureHandle));
+ if (enc_idx == MPS_ENCTABLE_BAD_IDX) {
+ printf("%s: cannot unmap enclosure %d because it has "
+ "already been deleted", __func__, enc_idx);
+ goto out;
+ }
+ et_entry = &sc->enclosure_table[enc_idx];
+ if (!et_entry->init_complete) {
+ if (et_entry->missing_count < MPS_MAX_MISSING_COUNT)
+ et_entry->missing_count++;
+ else
+ et_entry->init_complete = 1;
+ }
+ if (!et_entry->missing_count)
+ et_entry->missing_count++;
+ if (sc->is_dpm_enable && !et_entry->init_complete &&
+ et_entry->dpm_entry_num != MPS_DPM_BAD_IDX) {
+ dpm_entry += et_entry->dpm_entry_num;
+ dpm_entry->MappingInformation = et_entry->num_slots;
+ dpm_entry->MappingInformation <<= map_shift;
+ dpm_entry->MappingInformation |=
+ et_entry->missing_count;
+ sc->dpm_flush_entry[et_entry->dpm_entry_num] = 1;
+ }
+ et_entry->init_complete = 1;
+ }
+
+out:
+ _mapping_flush_dpm_pages(sc);
+ if (sc->pending_map_events)
+ sc->pending_map_events--;
+}
+
+/**
+ * mps_mapping_topology_change_event - handle topology change events
+ * @sc: per adapter object
+ * @event_data: event data payload
+ *
+ * Returns nothing.
+ */
+void
+mps_mapping_topology_change_event(struct mps_softc *sc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ struct _map_topology_change topo_change;
+ struct _map_phy_change *phy_change;
+ Mpi2EventSasTopoPhyEntry_t *event_phy_change;
+ u8 i, num_entries;
+
+ topo_change.enc_handle = le16toh(event_data->EnclosureHandle);
+ topo_change.exp_handle = le16toh(event_data->ExpanderDevHandle);
+ num_entries = event_data->NumEntries;
+ topo_change.num_entries = num_entries;
+ topo_change.start_phy_num = event_data->StartPhyNum;
+ topo_change.num_phys = event_data->NumPhys;
+ topo_change.exp_status = event_data->ExpStatus;
+ event_phy_change = event_data->PHY;
+ topo_change.phy_details = NULL;
+
+ if (!num_entries)
+ goto out;
+ phy_change = malloc(sizeof(struct _map_phy_change) * num_entries,
+ M_MPT2, M_NOWAIT|M_ZERO);
+ topo_change.phy_details = phy_change;
+ if (!phy_change)
+ goto out;
+ for (i = 0; i < num_entries; i++, event_phy_change++, phy_change++) {
+ phy_change->dev_handle = le16toh(event_phy_change->
+ AttachedDevHandle);
+ phy_change->reason = event_phy_change->PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ }
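+ /*
+ * Process the change list in four passes: update missing counts for
+ * removed devices, gather device info for new ones, clear entries
+ * freed by removals, and finally map the newly added devices.
+ */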
+ _mapping_update_missing_count(sc, &topo_change);
+ _mapping_get_dev_info(sc, &topo_change);
+ _mapping_clear_removed_entries(sc);
+ _mapping_add_new_device(sc, &topo_change);
+
+out:
+ free(topo_change.phy_details, M_MPT2);
+ _mapping_flush_dpm_pages(sc);
+ if (sc->pending_map_events)
+ sc->pending_map_events--;
+}
+
+/**
+ * _mapping_check_update_ir_mt_idx - Check and update IR map table index
+ * @sc: per adapter object
+ * @event_data: event data payload
+ * @evt_idx: current event index
+ * @map_idx: current index and the placeholder for the new map table index
+ * @wwid_table: world wide name for volumes in the element table
+ *
+ * Walks the remaining elements of the IR event to check whether a later
+ * ADD/CREATE element already claims the current index; if so, searches the
+ * IR map range for a free index.
+ *
+ * Returns 0 on success and 1 on failure
+ */
+static int
+_mapping_check_update_ir_mt_idx(struct mps_softc *sc,
+ Mpi2EventDataIrConfigChangeList_t *event_data, int evt_idx, u32 *map_idx,
+ u64 *wwid_table)
+{
+ struct dev_mapping_table *mt_entry;
+ u32 st_idx, end_idx, mt_idx = *map_idx;
+ u8 match = 0;
+ Mpi2EventIrConfigElement_t *element;
+ u16 element_flags;
+ int i;
+
+ mt_entry = &sc->mapping_table[mt_idx];
+ _mapping_get_ir_maprange(sc, &st_idx, &end_idx);
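+ /*
+ * If a later ADD/CREATE element in this event owns the WWID mapped
+ * at the current index, advance to the next unused index in the IR
+ * range and re-check until there is no conflict.
+ */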
+search_again:
+ match = 0;
+ for (i = evt_idx + 1; i < event_data->NumElements; i++) {
+ element = (Mpi2EventIrConfigElement_t *)
+ &event_data->ConfigElement[i];
+ element_flags = le16toh(element->ElementFlags);
+ if ((element_flags &
+ MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) !=
+ MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT)
+ continue;
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_ADDED ||
+ element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) {
+ if (mt_entry->physical_id == wwid_table[i]) {
+ match = 1;
+ break;
+ }
+ }
+ }
+
+ if (match) {
+ do {
+ mt_idx++;
+ if (mt_idx > end_idx)
+ return 1;
+ mt_entry = &sc->mapping_table[mt_idx];
+ } while (mt_entry->device_info & MPS_MAP_IN_USE);
+ goto search_again;
+ }
+ *map_idx = mt_idx;
+ return 0;
+}
+
+/**
+ * mps_mapping_ir_config_change_event - handle IR config change list events
+ * @sc: per adapter object
+ * @event_data: event data payload
+ *
+ * Returns nothing.
+ */
+void
+mps_mapping_ir_config_change_event(struct mps_softc *sc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u64 *wwid_table;
+ u32 map_idx, flags;
+ struct dev_mapping_table *mt_entry;
+ u16 element_flags;
+ u8 log_full_error = 0;
+
+ wwid_table = malloc(sizeof(u64) * event_data->NumElements, M_MPT2,
+ M_NOWAIT | M_ZERO);
+ if (!wwid_table)
+ goto out;
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ flags = le32toh(event_data->Flags);
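+ /*
+ * First pass: mark the map table entry of every volume named in
+ * this event as in-use so a later free-index search cannot pick it.
+ */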
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ element_flags = le16toh(element->ElementFlags);
+ if ((element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_ADDED) &&
+ (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_REMOVED) &&
+ (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE)
+ && (element->ReasonCode !=
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED))
+ continue;
+ if ((element_flags &
+ MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) ==
+ MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT) {
+ mps_config_get_volume_wwid(sc,
+ le16toh(element->VolDevHandle), &wwid_table[i]);
+ map_idx = _mapping_get_ir_mt_idx_from_wwid(sc,
+ wwid_table[i]);
+ if (map_idx != MPS_MAPTABLE_BAD_IDX) {
+ mt_entry = &sc->mapping_table[map_idx];
+ mt_entry->device_info |= MPS_MAP_IN_USE;
+ }
+ }
+ }
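+ /* A foreign configuration needs no mapping table updates. */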
+ if (flags == MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ goto out;
+ else {
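+ /*
+ * Second pass: apply the add/create, remove, and delete elements
+ * to the mapping table now that in-use volumes are marked.
+ */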
+ element = (Mpi2EventIrConfigElement_t *)&event_data->
+ ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_ADDED ||
+ element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) {
+ map_idx = _mapping_get_ir_mt_idx_from_wwid
+ (sc, wwid_table[i]);
+ if (map_idx != MPS_MAPTABLE_BAD_IDX) {
+ mt_entry = &sc->mapping_table[map_idx];
+ mt_entry->channel = MPS_RAID_CHANNEL;
+ mt_entry->id = map_idx;
+ mt_entry->dev_handle = le16toh
+ (element->VolDevHandle);
+ mt_entry->device_info =
+ MPS_DEV_RESERVED | MPS_MAP_IN_USE;
+ _mapping_update_ir_missing_cnt(sc,
+ map_idx, element, wwid_table[i]);
+ continue;
+ }
+ map_idx = _mapping_get_free_ir_mt_idx(sc);
+ if (map_idx == MPS_MAPTABLE_BAD_IDX)
+ log_full_error = 1;
+ else if (i < (event_data->NumElements - 1)) {
+ log_full_error =
+ _mapping_check_update_ir_mt_idx
+ (sc, event_data, i, &map_idx,
+ wwid_table);
+ }
+ if (log_full_error) {
+ printf("%s: no space to add the RAID "
+ "volume with handle 0x%04x in "
+ "mapping table\n", __func__, le16toh
+ (element->VolDevHandle));
+ continue;
+ }
+ mt_entry = &sc->mapping_table[map_idx];
+ mt_entry->physical_id = wwid_table[i];
+ mt_entry->channel = MPS_RAID_CHANNEL;
+ mt_entry->id = map_idx;
+ mt_entry->dev_handle = le16toh(element->
+ VolDevHandle);
+ mt_entry->device_info = MPS_DEV_RESERVED |
+ MPS_MAP_IN_USE;
+ mt_entry->init_complete = 0;
+ _mapping_update_ir_missing_cnt(sc, map_idx,
+ element, wwid_table[i]);
+ } else if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
+ map_idx = _mapping_get_ir_mt_idx_from_wwid(sc,
+ wwid_table[i]);
+ if (map_idx == MPS_MAPTABLE_BAD_IDX) {
+ printf("%s: failed to remove a volume "
+ "because it has already been "
+ "removed\n", __func__);
+ continue;
+ }
+ _mapping_update_ir_missing_cnt(sc, map_idx,
+ element, wwid_table[i]);
+ } else if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) {
+ map_idx = _mapping_get_mt_idx_from_handle(sc,
+ le16toh(element->VolDevHandle));
+ if (map_idx == MPS_MAPTABLE_BAD_IDX) {
+ printf("%s: failed to remove volume "
+ "with handle 0x%04x because it has "
+ "already been removed\n", __func__,
+ le16toh(element->VolDevHandle));
+ continue;
+ }
+ mt_entry = &sc->mapping_table[map_idx];
+ _mapping_update_ir_missing_cnt(sc, map_idx,
+ element, mt_entry->physical_id);
+ }
+ }
+ }
+
+out:
+ _mapping_flush_dpm_pages(sc);
+ free(wwid_table, M_MPT2);
+ if (sc->pending_map_events)
+ sc->pending_map_events--;
+}
diff --git a/sys/dev/mps/mps_mapping.h b/sys/dev/mps/mps_mapping.h
new file mode 100644
index 0000000..5e1877b
--- /dev/null
+++ b/sys/dev/mps/mps_mapping.h
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MPS_MAPPING_H
+#define _MPS_MAPPING_H
+
+/**
+ * struct _map_phy_change - PHY entries received in the topology change list
+ * @physical_id: SAS address of the device attached to the associated PHY
+ * @device_info: bitfield with detailed info about the device
+ * @dev_handle: device handle for the device pointed to by this entry
+ * @slot: slot ID
+ * @reason: reason code from the event PHY status
+ * @is_processed: flag indicating whether this entry has been processed
+ */
+struct _map_phy_change {
+ uint64_t physical_id;
+ uint32_t device_info;
+ uint16_t dev_handle;
+ uint16_t slot;
+ uint8_t reason;
+ uint8_t is_processed;
+};
+
+/**
+ * struct _map_topology_change - decoded SAS topology change event
+ * @enc_handle: enclosure handle from the event
+ * @exp_handle: expander device handle from the event
+ * @num_entries: number of PHY entries in @phy_details
+ * @start_phy_num: starting PHY number of the change list
+ * @num_phys: number of PHYs reported by the expander
+ * @exp_status: expander status from the event
+ * @phy_details: per-PHY change details
+ */
+struct _map_topology_change {
+ uint16_t enc_handle;
+ uint16_t exp_handle;
+ uint8_t num_entries;
+ uint8_t start_phy_num;
+ uint8_t num_phys;
+ uint8_t exp_status;
+ struct _map_phy_change *phy_details;
+};
+
+
+extern int
+mpssas_get_sas_address_for_sata_disk(struct mps_softc *ioc,
+ u64 *sas_address, u16 handle, u32 device_info);
+
+#endif /* _MPS_MAPPING_H */
diff --git a/sys/dev/mps/mps_pci.c b/sys/dev/mps/mps_pci.c
index 9b27488..c50dd25 100644
--- a/sys/dev/mps/mps_pci.c
+++ b/sys/dev/mps/mps_pci.c
@@ -29,6 +29,7 @@ __FBSDID("$FreeBSD$");
/* PCI/PCI-X/PCIe bus interface for the LSI MPT2 controllers */
+/* TODO: Move these headers into mpsvar.h */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
@@ -46,12 +47,17 @@ __FBSDID("$FreeBSD$");
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
#include <dev/mps/mpi/mpi2_type.h>
#include <dev/mps/mpi/mpi2.h>
#include <dev/mps/mpi/mpi2_ioc.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
+#include <dev/mps/mpi/mpi2_tool.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
static int mps_pci_probe(device_t);
@@ -63,15 +69,6 @@ static void mps_pci_free(struct mps_softc *);
static int mps_alloc_msix(struct mps_softc *sc, int msgs);
static int mps_alloc_msi(struct mps_softc *sc, int msgs);
-int mps_disable_msix = 0;
-TUNABLE_INT("hw.mps.disable_msix", &mps_disable_msix);
-SYSCTL_INT(_hw_mps, OID_AUTO, disable_msix, CTLFLAG_RD, &mps_disable_msix, 0,
- "Disable MSIX interrupts\n");
-int mps_disable_msi = 0;
-TUNABLE_INT("hw.mps.disable_msi", &mps_disable_msi);
-SYSCTL_INT(_hw_mps, OID_AUTO, disable_msi, CTLFLAG_RD, &mps_disable_msi, 0,
- "Disable MSI interrupts\n");
-
static device_method_t mps_methods[] = {
DEVMETHOD(device_probe, mps_pci_probe),
DEVMETHOD(device_attach, mps_pci_attach),
@@ -125,10 +122,24 @@ struct mps_ident {
0xffff, 0xffff, 0, "LSI SAS2208" },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
0xffff, 0xffff, 0, "LSI SAS2208" },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7,
- 0xffff, 0xffff, 0, "LSI SAS2208" },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8,
- 0xffff, 0xffff, 0, "LSI SAS2208" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
+ 0xffff, 0xffff, 0, "LSI SAS2308" },
+ /* Add customer-specific vendor/subdevice IDs before the generic
+ * (0xffff) vendor/subdevice IDs. */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
+ 0x8086, 0x3516, 0, "Intel(R) Integrated RAID Module RMS25JB080" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
+ 0x8086, 0x3517, 0, "Intel(R) Integrated RAID Module RMS25JB040" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
+ 0x8086, 0x3518, 0, "Intel(R) Integrated RAID Module RMS25KB080" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
+ 0x8086, 0x3519, 0, "Intel(R) Integrated RAID Module RMS25KB040" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
+ 0xffff, 0xffff, 0, "LSI SAS2308" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
+ 0xffff, 0xffff, 0, "LSI SAS2308" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
+ 0xffff, 0xffff, MPS_FLAGS_WD_AVAILABLE, "LSI SSS6200" },
{ 0, 0, 0, 0, 0, NULL }
};
@@ -161,7 +172,7 @@ mps_pci_probe(device_t dev)
if ((id = mps_find_ident(dev)) != NULL) {
device_set_desc(dev, id->desc);
- return (BUS_PROBE_DEFAULT);
+ return (BUS_PROBE_VENDOR);
}
return (ENXIO);
}
@@ -205,7 +216,7 @@ mps_pci_attach(device_t dev)
sc->mps_bhandle = rman_get_bushandle(sc->mps_regs_resource);
/* Allocate the parent DMA tag */
- if (bus_dma_tag_create( NULL, /* parent */
+ if (bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -235,10 +246,10 @@ mps_pci_setup_interrupts(struct mps_softc *sc)
dev = sc->mps_dev;
error = ENXIO;
- if ((mps_disable_msix == 0) &&
+ if ((sc->disable_msix == 0) &&
((msgs = pci_msix_count(dev)) >= MPS_MSI_COUNT))
error = mps_alloc_msix(sc, MPS_MSI_COUNT);
- if ((error != 0) && (mps_disable_msi == 0) &&
+ if ((error != 0) && (sc->disable_msi == 0) &&
((msgs = pci_msi_count(dev)) >= MPS_MSI_COUNT))
error = mps_alloc_msi(sc, MPS_MSI_COUNT);
@@ -362,3 +373,20 @@ mps_alloc_msi(struct mps_softc *sc, int msgs)
return (error);
}
+int
+mps_pci_restore(struct mps_softc *sc)
+{
+ struct pci_devinfo *dinfo;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ dinfo = device_get_ivars(sc->mps_dev);
+ if (dinfo == NULL) {
+ mps_dprint(sc, MPS_FAULT, "%s: NULL dinfo\n", __func__);
+ return (EINVAL);
+ }
+
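+ /* Write the saved PCI config space back to the device. */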
+ pci_cfg_restore(sc->mps_dev, dinfo);
+ return (0);
+}
+
diff --git a/sys/dev/mps/mps_sas.c b/sys/dev/mps/mps_sas.c
index c253ee0..fb57b63 100644
--- a/sys/dev/mps/mps_sas.c
+++ b/sys/dev/mps/mps_sas.c
@@ -23,12 +23,42 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* Communications core for LSI MPT2 */
+/* TODO: Move these headers into mpsvar.h */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
@@ -41,15 +71,21 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
-#include <sys/sglist.h>
#include <sys/endian.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/sbuf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
+#include <machine/stdarg.h>
+
#include <cam/cam.h>
#include <cam/cam_ccb.h>
+#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
@@ -67,426 +103,319 @@ __FBSDID("$FreeBSD$");
#include <dev/mps/mpi/mpi2_sas.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
#include <dev/mps/mpi/mpi2_init.h>
+#include <dev/mps/mpi/mpi2_tool.h>
+#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
#include <dev/mps/mps_table.h>
+#include <dev/mps/mps_sas.h>
-struct mpssas_target {
- uint16_t handle;
- uint8_t linkrate;
- uint64_t devname;
- uint64_t sasaddr;
- uint32_t devinfo;
- uint16_t encl_handle;
- uint16_t encl_slot;
- uint16_t parent_handle;
- int flags;
-#define MPSSAS_TARGET_INABORT (1 << 0)
-#define MPSSAS_TARGET_INRESET (1 << 1)
-#define MPSSAS_TARGET_INCHIPRESET (1 << 2)
-#define MPSSAS_TARGET_INRECOVERY 0x7
- uint16_t tid;
-};
-
-struct mpssas_softc {
- struct mps_softc *sc;
- u_int flags;
-#define MPSSAS_IN_DISCOVERY (1 << 0)
-#define MPSSAS_IN_STARTUP (1 << 1)
-#define MPSSAS_DISCOVERY_TIMEOUT_PENDING (1 << 2)
-#define MPSSAS_QUEUE_FROZEN (1 << 3)
- struct mpssas_target *targets;
- struct cam_devq *devq;
- struct cam_sim *sim;
- struct cam_path *path;
- struct intr_config_hook sas_ich;
- struct callout discovery_callout;
- u_int discovery_timeouts;
- struct mps_event_handle *mpssas_eh;
-};
+#define MPSSAS_DISCOVERY_TIMEOUT 20
+#define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
-struct mpssas_devprobe {
- struct mps_config_params params;
- u_int state;
-#define MPSSAS_PROBE_DEV1 0x01
-#define MPSSAS_PROBE_DEV2 0x02
-#define MPSSAS_PROBE_PHY 0x03
-#define MPSSAS_PROBE_EXP 0x04
-#define MPSSAS_PROBE_PHY2 0x05
-#define MPSSAS_PROBE_EXP2 0x06
- struct mpssas_target target;
+/*
+ * static array to check SCSI OpCode for EEDP protection bits
+ */
+#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
+#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
+#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
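+/*
+ * Indexed by CDB opcode; the non-zero entries mark the 10-, 12-, and
+ * 16-byte READ/WRITE/VERIFY/WRITE SAME variants that can carry
+ * protection information.
+ */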
+static uint8_t op_code_prot[256] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
+ 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
-#define MPSSAS_DISCOVERY_TIMEOUT 20
-#define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
+MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
-static MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
-
-static __inline int mpssas_set_lun(uint8_t *lun, u_int ccblun);
-static struct mpssas_target * mpssas_alloc_target(struct mpssas_softc *,
- struct mpssas_target *);
-static struct mpssas_target * mpssas_find_target(struct mpssas_softc *, int,
- uint16_t);
-static void mpssas_announce_device(struct mpssas_softc *,
- struct mpssas_target *);
-static void mpssas_startup(void *data);
-static void mpssas_discovery_end(struct mpssas_softc *sassc);
+static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
static void mpssas_discovery_timeout(void *data);
-static void mpssas_prepare_remove(struct mpssas_softc *,
- MPI2_EVENT_SAS_TOPO_PHY_ENTRY *);
static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
static void mpssas_poll(struct cam_sim *sim);
-static void mpssas_probe_device(struct mps_softc *sc, uint16_t handle);
-static void mpssas_probe_device_complete(struct mps_softc *sc,
- struct mps_config_params *params);
static void mpssas_scsiio_timeout(void *data);
static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
-static void mpssas_recovery(struct mps_softc *, struct mps_command *);
-static int mpssas_map_tm_request(struct mps_softc *sc, struct mps_command *cm);
-static void mpssas_issue_tm_request(struct mps_softc *sc,
- struct mps_command *cm);
-static void mpssas_tm_complete(struct mps_softc *sc, struct mps_command *cm,
- int error);
-static int mpssas_complete_tm_request(struct mps_softc *sc,
- struct mps_command *cm, int free_cm);
+static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
+ struct mps_command *cm, union ccb *ccb);
static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
+static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
#if __FreeBSD_version >= 900026
static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
uint64_t sasaddr);
static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
-#endif /* __FreeBSD_version >= 900026 */
-static void mpssas_resetdev(struct mpssas_softc *, struct mps_command *);
-static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
+#endif /* __FreeBSD_version >= 900026 */
static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
-static void mpssas_freeze_device(struct mpssas_softc *, struct mpssas_target *);
-static void mpssas_unfreeze_device(struct mpssas_softc *, struct mpssas_target *) __unused;
-
-/*
- * Abstracted so that the driver can be backwards and forwards compatible
- * with future versions of CAM that will provide this functionality.
- */
-#define MPS_SET_LUN(lun, ccblun) \
- mpssas_set_lun(lun, ccblun)
+static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
+static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
+static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
+static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
+static void mpssas_scanner_thread(void *arg);
+#if __FreeBSD_version >= 1000006
+static void mpssas_async(void *callback_arg, uint32_t code,
+ struct cam_path *path, void *arg);
+#else
+static void mpssas_check_eedp(struct mpssas_softc *sassc);
+static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
+#endif
+static int mpssas_send_portenable(struct mps_softc *sc);
+static void mpssas_portenable_complete(struct mps_softc *sc,
+ struct mps_command *cm);
-static __inline int
-mpssas_set_lun(uint8_t *lun, u_int ccblun)
+static struct mpssas_target *
+mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
{
- uint64_t *newlun;
-
- newlun = (uint64_t *)lun;
- *newlun = 0;
- if (ccblun <= 0xff) {
- /* Peripheral device address method, LUN is 0 to 255 */
- lun[1] = ccblun;
- } else if (ccblun <= 0x3fff) {
- /* Flat space address method, LUN is <= 16383 */
- scsi_ulto2b(ccblun, lun);
- lun[0] |= 0x40;
- } else if (ccblun <= 0xffffff) {
- /* Extended flat space address method, LUN is <= 16777215 */
- scsi_ulto3b(ccblun, &lun[1]);
- /* Extended Flat space address method */
- lun[0] = 0xc0;
- /* Length = 1, i.e. LUN is 3 bytes long */
- lun[0] |= 0x10;
- /* Extended Address Method */
- lun[0] |= 0x02;
- } else {
- return (EINVAL);
+ struct mpssas_target *target;
+ int i;
+
+ for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
+ target = &sassc->targets[i];
+ if (target->handle == handle)
+ return (target);
}
- return (0);
+ return (NULL);
}
-static struct mpssas_target *
-mpssas_alloc_target(struct mpssas_softc *sassc, struct mpssas_target *probe)
+/* We need to freeze the simq during attach and diag reset, to avoid failing
+ * commands before device handles have been found by discovery. Since
+ * discovery involves reading config pages and possibly sending commands,
+ * discovery actions may continue even after we receive the end-of-discovery
+ * event, so refcount discovery actions instead of assuming we can unfreeze
+ * the simq when we get the event.
+ */
+void
+mpssas_startup_increment(struct mpssas_softc *sassc)
{
- struct mpssas_target *target;
- int start;
-
- mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
-
- /*
- * If it's not a sata or sas target, CAM won't be able to see it. Put
- * it into a high-numbered slot so that it's accessible but not
- * interrupting the target numbering sequence of real drives.
- */
- if ((probe->devinfo & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
- MPI2_SAS_DEVICE_INFO_STP_TARGET | MPI2_SAS_DEVICE_INFO_SATA_DEVICE))
- == 0) {
- start = 200;
- } else {
- /*
- * Use the enclosure number and slot number as a hint for target
- * numbering. If that doesn't produce a sane result, search the
- * entire space.
- */
-#if 0
- start = probe->encl_handle * 16 + probe->encl_slot;
-#else
- start = probe->encl_slot;
-#endif
- if (start >= sassc->sc->facts->MaxTargets)
- start = 0;
+ if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
+ if (sassc->startup_refcount++ == 0) {
+ /* just starting, freeze the simq */
+ mps_dprint(sassc->sc, MPS_INFO,
+ "%s freezing simq\n", __func__);
+ xpt_freeze_simq(sassc->sim, 1);
+ }
+ mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
+ sassc->startup_refcount);
}
+}
- target = mpssas_find_target(sassc, start, 0);
+void
+mpssas_startup_decrement(struct mpssas_softc *sassc)
+{
+ if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
+ if (--sassc->startup_refcount == 0) {
+ /* finished all discovery-related actions, release
+ * the simq and rescan for the latest topology.
+ */
+ mps_dprint(sassc->sc, MPS_INFO,
+ "%s releasing simq\n", __func__);
+ sassc->flags &= ~MPSSAS_IN_STARTUP;
+ xpt_release_simq(sassc->sim, 1);
+ mpssas_rescan_target(sassc->sc, NULL);
+ }
+ mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
+ sassc->startup_refcount);
+ }
+}
- /*
- * Nothing found on the first pass, try a second pass that searches the
- * entire space.
- */
- if (target == NULL)
- target = mpssas_find_target(sassc, 0, 0);
+/* LSI's firmware requires us to stop sending commands when we're doing task
+ * management, so refcount the TMs and keep the simq frozen when any are in
+ * use.
+ */
+struct mps_command *
+mpssas_alloc_tm(struct mps_softc *sc)
+{
+ struct mps_command *tm;
- return (target);
+ tm = mps_alloc_high_priority_command(sc);
+ if (tm != NULL) {
+ if (sc->sassc->tm_count++ == 0) {
+ mps_printf(sc, "%s freezing simq\n", __func__);
+ xpt_freeze_simq(sc->sassc->sim, 1);
+ }
+ mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
+ sc->sassc->tm_count);
+ }
+ return tm;
}
-static struct mpssas_target *
-mpssas_find_target(struct mpssas_softc *sassc, int start, uint16_t handle)
+void
+mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
- struct mpssas_target *target;
- int i;
+ if (tm == NULL)
+ return;
- for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
- target = &sassc->targets[i];
- if (target->handle == handle)
- return (target);
+ /* If there are no TMs in use, we can release the simq. We use our
+ * own refcount so that it's easier for a diag reset to cleanup and
+ * release the simq.
+ */
+ if (--sc->sassc->tm_count == 0) {
+ mps_printf(sc, "%s releasing simq\n", __func__);
+ xpt_release_simq(sc->sassc->sim, 1);
}
+ mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
+ sc->sassc->tm_count);
- return (NULL);
+ mps_free_high_priority_command(sc, tm);
}
-/*
- * Start the probe sequence for a given device handle. This will not
- * block.
- */
-static void
-mpssas_probe_device(struct mps_softc *sc, uint16_t handle)
+
+void
+mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
- struct mpssas_devprobe *probe;
- struct mps_config_params *params;
- MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
- int error;
+ struct mpssas_softc *sassc = sc->sassc;
+ path_id_t pathid;
+ target_id_t targetid;
+ union ccb *ccb;
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+ pathid = cam_sim_path(sassc->sim);
+ if (targ == NULL)
+ targetid = CAM_TARGET_WILDCARD;
+ else
+ targetid = targ - sassc->targets;
- probe = malloc(sizeof(*probe), M_MPSSAS, M_NOWAIT | M_ZERO);
- if (probe == NULL) {
- mps_dprint(sc, MPS_FAULT, "Out of memory starting probe\n");
+ /*
+ * Allocate a CCB and schedule a rescan.
+ */
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ mps_dprint(sc, MPS_FAULT, "unable to alloc CCB for rescan\n");
return;
}
- params = &probe->params;
- hdr = &params->hdr.Ext;
-
- params->action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- params->page_address = MPI2_SAS_DEVICE_PGAD_FORM_HANDLE | handle;
- hdr->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
- hdr->ExtPageLength = 0;
- hdr->PageNumber = 0;
- hdr->PageVersion = 0;
- params->buffer = NULL;
- params->length = 0;
- params->callback = mpssas_probe_device_complete;
- params->cbdata = probe;
- probe->target.handle = handle;
- probe->state = MPSSAS_PROBE_DEV1;
-
- if ((error = mps_read_config_page(sc, params)) != 0) {
- free(probe, M_MPSSAS);
- mps_dprint(sc, MPS_FAULT, "Failure starting device probe\n");
+
+ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
+ targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
+ xpt_free_ccb(ccb);
return;
}
+
+ /* XXX Hardwired to scan the bus for now */
+ ccb->ccb_h.func_code = XPT_SCAN_BUS;
+ mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
+ mpssas_rescan(sassc, ccb);
}
static void
-mpssas_probe_device_complete(struct mps_softc *sc,
- struct mps_config_params *params)
+mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
{
- MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
- struct mpssas_devprobe *probe;
- int error;
+ struct sbuf sb;
+ va_list ap;
+ char str[192];
+ char path_str[64];
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+ if (cm == NULL)
+ return;
- hdr = &params->hdr.Ext;
- probe = params->cbdata;
-
- switch (probe->state) {
- case MPSSAS_PROBE_DEV1:
- case MPSSAS_PROBE_PHY:
- case MPSSAS_PROBE_EXP:
- if (params->status != MPI2_IOCSTATUS_SUCCESS) {
- mps_dprint(sc, MPS_FAULT,
- "Probe Failure 0x%x state %d\n", params->status,
- probe->state);
- free(probe, M_MPSSAS);
- return;
- }
- params->action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
- params->length = hdr->ExtPageLength * 4;
- params->buffer = malloc(params->length, M_MPSSAS,
- M_ZERO|M_NOWAIT);
- if (params->buffer == NULL) {
- mps_dprint(sc, MPS_FAULT, "Out of memory at state "
- "0x%x, size 0x%x\n", probe->state, params->length);
- free(probe, M_MPSSAS);
- return;
- }
- if (probe->state == MPSSAS_PROBE_DEV1)
- probe->state = MPSSAS_PROBE_DEV2;
- else if (probe->state == MPSSAS_PROBE_PHY)
- probe->state = MPSSAS_PROBE_PHY2;
- else if (probe->state == MPSSAS_PROBE_EXP)
- probe->state = MPSSAS_PROBE_EXP2;
- error = mps_read_config_page(sc, params);
- break;
- case MPSSAS_PROBE_DEV2:
- {
- MPI2_CONFIG_PAGE_SAS_DEV_0 *buf;
-
- if (params->status != MPI2_IOCSTATUS_SUCCESS) {
- mps_dprint(sc, MPS_FAULT,
- "Probe Failure 0x%x state %d\n", params->status,
- probe->state);
- free(params->buffer, M_MPSSAS);
- free(probe, M_MPSSAS);
- return;
- }
- buf = params->buffer;
- mps_print_sasdev0(sc, buf);
-
- probe->target.devname = mps_to_u64(&buf->DeviceName);
- probe->target.devinfo = buf->DeviceInfo;
- probe->target.encl_handle = buf->EnclosureHandle;
- probe->target.encl_slot = buf->Slot;
- probe->target.sasaddr = mps_to_u64(&buf->SASAddress);
- probe->target.parent_handle = buf->ParentDevHandle;
-
- if (buf->DeviceInfo & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
- params->page_address =
- MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | buf->PhyNum;
- hdr->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
- hdr->PageNumber = 0;
- probe->state = MPSSAS_PROBE_PHY;
- } else {
- params->page_address =
- MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
- buf->ParentDevHandle | (buf->PhyNum << 16);
- hdr->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
- hdr->PageNumber = 1;
- probe->state = MPSSAS_PROBE_EXP;
+ sbuf_new(&sb, str, sizeof(str), 0);
+
+ va_start(ap, fmt);
+
+ if (cm->cm_ccb != NULL) {
+ xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
+ sizeof(path_str));
+ sbuf_cat(&sb, path_str);
+ if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
+ scsi_command_string(&cm->cm_ccb->csio, &sb);
+ sbuf_printf(&sb, "length %d ",
+ cm->cm_ccb->csio.dxfer_len);
}
- params->action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- hdr->ExtPageLength = 0;
- hdr->PageVersion = 0;
- params->buffer = NULL;
- params->length = 0;
- free(buf, M_MPSSAS);
- error = mps_read_config_page(sc, params);
- break;
}
- case MPSSAS_PROBE_PHY2:
- case MPSSAS_PROBE_EXP2:
- {
- MPI2_CONFIG_PAGE_SAS_PHY_0 *phy;
- MPI2_CONFIG_PAGE_EXPANDER_1 *exp;
- struct mpssas_softc *sassc;
- struct mpssas_target *targ;
- char devstring[80];
- uint16_t handle;
-
- if (params->status != MPI2_IOCSTATUS_SUCCESS) {
- mps_dprint(sc, MPS_FAULT,
- "Probe Failure 0x%x state %d\n", params->status,
- probe->state);
- free(params->buffer, M_MPSSAS);
- free(probe, M_MPSSAS);
- return;
- }
+ else {
+ sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
+ cam_sim_name(cm->cm_sc->sassc->sim),
+ cam_sim_unit(cm->cm_sc->sassc->sim),
+ cam_sim_bus(cm->cm_sc->sassc->sim),
+ cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
+ cm->cm_lun);
+ }
- if (probe->state == MPSSAS_PROBE_PHY2) {
- phy = params->buffer;
- mps_print_sasphy0(sc, phy);
- probe->target.linkrate = phy->NegotiatedLinkRate & 0xf;
- } else {
- exp = params->buffer;
- mps_print_expander1(sc, exp);
- probe->target.linkrate = exp->NegotiatedLinkRate & 0xf;
- }
- free(params->buffer, M_MPSSAS);
+ sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
+ sbuf_vprintf(&sb, fmt, ap);
+ sbuf_finish(&sb);
+ printf("%s", sbuf_data(&sb));
- sassc = sc->sassc;
- handle = probe->target.handle;
- if ((targ = mpssas_find_target(sassc, 0, handle)) != NULL) {
- mps_printf(sc, "Ignoring dup device handle 0x%04x\n",
- handle);
- free(probe, M_MPSSAS);
- return;
- }
- if ((targ = mpssas_alloc_target(sassc, &probe->target)) == NULL) {
- mps_printf(sc, "Target table overflow, handle 0x%04x\n",
- handle);
- free(probe, M_MPSSAS);
- return;
- }
+ va_end(ap);
+}
- *targ = probe->target; /* Copy the attributes */
- targ->tid = targ - sassc->targets;
- mps_describe_devinfo(targ->devinfo, devstring, 80);
- if (bootverbose)
- mps_printf(sc, "Found device <%s> <%s> <0x%04x> "
- "<%d/%d>\n", devstring,
- mps_describe_table(mps_linkrate_names,
- targ->linkrate), targ->handle, targ->encl_handle,
- targ->encl_slot);
-
- free(probe, M_MPSSAS);
- mpssas_announce_device(sassc, targ);
- break;
- }
- default:
- printf("what?\n");
+static void
+mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
+{
+ struct mpssas_softc *sassc = sc->sassc;
+ path_id_t pathid = cam_sim_path(sassc->sim);
+ struct cam_path *path;
+
+ mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
+ if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
+ mps_printf(sc, "unable to create path for lost target %d\n",
+ targ->tid);
+ return;
}
+
+ xpt_async(AC_LOST_DEVICE, path, NULL);
+ xpt_free_path(path);
}
/*
- * The MPT2 firmware performs debounce on the link to avoid transient link errors
- * and false removals. When it does decide that link has been lost and a device
- * need to go away, it expects that the host will perform a target reset and then
- * an op remove. The reset has the side-effect of aborting any outstanding
- * requests for the device, which is required for the op-remove to succeed. It's
- * not clear if the host should check for the device coming back alive after the
- * reset.
+ * The MPT2 firmware performs debounce on the link to avoid transient link
+ * errors and false removals. When it does decide that link has been lost
+ * and a device need to go away, it expects that the host will perform a
+ * target reset and then an op remove. The reset has the side-effect of
+ * aborting any outstanding requests for the device, which is required for
+ * the op-remove to succeed. It's not clear if the host should check for
+ * the device coming back alive after the reset.
*/
-static void
-mpssas_prepare_remove(struct mpssas_softc *sassc, MPI2_EVENT_SAS_TOPO_PHY_ENTRY *phy)
+void
+mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
MPI2_SCSI_TASK_MANAGE_REQUEST *req;
struct mps_softc *sc;
struct mps_command *cm;
struct mpssas_target *targ = NULL;
- uint16_t handle;
mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
- handle = phy->AttachedDevHandle;
- targ = mpssas_find_target(sassc, 0, handle);
- if (targ == NULL)
+ /*
+ * If this is a WD controller, determine if the disk should be exposed
+ * to the OS or not. If disk should be exposed, return from this
+ * function without doing anything.
+ */
+ sc = sassc->sc;
+ if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
+ MPS_WD_EXPOSE_ALWAYS)) {
+ return;
+ }
+
+ targ = mpssas_find_target_by_handle(sassc, 0, handle);
+ if (targ == NULL) {
+ /* FIXME: what is the action? */
/* We don't know about this device? */
+ printf("%s: invalid handle 0x%x \n", __func__, handle);
return;
+ }
- sc = sassc->sc;
- cm = mps_alloc_command(sc);
+ targ->flags |= MPSSAS_TARGET_INREMOVAL;
+
+ cm = mpssas_alloc_tm(sc);
if (cm == NULL) {
- mps_printf(sc, "comand alloc failure in mpssas_prepare_remove\n");
+ mps_printf(sc, "%s: command alloc failure\n", __func__);
return;
}
- mps_dprint(sc, MPS_INFO, "Preparing to remove target %d\n", targ->tid);
+ mpssas_lost_target(sc, targ);
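+ /*
+ * Send a target reset first; mpssas_remove_device() then reuses the
+ * command to issue the SAS IO unit control op-remove.
+ */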
req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
memset(req, 0, sizeof(*req));
@@ -497,15 +426,16 @@ mpssas_prepare_remove(struct mpssas_softc *sassc, MPI2_EVENT_SAS_TOPO_PHY_ENTRY
/* SAS Hard Link Reset / SATA Link Reset */
req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+ cm->cm_targ = targ;
cm->cm_data = NULL;
- cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
cm->cm_complete = mpssas_remove_device;
- cm->cm_targ = targ;
- mpssas_issue_tm_request(sc, cm);
+ cm->cm_complete_data = (void *)(uintptr_t)handle;
+ mps_map_command(sc, cm);
}
static void
-mpssas_remove_device(struct mps_softc *sc, struct mps_command *cm)
+mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
MPI2_SCSI_TASK_MANAGE_REPLY *reply;
MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
@@ -515,150 +445,122 @@ mpssas_remove_device(struct mps_softc *sc, struct mps_command *cm)
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
- reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)cm->cm_reply;
- handle = cm->cm_targ->handle;
-
- mpssas_complete_tm_request(sc, cm, /*free_cm*/ 0);
+ reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+ handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
+ targ = tm->cm_targ;
/*
* Currently there should be no way we can hit this case. It only
* happens when we have a failure to allocate chain frames, and
* task management commands don't have S/G lists.
*/
- if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
+ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
- "This should not happen!\n", __func__, cm->cm_flags,
+ "This should not happen!\n", __func__, tm->cm_flags,
handle);
+ mpssas_free_tm(sc, tm);
+ return;
+ }
+
+ if (reply == NULL) {
+ /* XXX retry the remove after the diag reset completes? */
+ mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
+ __func__, handle);
+ mpssas_free_tm(sc, tm);
return;
}
if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
- mps_printf(sc, "Failure 0x%x reseting device 0x%04x\n",
+ mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
reply->IOCStatus, handle);
- mps_free_command(sc, cm);
+ mpssas_free_tm(sc, tm);
return;
}
mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
reply->TerminationCount);
- mps_free_reply(sc, cm->cm_reply_data);
+ mps_free_reply(sc, tm->cm_reply_data);
+ tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
/* Reuse the existing command */
- req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)cm->cm_req;
+ req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
memset(req, 0, sizeof(*req));
req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
req->DevHandle = handle;
- cm->cm_data = NULL;
- cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
- cm->cm_flags &= ~MPS_CM_FLAGS_COMPLETE;
- cm->cm_complete = mpssas_remove_complete;
+ tm->cm_data = NULL;
+ tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ tm->cm_complete = mpssas_remove_complete;
+ tm->cm_complete_data = (void *)(uintptr_t)handle;
- mps_map_command(sc, cm);
+ mps_map_command(sc, tm);
- mps_dprint(sc, MPS_INFO, "clearing target handle 0x%04x\n", handle);
- TAILQ_FOREACH_SAFE(cm, &sc->io_list, cm_link, next_cm) {
+ mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
+ targ->tid, handle);
+ TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
union ccb *ccb;
- if (cm->cm_targ->handle != handle)
- continue;
-
- mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", cm);
- ccb = cm->cm_complete_data;
+ mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
+ ccb = tm->cm_complete_data;
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
- mpssas_scsiio_complete(sc, cm);
- }
- targ = mpssas_find_target(sc->sassc, 0, handle);
- if (targ != NULL) {
- targ->handle = 0x0;
- mpssas_announce_device(sc->sassc, targ);
+ mpssas_scsiio_complete(sc, tm);
}
}
static void
-mpssas_remove_complete(struct mps_softc *sc, struct mps_command *cm)
+mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
+ uint16_t handle;
+ struct mpssas_target *targ;
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
- reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)cm->cm_reply;
-
- mps_printf(sc, "mpssas_remove_complete on target 0x%04x,"
- " IOCStatus= 0x%x\n", cm->cm_targ->tid, reply->IOCStatus);
-
- mps_free_command(sc, cm);
-}
-
-static void
-mpssas_evt_handler(struct mps_softc *sc, uintptr_t data,
- MPI2_EVENT_NOTIFICATION_REPLY *event)
-{
- struct mpssas_softc *sassc;
-
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
-
- sassc = sc->sassc;
- mps_print_evt_sas(sc, event);
-
- switch (event->Event) {
- case MPI2_EVENT_SAS_DISCOVERY:
- {
- MPI2_EVENT_DATA_SAS_DISCOVERY *data;
+ reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
+ handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
- data = (MPI2_EVENT_DATA_SAS_DISCOVERY *)&event->EventData;
+ /*
+ * Currently there should be no way we can hit this case. It only
+ * happens when we have a failure to allocate chain frames, and
+ * task management commands don't have S/G lists.
+ */
+ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
+ mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
+ "This should not happen!\n", __func__, tm->cm_flags,
+ handle);
+ mpssas_free_tm(sc, tm);
+ return;
+ }
- if (data->ReasonCode & MPI2_EVENT_SAS_DISC_RC_STARTED)
- mps_dprint(sc, MPS_TRACE,"SAS discovery start event\n");
- if (data->ReasonCode & MPI2_EVENT_SAS_DISC_RC_COMPLETED) {
- mps_dprint(sc, MPS_TRACE, "SAS discovery end event\n");
- sassc->flags &= ~MPSSAS_IN_DISCOVERY;
- mpssas_discovery_end(sassc);
- }
- break;
+ if (reply == NULL) {
+ /* most likely a chip reset */
+ mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
+ __func__, handle);
+ mpssas_free_tm(sc, tm);
+ return;
}
- case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
- {
- MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *data;
- MPI2_EVENT_SAS_TOPO_PHY_ENTRY *phy;
- int i;
-
- data = (MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *)
- &event->EventData;
-
- if (data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED) {
- if (bootverbose)
- printf("Expander found at enclosure %d\n",
- data->EnclosureHandle);
- mpssas_probe_device(sc, data->ExpanderDevHandle);
- }
- for (i = 0; i < data->NumEntries; i++) {
- phy = &data->PHY[i];
- switch (phy->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK) {
- case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
- mpssas_probe_device(sc, phy->AttachedDevHandle);
- break;
- case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
- mpssas_prepare_remove(sassc, phy);
- break;
- case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
- case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
- case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
- default:
- break;
- }
- }
+ mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
+ handle, reply->IOCStatus);
- break;
- }
- case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
- break;
- default:
- break;
+ /*
+ * Don't clear the target if the remove fails; that would leave driver
+ * state inconsistent with the firmware's view of the device.
+ * Leave the devname and sasaddr intact so that we know to avoid reusing
+ * this target id if possible, and so we can assign the same target id
+ * to this device if it comes back in the future.
+ */
+ if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
+ targ = tm->cm_targ;
+ targ->handle = 0x0;
+ targ->encl_handle = 0x0;
+ targ->encl_slot = 0x0;
+ targ->exp_dev_handle = 0x0;
+ targ->phy_num = 0x0;
+ targ->linkrate = 0x0;
+ targ->devinfo = 0x0;
}
- mps_free_reply(sc, data);
+ mpssas_free_tm(sc, tm);
}
static int
@@ -674,6 +576,11 @@ mpssas_register_events(struct mps_softc *sc)
setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
+ setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
+ setbit(events, MPI2_EVENT_IR_VOLUME);
+ setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
+ setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
+ setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
mps_register_events(sc, events, mpssas_evt_handler, NULL,
&sc->sassc->mpssas_eh);
@@ -685,8 +592,10 @@ int
mps_attach_sas(struct mps_softc *sc)
{
struct mpssas_softc *sassc;
- int error = 0;
- int num_sim_reqs;
+#if __FreeBSD_version >= 1000006
+ cam_status status;
+#endif
+ int unit, error = 0;
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
@@ -696,42 +605,48 @@ mps_attach_sas(struct mps_softc *sc)
sc->sassc = sassc;
sassc->sc = sc;
- /*
- * Tell CAM that we can handle 5 fewer requests than we have
- * allocated. If we allow the full number of requests, all I/O
- * will halt when we run out of resources. Things work fine with
- * just 1 less request slot given to CAM than we have allocated.
- * We also need a couple of extra commands so that we can send down
- * abort, reset, etc. requests when commands time out. Otherwise
- * we could wind up in a situation with sc->num_reqs requests down
- * on the card and no way to send an abort.
- *
- * XXX KDM need to figure out why I/O locks up if all commands are
- * used.
- */
- num_sim_reqs = sc->num_reqs - 5;
-
- if ((sassc->devq = cam_simq_alloc(num_sim_reqs)) == NULL) {
+ if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
error = ENOMEM;
goto out;
}
+ unit = device_get_unit(sc->mps_dev);
sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
- device_get_unit(sc->mps_dev), &sc->mps_mtx, num_sim_reqs,
- num_sim_reqs, sassc->devq);
+ unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
if (sassc->sim == NULL) {
mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
error = EINVAL;
goto out;
}
+ TAILQ_INIT(&sassc->ev_queue);
+
+ /* Initialize taskqueue for Event Handling */
+ TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
+ sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
+ taskqueue_thread_enqueue, &sassc->ev_tq);
+
+ /* Run the task queue with lowest priority */
+ taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
+ device_get_nameunit(sc->mps_dev));
+
+ TAILQ_INIT(&sassc->ccb_scanq);
+ error = mps_kproc_create(mpssas_scanner_thread, sassc,
+ &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
+ if (error) {
+ mps_printf(sc, "Error %d starting rescan thread\n", error);
+ goto out;
+ }
+
+ mps_lock(sc);
+ sassc->flags |= MPSSAS_SCANTHREAD;
+
/*
* XXX There should be a bus for every port on the adapter, but since
* we're just going to fake the topology for now, we'll pretend that
* everything is just a target on a single bus.
*/
- mps_lock(sc);
if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
error);
@@ -744,14 +659,25 @@ mps_attach_sas(struct mps_softc *sc)
* the simq will prevent the CAM boottime scanner from running
* before discovery is complete.
*/
- sassc->flags = MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
+ sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
xpt_freeze_simq(sassc->sim, 1);
-
- mps_unlock(sc);
+ sc->sassc->startup_refcount = 0;
callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
sassc->discovery_timeouts = 0;
+ sassc->tm_count = 0;
+
+#if __FreeBSD_version >= 1000006
+ status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
+ if (status != CAM_REQ_CMP) {
+ mps_printf(sc, "Error %#x registering async handler for "
+ "AC_ADVINFO_CHANGED events\n", status);
+ }
+#endif
+
+ mps_unlock(sc);
+
mpssas_register_events(sc);
out:
if (error)
@@ -770,22 +696,41 @@ mps_detach_sas(struct mps_softc *sc)
return (0);
sassc = sc->sassc;
+ mps_deregister_events(sc, sassc->mpssas_eh);
+
+ /*
+ * Drain and free the event handling taskqueue with the lock
+ * unheld so that any parallel processing tasks drain properly
+ * without deadlocking.
+ */
+ if (sassc->ev_tq != NULL)
+ taskqueue_free(sassc->ev_tq);
/* Make sure CAM doesn't wedge if we had to bail out early. */
mps_lock(sc);
- if (sassc->flags & MPSSAS_IN_STARTUP)
- xpt_release_simq(sassc->sim, 1);
- mps_unlock(sc);
- if (sassc->mpssas_eh != NULL)
- mps_deregister_events(sc, sassc->mpssas_eh);
+ /* Deregister our async handler */
+#if __FreeBSD_version >= 1000006
+ xpt_register_async(0, mpssas_async, sc, NULL);
+#endif
- mps_lock(sc);
+ if (sassc->flags & MPSSAS_IN_STARTUP)
+ xpt_release_simq(sassc->sim, 1);
if (sassc->sim != NULL) {
xpt_bus_deregister(cam_sim_path(sassc->sim));
cam_sim_free(sassc->sim, FALSE);
}
+
+ if (sassc->flags & MPSSAS_SCANTHREAD) {
+ sassc->flags |= MPSSAS_SHUTDOWN;
+ wakeup(&sassc->ccb_scanq);
+
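+ /* Wait for the scan thread to acknowledge the shutdown and
+ * exit, but give up after 30 seconds. */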
+ if (sassc->flags & MPSSAS_SCANTHREAD) {
+ msleep(&sassc->flags, &sc->mps_mtx, PRIBIO,
+ "mps_shutdown", 30 * hz);
+ }
+ }
mps_unlock(sc);
if (sassc->devq != NULL)
@@ -798,7 +743,7 @@ mps_detach_sas(struct mps_softc *sc)
return (0);
}
-static void
+void
mpssas_discovery_end(struct mpssas_softc *sassc)
{
struct mps_softc *sc = sassc->sc;
@@ -808,59 +753,28 @@ mpssas_discovery_end(struct mpssas_softc *sassc)
if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
callout_stop(&sassc->discovery_callout);
- if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
- mps_dprint(sc, MPS_INFO,
- "mpssas_discovery_end: removing confighook\n");
- sassc->flags &= ~MPSSAS_IN_STARTUP;
- xpt_release_simq(sassc->sim, 1);
- }
-#if 0
- mpssas_announce_device(sassc, NULL);
-#endif
-
}
static void
-mpssas_announce_device(struct mpssas_softc *sassc, struct mpssas_target *targ)
+mpssas_discovery_timeout(void *data)
{
- union ccb *ccb;
- int bus, tid, lun;
+ struct mpssas_softc *sassc = data;
+ struct mps_softc *sc;
- /*
- * Force a rescan, a hackish way to announce devices.
- * XXX Doing a scan on an individual device is hackish in that it
- * won't scan the LUNs.
- * XXX Does it matter if any of this fails?
- */
- bus = cam_sim_path(sassc->sim);
- if (targ != NULL) {
- tid = targ->tid;
- lun = 0;
- } else {
- tid = CAM_TARGET_WILDCARD;
- lun = CAM_LUN_WILDCARD;
- }
- ccb = xpt_alloc_ccb_nowait();
- if (ccb == NULL)
- return;
- if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, bus, tid,
- CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
- xpt_free_ccb(ccb);
- return;
- }
- mps_dprint(sassc->sc, MPS_INFO, "Triggering rescan of %d:%d:-1\n",
- bus, tid);
- xpt_rescan(ccb);
-}
+ sc = sassc->sc;
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
-static void
-mpssas_startup(void *data)
-{
- struct mpssas_softc *sassc = data;
+ mps_lock(sc);
+ mps_printf(sc,
+ "Timeout waiting for discovery, interrupts may not be working!\n");
+ sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
- mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
+ /* Poll the hardware for events in case interrupts aren't working */
+ mps_intr_locked(sc);
+
+ mps_printf(sassc->sc,
+ "Finished polling after discovery timeout at %d\n", ticks);
- mps_lock(sassc->sc);
if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
mpssas_discovery_end(sassc);
} else {
@@ -877,31 +791,8 @@ mpssas_startup(void *data)
mpssas_discovery_end(sassc);
}
}
- mps_unlock(sassc->sc);
-
- return;
-}
-
-static void
-mpssas_discovery_timeout(void *data)
-{
- struct mpssas_softc *sassc = data;
- struct mps_softc *sc;
-
- sc = sassc->sc;
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
-
- mps_lock(sc);
- mps_printf(sc,
- "Timeout waiting for discovery, interrupts may not be working!\n");
- sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
- /* Poll the hardware for events in case interrupts aren't working */
- mps_intr_locked(sc);
mps_unlock(sc);
-
- /* Check the status of discovery and re-arm the timeout if needed */
- mpssas_startup(sassc);
}
static void
@@ -911,8 +802,9 @@ mpssas_action(struct cam_sim *sim, union ccb *ccb)
sassc = cam_sim_softc(sim);
- mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
+ mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
ccb->ccb_h.func_code);
+ mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
switch (ccb->ccb_h.func_code) {
case XPT_PATH_INQ:
@@ -925,7 +817,7 @@ mpssas_action(struct cam_sim *sim, union ccb *ccb)
cpi->hba_misc = PIM_NOBUSRESET;
cpi->hba_eng_cnt = 0;
cpi->max_target = sassc->sc->facts->MaxTargets - 1;
- cpi->max_lun = 8;
+ cpi->max_lun = 0;
cpi->initiator_id = 255;
strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
@@ -937,7 +829,12 @@ mpssas_action(struct cam_sim *sim, union ccb *ccb)
cpi->transport_version = 0;
cpi->protocol = PROTO_SCSI;
cpi->protocol_version = SCSI_REV_SPC;
- cpi->maxio = MAXPHYS;
+#if __FreeBSD_version >= 800001
+ /*
+ * XXX KDM where does this number come from?
+ */
+ cpi->maxio = 256 * 1024;
+#endif
cpi->ccb_h.status = CAM_REQ_CMP;
break;
}
@@ -989,11 +886,14 @@ mpssas_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_RESET_DEV:
+ mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
mpssas_action_resetdev(sassc, ccb);
return;
case XPT_RESET_BUS:
case XPT_ABORT:
case XPT_TERM_IO:
+ mps_printf(sassc->sc, "mpssas_action faking success for "
+ "abort or reset\n");
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_SCSI_IO:
@@ -1003,7 +903,7 @@ mpssas_action(struct cam_sim *sim, union ccb *ccb)
case XPT_SMP_IO:
mpssas_action_smpio(sassc, ccb);
return;
-#endif /* __FreeBSD_version >= 900026 */
+#endif
default:
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
break;
@@ -1012,349 +912,554 @@ mpssas_action(struct cam_sim *sim, union ccb *ccb)
}
-#if 0
static void
-mpssas_resettimeout_complete(struct mps_softc *sc, struct mps_command *cm)
+mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
+ target_id_t target_id, lun_id_t lun_id)
{
- MPI2_SCSI_TASK_MANAGE_REPLY *resp;
- uint16_t code;
-
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
-
- resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)cm->cm_reply;
- code = resp->ResponseCode;
+ path_id_t path_id = cam_sim_path(sc->sassc->sim);
+ struct cam_path *path;
- mps_free_command(sc, cm);
- mpssas_unfreeze_device(sassc, targ);
+ mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
+ ac_code, target_id, lun_id);
- if (code != MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
- mps_reset_controller(sc);
+ if (xpt_create_path(&path, NULL,
+ path_id, target_id, lun_id) != CAM_REQ_CMP) {
+ mps_printf(sc, "unable to create path for reset "
+ "notification\n");
+ return;
}
- return;
+ xpt_async(ac_code, path, NULL);
+ xpt_free_path(path);
}
-#endif
-static void
-mpssas_scsiio_timeout(void *data)
+static void
+mpssas_complete_all_commands(struct mps_softc *sc)
{
- union ccb *ccb;
- struct mps_softc *sc;
struct mps_command *cm;
- struct mpssas_target *targ;
-#if 0
- char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
-#endif
+ int i;
+ int completed;
- cm = (struct mps_command *)data;
- sc = cm->cm_sc;
+ mps_printf(sc, "%s\n", __func__);
+ mtx_assert(&sc->mps_mtx, MA_OWNED);
- /*
- * Run the interrupt handler to make sure it's not pending. This
- * isn't perfect because the command could have already completed
- * and been re-used, though this is unlikely.
- */
- mps_lock(sc);
- mps_intr_locked(sc);
- if (cm->cm_state == MPS_CM_STATE_FREE) {
- mps_unlock(sc);
- return;
+ /* complete all commands with a NULL reply */
+ for (i = 1; i < sc->num_reqs; i++) {
+ cm = &sc->commands[i];
+ cm->cm_reply = NULL;
+ completed = 0;
+
+ if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
+ cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
+
+ if (cm->cm_complete != NULL) {
+ mpssas_log_command(cm,
+ "completing cm %p state %x ccb %p for diag reset\n",
+ cm, cm->cm_state, cm->cm_ccb);
+
+ cm->cm_complete(sc, cm);
+ completed = 1;
+ }
+
+ if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
+ mpssas_log_command(cm,
+ "waking up cm %p state %x ccb %p for diag reset\n",
+ cm, cm->cm_state, cm->cm_ccb);
+ wakeup(cm);
+ completed = 1;
+ }
+
+ if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
+ /* this should never happen, but if it does, log */
+ mpssas_log_command(cm,
+ "cm %p state %x flags 0x%x ccb %p during diag "
+ "reset\n", cm, cm->cm_state, cm->cm_flags,
+ cm->cm_ccb);
+ }
}
+}
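+/*
+ * Note on the loop bound in mpssas_complete_all_commands() above: it
+ * starts at index 1 on the assumption that SMID 0 is reserved by the
+ * MPI2 interface and never maps to a host command in sc->commands.
+ */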
- ccb = cm->cm_complete_data;
- targ = cm->cm_targ;
- if (targ == 0x00)
- /* Driver bug */
- targ = &sc->sassc->targets[ccb->ccb_h.target_id];
+void
+mpssas_handle_reinit(struct mps_softc *sc)
+{
+ int i;
- xpt_print(ccb->ccb_h.path, "SCSI command timeout on device handle "
- "0x%04x SMID %d\n", targ->handle, cm->cm_desc.Default.SMID);
- /*
- * XXX KDM this is useful for debugging purposes, but the existing
- * scsi_op_desc() implementation can't handle a NULL value for
- * inq_data. So this will remain commented out until I bring in
- * those changes as well.
+ /* Go back into startup mode and freeze the simq, so that CAM
+ * doesn't send any commands until after we've rediscovered all
+ * targets and found the proper device handles for them.
+ *
+ * After the reset, portenable will trigger discovery, and after all
+ * discovery-related activities have finished, the simq will be
+ * released.
*/
-#if 0
- xpt_print(ccb->ccb_h.path, "Timed out command: %s. CDB %s\n",
- scsi_op_desc((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
- ccb->csio.cdb_io.cdb_ptr[0] :
- ccb->csio.cdb_io.cdb_bytes[0], NULL),
- scsi_cdb_string((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
- ccb->csio.cdb_io.cdb_ptr :
- ccb->csio.cdb_io.cdb_bytes, cdb_str,
- sizeof(cdb_str)));
-#endif
+ mps_printf(sc, "%s startup\n", __func__);
+ sc->sassc->flags |= MPSSAS_IN_STARTUP;
+ sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
+ xpt_freeze_simq(sc->sassc->sim, 1);
- /* Inform CAM about the timeout and that recovery is starting. */
-#if 0
- if ((targ->flags & MPSSAS_TARGET_INRECOVERY) == 0) {
- mpssas_freeze_device(sc->sassc, targ);
- ccb->ccb_h.status = CAM_CMD_TIMEOUT;
- xpt_done(ccb);
- }
-#endif
- mpssas_freeze_device(sc->sassc, targ);
- ccb->ccb_h.status = CAM_CMD_TIMEOUT;
+ /* notify CAM of a bus reset */
+ mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD);
+
+ /* complete and cleanup after all outstanding commands */
+ mpssas_complete_all_commands(sc);
+
+ mps_printf(sc, "%s startup %u tm %u after command completion\n",
+ __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
/*
- * recycle the command into recovery so that there's no risk of
- * command allocation failure.
+ * The simq was explicitly frozen above, so set the refcount to 0.
+ * The simq will be explicitly released after port enable completes.
*/
- cm->cm_state = MPS_CM_STATE_TIMEDOUT;
- mpssas_recovery(sc, cm);
- mps_unlock(sc);
+ sc->sassc->startup_refcount = 0;
+
+ /* zero all the target handles, since they may change after the
+ * reset, and we have to rediscover all the targets and use the new
+ * handles.
+ */
+ for (i = 0; i < sc->facts->MaxTargets; i++) {
+ if (sc->sassc->targets[i].outstanding != 0)
+ mps_printf(sc, "target %u outstanding %u\n",
+ i, sc->sassc->targets[i].outstanding);
+ sc->sassc->targets[i].handle = 0x0;
+ sc->sassc->targets[i].exp_dev_handle = 0x0;
+ sc->sassc->targets[i].outstanding = 0;
+ sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
+ }
+}
+static void
+mpssas_tm_timeout(void *data)
+{
+ struct mps_command *tm = data;
+ struct mps_softc *sc = tm->cm_sc;
+
+ mtx_assert(&sc->mps_mtx, MA_OWNED);
+
+ mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
+ mps_reinit(sc);
}
static void
-mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm)
+mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
+ MPI2_SCSI_TASK_MANAGE_REPLY *reply;
MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+ unsigned int cm_count = 0;
+ struct mps_command *cm;
+ struct mpssas_target *targ;
- req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
+ callout_stop(&tm->cm_callout);
+
+ req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+ reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+ targ = tm->cm_targ;
/*
* Currently there should be no way we can hit this case. It only
* happens when we have a failure to allocate chain frames, and
* task management commands don't have S/G lists.
*/
- if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
- mps_printf(sc, "%s: cm_flags = %#x for abort on handle %#04x! "
- "This should not happen!\n", __func__, cm->cm_flags,
- req->DevHandle);
+ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
+ mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
+ "This should not happen!\n", __func__, tm->cm_flags);
+ mpssas_free_tm(sc, tm);
+ return;
}
- mps_printf(sc, "%s: abort request on handle %#04x SMID %d "
- "complete\n", __func__, req->DevHandle, req->TaskMID);
+ if (reply == NULL) {
+ mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
+ if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
+ /* this completion was due to a reset, just cleanup */
+ targ->flags &= ~MPSSAS_TARGET_INRESET;
+ targ->tm = NULL;
+ mpssas_free_tm(sc, tm);
+ }
+ else {
+ /* we should have gotten a reply. */
+ mps_reinit(sc);
+ }
+ return;
+ }
- mpssas_complete_tm_request(sc, cm, /*free_cm*/ 1);
+ mpssas_log_command(tm,
+ "logical unit reset status 0x%x code 0x%x count %u\n",
+ reply->IOCStatus, reply->ResponseCode,
+ reply->TerminationCount);
+
+ /* See if there are any outstanding commands for this LUN.
+ * This could be made more efficient by using a per-LU data
+ * structure of some sort.
+ */
+ TAILQ_FOREACH(cm, &targ->commands, cm_link) {
+ if (cm->cm_lun == tm->cm_lun)
+ cm_count++;
+ }
+
+ if (cm_count == 0) {
+ mpssas_log_command(tm,
+ "logical unit %u finished recovery after reset\n",
+ tm->cm_lun, tm);
+
+ mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
+ tm->cm_lun);
+
+ /* we've finished recovery for this logical unit. check and
+ * see if some other logical unit has a timedout command
+ * that needs to be processed.
+ */
+ cm = TAILQ_FIRST(&targ->timedout_commands);
+ if (cm) {
+ mpssas_send_abort(sc, tm, cm);
+ }
+ else {
+ targ->tm = NULL;
+ mpssas_free_tm(sc, tm);
+ }
+ }
+ else {
+ /* if we still have commands for this LUN, the reset
+ * effectively failed, regardless of the status reported.
+ * Escalate to a target reset.
+ */
+ mpssas_log_command(tm,
+ "logical unit reset complete for tm %p, but still have %u command(s)\n",
+ tm, cm_count);
+ mpssas_send_reset(sc, tm,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
+ }
}
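+/*
+ * A note on the recovery ladder these completion routines implement:
+ * mpssas_abort_complete() escalates a failed ABORT_TASK to a
+ * LOGICAL_UNIT_RESET, mpssas_logical_unit_reset_complete() escalates
+ * a reset that leaves commands outstanding to a TARGET_RESET, and
+ * mpssas_target_reset_complete() falls back to mps_reinit() when the
+ * target still has outstanding commands.
+ */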
static void
-mpssas_recovery(struct mps_softc *sc, struct mps_command *abort_cm)
+mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
- struct mps_command *cm;
- MPI2_SCSI_TASK_MANAGE_REQUEST *req, *orig_req;
+ MPI2_SCSI_TASK_MANAGE_REPLY *reply;
+ MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+ struct mpssas_target *targ;
- cm = mps_alloc_command(sc);
- if (cm == NULL) {
- mps_printf(sc, "%s: command allocation failure\n", __func__);
+ callout_stop(&tm->cm_callout);
+
+ req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+ reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+ targ = tm->cm_targ;
+
+ /*
+ * Currently there should be no way we can hit this case. It only
+ * happens when we have a failure to allocate chain frames, and
+ * task management commands don't have S/G lists.
+ */
+ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
+ mps_printf(sc, "%s: cm_flags = %#x for target reset! "
+ "This should not happen!\n", __func__, tm->cm_flags);
+ mpssas_free_tm(sc, tm);
return;
}
- cm->cm_targ = abort_cm->cm_targ;
- cm->cm_complete = mpssas_abort_complete;
+ if (reply == NULL) {
+ mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
+ if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
+ /* this completion was due to a reset, just cleanup */
+ targ->flags &= ~MPSSAS_TARGET_INRESET;
+ targ->tm = NULL;
+ mpssas_free_tm(sc, tm);
+ }
+ else {
+ /* we should have gotten a reply. */
+ mps_reinit(sc);
+ }
+ return;
+ }
- req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
- orig_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)abort_cm->cm_req;
- req->DevHandle = abort_cm->cm_targ->handle;
- req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
- req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
- memcpy(req->LUN, orig_req->LUN, sizeof(req->LUN));
- req->TaskMID = abort_cm->cm_desc.Default.SMID;
+ mpssas_log_command(tm,
+ "target reset status 0x%x code 0x%x count %u\n",
+ reply->IOCStatus, reply->ResponseCode,
+ reply->TerminationCount);
- cm->cm_data = NULL;
- cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ targ->flags &= ~MPSSAS_TARGET_INRESET;
+
+ if (targ->outstanding == 0) {
+ /* we've finished recovery for this target and all
+ * of its logical units.
+ */
+ mpssas_log_command(tm,
+ "recovery finished after target reset\n");
- mpssas_issue_tm_request(sc, cm);
+ mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
+ CAM_LUN_WILDCARD);
+ targ->tm = NULL;
+ mpssas_free_tm(sc, tm);
+ }
+ else {
+ /* after a target reset, if this target still has
+ * outstanding commands, the reset effectively failed,
+ * regardless of the status reported. escalate.
+ */
+ mpssas_log_command(tm,
+ "target reset complete for tm %p, but still have %u command(s)\n",
+ tm, targ->outstanding);
+ mps_reinit(sc);
+ }
}
-/*
- * Can return 0 or EINPROGRESS on success. Any other value means failure.
- */
+#define MPS_RESET_TIMEOUT 30
+
static int
-mpssas_map_tm_request(struct mps_softc *sc, struct mps_command *cm)
+mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
- int error;
+ MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+ struct mpssas_target *target;
+ int err;
- error = 0;
+ target = tm->cm_targ;
+ if (target->handle == 0) {
+ mps_printf(sc, "%s null devhandle for target_id %d\n",
+ __func__, target->tid);
+ return -1;
+ }
- cm->cm_flags |= MPS_CM_FLAGS_ACTIVE;
- error = mps_map_command(sc, cm);
- if ((error == 0)
- || (error == EINPROGRESS))
- sc->tm_cmds_active++;
+ req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+ req->DevHandle = target->handle;
+ req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ req->TaskType = type;
+
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
+ /* XXX Need to handle invalid LUNs */
+ MPS_SET_LUN(req->LUN, tm->cm_lun);
+ tm->cm_targ->logical_unit_resets++;
+ mpssas_log_command(tm, "sending logical unit reset\n");
+ tm->cm_complete = mpssas_logical_unit_reset_complete;
+ }
+ else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
+ /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
+ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+ tm->cm_targ->target_resets++;
+ tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
+ mpssas_log_command(tm, "sending target reset\n");
+ tm->cm_complete = mpssas_target_reset_complete;
+ }
+ else {
+ mps_printf(sc, "unexpected reset type 0x%x\n", type);
+ return -1;
+ }
- return (error);
+ tm->cm_data = NULL;
+ tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ tm->cm_complete_data = (void *)tm;
+
+ callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
+ mpssas_tm_timeout, tm);
+
+ err = mps_map_command(sc, tm);
+ if (err)
+ mpssas_log_command(tm,
+ "error %d sending reset type %u\n",
+ err, type);
+
+ return err;
}
+
static void
-mpssas_issue_tm_request(struct mps_softc *sc, struct mps_command *cm)
+mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
- int freeze_queue, send_command, error;
+ struct mps_command *cm;
+ MPI2_SCSI_TASK_MANAGE_REPLY *reply;
+ MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+ struct mpssas_target *targ;
- freeze_queue = 0;
- send_command = 0;
- error = 0;
+ callout_stop(&tm->cm_callout);
- mtx_assert(&sc->mps_mtx, MA_OWNED);
+ req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+ reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+ targ = tm->cm_targ;
/*
- * If there are no other pending task management commands, go
- * ahead and send this one. There is a small amount of anecdotal
- * evidence that sending lots of task management commands at once
- * may cause the controller to lock up. Or, if the user has
- * configured the driver (via the allow_multiple_tm_cmds variable) to
- * not serialize task management commands, go ahead and send the
- * command if even other task management commands are pending.
+ * Currently there should be no way we can hit this case. It only
+ * happens when we have a failure to allocate chain frames, and
+ * task management commands don't have S/G lists.
*/
- if (TAILQ_FIRST(&sc->tm_list) == NULL) {
- send_command = 1;
- freeze_queue = 1;
- } else if (sc->allow_multiple_tm_cmds != 0)
- send_command = 1;
-
- TAILQ_INSERT_TAIL(&sc->tm_list, cm, cm_link);
- if (send_command != 0) {
- /*
- * Freeze the SIM queue while we issue the task management
- * command. According to the Fusion-MPT 2.0 spec, task
- * management requests are serialized, and so the host
- * should not send any I/O requests while task management
- * requests are pending.
- */
- if (freeze_queue != 0)
- xpt_freeze_simq(sc->sassc->sim, 1);
+ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
+ mpssas_log_command(tm,
+ "cm_flags = %#x for abort %p TaskMID %u!\n",
+ tm->cm_flags, tm, req->TaskMID);
+ mpssas_free_tm(sc, tm);
+ return;
+ }
- error = mpssas_map_tm_request(sc, cm);
+ if (reply == NULL) {
+ mpssas_log_command(tm,
+ "NULL abort reply for tm %p TaskMID %u\n",
+ tm, req->TaskMID);
+ if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
+ /* this completion was due to a reset, just cleanup */
+ targ->tm = NULL;
+ mpssas_free_tm(sc, tm);
+ }
+ else {
+ /* we should have gotten a reply. */
+ mps_reinit(sc);
+ }
+ return;
+ }
- /*
- * At present, there is no error path back from
- * mpssas_map_tm_request() (which calls mps_map_command())
- * when cm->cm_data == NULL. But since there is a return
- * value, we check it just in case the implementation
- * changes later.
+ mpssas_log_command(tm,
+ "abort TaskMID %u status 0x%x code 0x%x count %u\n",
+ req->TaskMID,
+ reply->IOCStatus, reply->ResponseCode,
+ reply->TerminationCount);
+
+ cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
+ if (cm == NULL) {
+ /* if there are no more timedout commands, we're done with
+ * error recovery for this target.
*/
- if ((error != 0)
- && (error != EINPROGRESS))
- mpssas_tm_complete(sc, cm,
- MPI2_SCSITASKMGMT_RSP_TM_FAILED);
+ mpssas_log_command(tm,
+ "finished recovery after aborting TaskMID %u\n",
+ req->TaskMID);
+
+ targ->tm = NULL;
+ mpssas_free_tm(sc, tm);
+ }
+ else if (req->TaskMID != cm->cm_desc.Default.SMID) {
+ /* abort success, but we have more timedout commands to abort */
+ mpssas_log_command(tm,
+ "continuing recovery after aborting TaskMID %u\n",
+ req->TaskMID);
+
+ mpssas_send_abort(sc, tm, cm);
+ }
+ else {
+ /* we didn't get a command completion, so the abort
+ * failed as far as we're concerned. escalate.
+ */
+ mpssas_log_command(tm,
+ "abort failed for TaskMID %u tm %p\n",
+ req->TaskMID, tm);
+
+ mpssas_send_reset(sc, tm,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
}
}
-static void
-mpssas_tm_complete(struct mps_softc *sc, struct mps_command *cm, int error)
+#define MPS_ABORT_TIMEOUT 5
+
+static int
+mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
{
- MPI2_SCSI_TASK_MANAGE_REPLY *resp;
+ MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+ struct mpssas_target *targ;
+ int err;
- resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)cm->cm_reply;
+ targ = cm->cm_targ;
+ if (targ->handle == 0) {
+ mps_printf(sc, "%s null devhandle for target_id %d\n",
+ __func__, cm->cm_ccb->ccb_h.target_id);
+ return -1;
+ }
- if (resp != NULL)
- resp->ResponseCode = error;
+ req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+ req->DevHandle = targ->handle;
+ req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
- /*
- * Call the callback for this command, it will be
- * removed from the list and freed via the callback.
- */
- cm->cm_complete(sc, cm);
-}
+ /* XXX Need to handle invalid LUNs */
+ MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
-/*
- * Complete a task management request. The basic completion operation will
- * always succeed. Returns status for sending any further task management
- * commands that were queued.
- */
-static int
-mpssas_complete_tm_request(struct mps_softc *sc, struct mps_command *cm,
- int free_cm)
-{
- int error;
+ req->TaskMID = cm->cm_desc.Default.SMID;
- error = 0;
+ tm->cm_data = NULL;
+ tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ tm->cm_complete = mpssas_abort_complete;
+ tm->cm_complete_data = (void *)tm;
+ tm->cm_targ = cm->cm_targ;
+ tm->cm_lun = cm->cm_lun;
- mtx_assert(&sc->mps_mtx, MA_OWNED);
+ callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
+ mpssas_tm_timeout, tm);
- TAILQ_REMOVE(&sc->tm_list, cm, cm_link);
- cm->cm_flags &= ~MPS_CM_FLAGS_ACTIVE;
- sc->tm_cmds_active--;
+ targ->aborts++;
- if (free_cm != 0)
- mps_free_command(sc, cm);
+ err = mps_map_command(sc, tm);
+ if (err)
+ mpssas_log_command(tm,
+ "error %d sending abort for cm %p SMID %u\n",
+ err, cm, req->TaskMID);
+ return err;
+}
- if (TAILQ_FIRST(&sc->tm_list) == NULL) {
- /*
- * Release the SIM queue, we froze it when we sent the first
- * task management request.
- */
- xpt_release_simq(sc->sassc->sim, 1);
- } else if ((sc->tm_cmds_active == 0)
- || (sc->allow_multiple_tm_cmds != 0)) {
- int error;
- struct mps_command *cm2;
-restart_traversal:
+static void
+mpssas_scsiio_timeout(void *data)
+{
+ struct mps_softc *sc;
+ struct mps_command *cm;
+ struct mpssas_target *targ;
- /*
- * We don't bother using TAILQ_FOREACH_SAFE here, but
- * rather use the standard version and just restart the
- * list traversal if we run into the error case.
- * TAILQ_FOREACH_SAFE allows safe removal of the current
- * list element, but if you have a queue of task management
- * commands, all of which have mapping errors, you'll end
- * up with recursive calls to this routine and so you could
- * wind up removing more than just the current list element.
- */
- TAILQ_FOREACH(cm2, &sc->tm_list, cm_link) {
- MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+ cm = (struct mps_command *)data;
+ sc = cm->cm_sc;
+
+ mtx_assert(&sc->mps_mtx, MA_OWNED);
+
+ mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
+
+ /*
+ * Run the interrupt handler to make sure it's not pending. This
+ * isn't perfect because the command could have already completed
+ * and been re-used, though this is unlikely.
+ */
+ mps_intr_locked(sc);
+ if (cm->cm_state == MPS_CM_STATE_FREE) {
+ mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
+ return;
+ }
- /* This command is active, no need to send it again */
- if (cm2->cm_flags & MPS_CM_FLAGS_ACTIVE)
- continue;
+ if (cm->cm_ccb == NULL) {
+ mps_printf(sc, "command timeout with NULL ccb\n");
+ return;
+ }
- req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm2->cm_req;
+ mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
+ cm, cm->cm_ccb);
- mps_printf(sc, "%s: sending deferred task management "
- "request for handle %#04x SMID %d\n", __func__,
- req->DevHandle, req->TaskMID);
+ targ = cm->cm_targ;
+ targ->timeouts++;
- error = mpssas_map_tm_request(sc, cm2);
+ /* XXX first, check the firmware state, to see if it's still
+ * operational. if not, do a diag reset.
+ */
- /*
- * Check for errors. If we had an error, complete
- * this command with an error, and keep going through
- * the list until we are able to send at least one
- * command or all of them are completed with errors.
- *
- * We don't want to wind up in a situation where
- * we're stalled out with no way for queued task
- * management commands to complete.
- *
- * Note that there is not currently an error path
- * back from mpssas_map_tm_request() (which calls
- * mps_map_command()) when cm->cm_data == NULL.
- * But we still want to check for errors here in
- * case the implementation changes, or in case
- * there is some reason for a data payload here.
- */
- if ((error != 0)
- && (error != EINPROGRESS)) {
- mpssas_tm_complete(sc, cm,
- MPI2_SCSITASKMGMT_RSP_TM_FAILED);
+ cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
+ cm->cm_state = MPS_CM_STATE_TIMEDOUT;
+ TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
- /*
- * If we don't currently have any commands
- * active, go back to the beginning and see
- * if there are any more that can be started.
- * Otherwise, we're done here.
- */
- if (sc->tm_cmds_active == 0)
- goto restart_traversal;
- else
- break;
- }
+ if (targ->tm != NULL) {
+ /* target already in recovery, just queue up another
+ * timedout command to be processed later.
+ */
+ mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
+ cm, targ->tm);
+ }
+ else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
+ mps_printf(sc, "timedout cm %p allocated tm %p\n",
+ cm, targ->tm);
- /*
- * If the user only wants one task management command
- * active at a time, we're done, since we've
- * already successfully sent a command at this point.
- */
- if (sc->allow_multiple_tm_cmds == 0)
- break;
- }
+ /* start recovery by aborting the first timedout command */
+ mpssas_send_abort(sc, targ->tm, cm);
+ }
+ else {
+ /* XXX queue this target up for recovery once a TM becomes
+ * available. The firmware only has a limited number of
+ * HighPriority credits for the high priority requests used
+ * for task management, and we ran out.
+ *
+ * Isilon: don't worry about this for now, since we have
+ * more credits than disks in an enclosure, and limit
+ * ourselves to one TM per target for recovery.
+ */
+ mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
+ cm);
}
- return (error);
}
static void
@@ -1364,16 +1469,41 @@ mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
struct ccb_scsiio *csio;
struct mps_softc *sc;
struct mpssas_target *targ;
+ struct mpssas_lun *lun;
struct mps_command *cm;
-
- mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
+ uint8_t i, lba_byte, *ref_tag_addr;
+ uint16_t eedp_flags;
sc = sassc->sc;
+ mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
+ mtx_assert(&sc->mps_mtx, MA_OWNED);
csio = &ccb->csio;
targ = &sassc->targets[csio->ccb_h.target_id];
if (targ->handle == 0x0) {
- csio->ccb_h.status = CAM_SEL_TIMEOUT;
+ mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
+ __func__, csio->ccb_h.target_id);
+ csio->ccb_h.status = CAM_TID_INVALID;
+ xpt_done(ccb);
+ return;
+ }
+ /*
+ * If devinfo is 0 this will be a volume. In that case don't tell CAM
+ * that the volume has timed out. We want volumes to be enumerated
+ * until they are deleted/removed, not just failed.
+ */
+ if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
+ if (targ->devinfo == 0)
+ csio->ccb_h.status = CAM_REQ_CMP;
+ else
+ csio->ccb_h.status = CAM_SEL_TIMEOUT;
+ xpt_done(ccb);
+ return;
+ }
+
+ if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
+ mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
+ csio->ccb_h.status = CAM_TID_INVALID;
xpt_done(ccb);
return;
}
@@ -1446,6 +1576,7 @@ mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
break;
}
+ req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
mps_free_command(sc, cm);
@@ -1461,8 +1592,57 @@ mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
req->IoFlags = csio->cdb_len;
/*
- * XXX need to handle S/G lists and physical addresses here.
+ * Check if EEDP is supported and enabled. If it is then check if the
+ * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
+ * is formatted for EEDP support. If all of this is true, set CDB up
+ * for EEDP transfer.
*/
+ eedp_flags = op_code_prot[req->CDB.CDB32[0]];
+ if (sc->eedp_enabled && eedp_flags) {
+ SLIST_FOREACH(lun, &targ->luns, lun_link) {
+ if (lun->lun_id == csio->ccb_h.target_lun) {
+ break;
+ }
+ }
+
+ if ((lun != NULL) && (lun->eedp_formatted)) {
+ req->EEDPBlockSize = lun->eedp_block_size;
+ eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
+ req->EEDPFlags = eedp_flags;
+
+ /*
+ * If CDB less than 32, fill in Primary Ref Tag with
+ * low 4 bytes of LBA. If CDB is 32, tag stuff is
+ * already there. Also, set protection bit. FreeBSD
+ * currently does not support CDBs bigger than 16, but
+ * the code doesn't hurt, and will be here for the
+ * future.
+ */
+ if (csio->cdb_len != 32) {
+ lba_byte = (csio->cdb_len == 16) ? 6 : 2;
+ ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
+ PrimaryReferenceTag;
+ for (i = 0; i < 4; i++) {
+ *ref_tag_addr =
+ req->CDB.CDB32[lba_byte + i];
+ ref_tag_addr++;
+ }
+ req->CDB.EEDP32.PrimaryApplicationTagMask =
+ 0xFFFF;
+ req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
+ 0x20;
+ } else {
+ eedp_flags |=
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
+ req->EEDPFlags = eedp_flags;
+ req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
+ 0x1F) | 0x20;
+ }
+ }
+ }
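+ /*
+ * Worked example (illustrative values): for a READ(10), cdb_len is
+ * 10, so lba_byte is 2 and the loop above copies CDB bytes 2..5,
+ * the 32-bit LBA, into EEDP32.PrimaryReferenceTag. The final
+ * (CDB32[1] & 0x1F) | 0x20 sets the RDPROTECT/WRPROTECT field to
+ * 001b, asking the device to verify protection information on the
+ * transfer.
+ */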
+
cm->cm_data = csio->data_ptr;
cm->cm_length = csio->dxfer_len;
cm->cm_sge = &req->SGL;
@@ -1472,15 +1652,33 @@ mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
cm->cm_complete = mpssas_scsiio_complete;
cm->cm_complete_data = ccb;
cm->cm_targ = targ;
+ cm->cm_lun = csio->ccb_h.target_lun;
+ cm->cm_ccb = ccb;
- sc->io_cmds_active++;
- if (sc->io_cmds_active > sc->io_cmds_highwater)
- sc->io_cmds_highwater = sc->io_cmds_active;
+ /*
+ * If HBA is a WD and the command is not for a retry, try to build a
+ * direct I/O message. If failed, or the command is for a retry, send
+ * the I/O to the IR volume itself.
+ */
+ if (sc->WD_valid_config) {
+ if (ccb->ccb_h.status != MPS_WD_RETRY) {
+ mpssas_direct_drive_io(sassc, cm, ccb);
+ } else {
+ ccb->ccb_h.status = CAM_REQ_INPROG;
+ }
+ }
- TAILQ_INSERT_TAIL(&sc->io_list, cm, cm_link);
callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
mpssas_scsiio_timeout, cm);
+ targ->issued++;
+ targ->outstanding++;
+ TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
+
+ if ((sc->mps_debug & MPS_TRACE) != 0)
+ mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
+ __func__, cm, ccb, targ->outstanding);
+
mps_map_command(sc, cm);
return;
}
@@ -1490,19 +1688,25 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
MPI2_SCSI_IO_REPLY *rep;
union ccb *ccb;
+ struct ccb_scsiio *csio;
struct mpssas_softc *sassc;
- int dir = 0;
+ struct scsi_vpd_supported_page_list *vpd_list = NULL;
+ u8 *TLR_bits, TLR_on;
+ int dir = 0, i;
+ u16 alloc_len;
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+ mps_dprint(sc, MPS_TRACE,
+ "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
+ __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
+ cm->cm_targ->outstanding);
callout_stop(&cm->cm_callout);
- TAILQ_REMOVE(&sc->io_list, cm, cm_link);
- sc->io_cmds_active--;
+ mtx_assert(&sc->mps_mtx, MA_OWNED);
sassc = sc->sassc;
ccb = cm->cm_complete_data;
+ csio = &ccb->csio;
rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
-
/*
* XXX KDM if the chain allocation fails, does it matter if we do
* the sync and unload here? It is simpler to do it in every case,
@@ -1517,6 +1721,41 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
}
+ cm->cm_targ->completed++;
+ cm->cm_targ->outstanding--;
+ TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
+
+ if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
+ TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
+ if (cm->cm_reply != NULL)
+ mpssas_log_command(cm,
+ "completed timedout cm %p ccb %p during recovery "
+ "ioc %x scsi %x state %x xfer %u\n",
+ cm, cm->cm_ccb,
+ rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
+ rep->TransferCount);
+ else
+ mpssas_log_command(cm,
+ "completed timedout cm %p ccb %p during recovery\n",
+ cm, cm->cm_ccb);
+ } else if (cm->cm_targ->tm != NULL) {
+ if (cm->cm_reply != NULL)
+ mpssas_log_command(cm,
+ "completed cm %p ccb %p during recovery "
+ "ioc %x scsi %x state %x xfer %u\n",
+ cm, cm->cm_ccb,
+ rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
+ rep->TransferCount);
+ else
+ mpssas_log_command(cm,
+ "completed cm %p ccb %p during recovery\n",
+ cm, cm->cm_ccb);
+ } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
+ mpssas_log_command(cm,
+ "reset completed cm %p ccb %p\n",
+ cm, cm->cm_ccb);
+ }
+
if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
/*
* We ran into an error after we tried to map the command,
@@ -1550,16 +1789,31 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
/* Take the fast path to completion */
if (cm->cm_reply == NULL) {
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
- ccb->ccb_h.status = CAM_REQ_CMP;
- ccb->csio.scsi_status = SCSI_STATUS_OK;
-
+ if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
+ ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
+ else {
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ ccb->csio.scsi_status = SCSI_STATUS_OK;
+ }
if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
mps_dprint(sc, MPS_INFO,
"Unfreezing SIM queue\n");
}
- } else {
+ }
+
+ /*
+ * There are two scenarios where the status won't be
+ * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
+ * set, the second is in the MPS_FLAGS_DIAGRESET above.
+ */
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ /*
+ * Freeze the dev queue so that commands are
+ * executed in the correct order after error
+ * recovery.
+ */
ccb->ccb_h.status |= CAM_DEV_QFRZN;
xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
}
@@ -1568,37 +1822,149 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
return;
}
- mps_dprint(sc, MPS_INFO, "(%d:%d:%d) IOCStatus= 0x%x, "
- "ScsiStatus= 0x%x, SCSIState= 0x%x TransferCount= 0x%x\n",
- xpt_path_path_id(ccb->ccb_h.path),
- xpt_path_target_id(ccb->ccb_h.path),
- xpt_path_lun_id(ccb->ccb_h.path), rep->IOCStatus,
- rep->SCSIStatus, rep->SCSIState, rep->TransferCount);
+ if (sc->mps_debug & MPS_TRACE)
+ mpssas_log_command(cm,
+ "ioc %x scsi %x state %x xfer %u\n",
+ rep->IOCStatus, rep->SCSIStatus,
+ rep->SCSIState, rep->TransferCount);
+
+ /*
+ * If this is a Direct Drive I/O, reissue the I/O to the original IR
+ * Volume if an error occurred (normal I/O retry). Use the original
+ * CCB, but set a flag that this will be a retry so that it's sent to
+ * the original volume. Free the command but reuse the CCB.
+ */
+ if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
+ mps_free_command(sc, cm);
+ ccb->ccb_h.status = MPS_WD_RETRY;
+ mpssas_action_scsiio(sassc, ccb);
+ return;
+ }
switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
- case MPI2_IOCSTATUS_BUSY:
- case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
- /*
- * The controller is overloaded, try waiting a bit for it
- * to free up.
- */
- ccb->ccb_h.status = CAM_BUSY;
- break;
case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
- ccb->csio.resid = cm->cm_length - rep->TransferCount;
+ csio->resid = cm->cm_length - rep->TransferCount;
/* FALLTHROUGH */
case MPI2_IOCSTATUS_SUCCESS:
case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
- ccb->ccb_h.status = CAM_REQ_CMP;
- break;
- case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
- /* resid is ignored for this condition */
- ccb->csio.resid = 0;
- ccb->ccb_h.status = CAM_DATA_RUN_ERR;
+
+ if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
+ MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
+ mpssas_log_command(cm, "recovered error\n");
+
+ /* Completion failed at the transport level. */
+ if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
+ MPI2_SCSI_STATE_TERMINATED)) {
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ break;
+ }
+
+ /* In a modern packetized environment, an autosense failure
+ * implies that there's not much else that can be done to
+ * recover the command.
+ */
+ if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
+ ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
+ break;
+ }
+
+ /*
+ * CAM doesn't care about SAS Response Info data, but if this is
+ * the state check if TLR should be done. If not, clear the
+ * TLR_bits for the target.
+ */
+ if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
+ ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
+ MPS_SCSI_RI_INVALID_FRAME)) {
+ sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
+ (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
+ }
+
+ /*
+ * Intentionally override the normal SCSI status reporting
+ * for these two cases. These are likely to happen in a
+ * multi-initiator environment, and we want to make sure that
+ * CAM retries these commands rather than fail them.
+ */
+ if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
+ (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
+ ccb->ccb_h.status = CAM_REQ_ABORTED;
+ break;
+ }
+
+ /* Handle normal status and sense */
+ csio->scsi_status = rep->SCSIStatus;
+ if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ else
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+
+ if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ int sense_len, returned_sense_len;
+
+ returned_sense_len = min(rep->SenseCount,
+ sizeof(struct scsi_sense_data));
+ if (returned_sense_len < ccb->csio.sense_len)
+ ccb->csio.sense_resid = ccb->csio.sense_len -
+ returned_sense_len;
+ else
+ ccb->csio.sense_resid = 0;
+
+ sense_len = min(returned_sense_len,
+ ccb->csio.sense_len - ccb->csio.sense_resid);
+ bzero(&ccb->csio.sense_data,
+ sizeof(ccb->csio.sense_data));
+ bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
+ ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
+ }
+
+ /*
+ * Check if this is an INQUIRY command. If it's a VPD inquiry,
+ * and it's page code 0 (Supported Page List), and there is
+ * inquiry data, and this is for a sequential access device, and
+ * the device is an SSP target, and TLR is supported by the
+ * controller, turn the TLR_bits value ON if page 0x90 is
+ * supported.
+ */
+ if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
+ (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
+ (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
+ (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
+ T_SEQUENTIAL) && (sc->control_TLR) &&
+ (sc->mapping_table[csio->ccb_h.target_id].device_info &
+ MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
+ vpd_list = (struct scsi_vpd_supported_page_list *)
+ csio->data_ptr;
+ TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
+ TLR_bits;
+ *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
+ TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
+ alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
+ csio->cdb_io.cdb_bytes[4];
+ for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
+ if (vpd_list->list[i] == 0x90) {
+ *TLR_bits = TLR_on;
+ break;
+ }
+ }
+ }
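+ /*
+ * Background (an assumption from SPC, not defined by this driver):
+ * VPD page 0x90 is the Protocol Specific Logical Unit Information
+ * page, where SSP tape devices report Transport Layer Retries
+ * support; its presence in the supported-page list is what turns
+ * TLR_bits on for the target above.
+ */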
break;
case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
- ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ /*
+ * If devinfo is 0 this will be a volume. In that case don't
+ * tell CAM that the volume is not there. We want volumes to
+ * be enumerated until they are deleted/removed, not just
+ * failed.
+ */
+ if (cm->cm_targ->devinfo == 0)
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ else
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ break;
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ mps_print_scsiio_cmd(sc, cm);
+ ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
break;
case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
/*
@@ -1615,22 +1981,23 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
else
ccb->ccb_h.status = CAM_REQ_ABORTED;
break;
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ /* resid is ignored for this condition */
+ csio->resid = 0;
+ ccb->ccb_h.status = CAM_DATA_RUN_ERR;
+ break;
case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
-#if 0
- ccb->ccb_h.status = CAM_REQ_ABORTED;
-#endif
- mps_printf(sc, "(%d:%d:%d) terminated ioc %x scsi %x state %x "
- "xfer %u\n", xpt_path_path_id(ccb->ccb_h.path),
- xpt_path_target_id(ccb->ccb_h.path),
- xpt_path_lun_id(ccb->ccb_h.path),
- rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
- rep->TransferCount);
+ /*
+ * Since these are generally external (i.e. hopefully
+ * transient transport-related) errors, retry these without
+ * decrementing the retry count.
+ */
ccb->ccb_h.status = CAM_REQUEUE_REQ;
- break;
- case MPI2_IOCSTATUS_INVALID_SGL:
- mps_print_scsiio_cmd(sc, cm);
- ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
+ mpssas_log_command(cm,
+ "terminated ioc %x scsi %x state %x xfer %u\n",
+ rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
+ rep->TransferCount);
break;
case MPI2_IOCSTATUS_INVALID_FUNCTION:
case MPI2_IOCSTATUS_INTERNAL_ERROR:
@@ -1643,63 +2010,291 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
default:
+ mpssas_log_command(cm,
+ "completed ioc %x scsi %x state %x xfer %u\n",
+ rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
+ rep->TransferCount);
+ csio->resid = cm->cm_length;
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ break;
}
-
- if ((rep->SCSIState & MPI2_SCSI_STATE_NO_SCSI_STATUS) == 0) {
- ccb->csio.scsi_status = rep->SCSIStatus;
-
- switch (rep->SCSIStatus) {
- case MPI2_SCSI_STATUS_TASK_SET_FULL:
- case MPI2_SCSI_STATUS_CHECK_CONDITION:
- ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
- break;
- case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
- case MPI2_SCSI_STATUS_TASK_ABORTED:
- ccb->ccb_h.status = CAM_REQ_ABORTED;
- break;
- case MPI2_SCSI_STATUS_GOOD:
- default:
- break;
- }
- }
-
- if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
- int sense_len;
-
- if (rep->SenseCount < ccb->csio.sense_len)
- ccb->csio.sense_resid = ccb->csio.sense_len -
- rep->SenseCount;
- else
- ccb->csio.sense_resid = 0;
-
- sense_len = min(rep->SenseCount, ccb->csio.sense_len -
- ccb->csio.sense_resid);
- bzero(&ccb->csio.sense_data, sizeof(&ccb->csio.sense_data));
- bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
- ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
- }
-
- if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
- ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
-
- if (rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
- ccb->ccb_h.status = CAM_REQ_CMP_ERR;
-
if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
- mps_printf(sc, "Command completed, unfreezing SIM queue\n");
+ mps_dprint(sc, MPS_INFO, "Command completed, "
+ "unfreezing SIM queue\n");
}
+
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
ccb->ccb_h.status |= CAM_DEV_QFRZN;
xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
}
+
mps_free_command(sc, cm);
xpt_done(ccb);
}
+static void
+mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
+ union ccb *ccb)
+{
+ pMpi2SCSIIORequest_t pIO_req;
+ struct mps_softc *sc = sassc->sc;
+ uint64_t virtLBA;
+ uint32_t physLBA, stripe_offset, stripe_unit;
+ uint32_t io_size, column;
+ uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
+
+ /*
+ * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
+ * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
+ * will be sent to the IR volume itself. Since Read6 and Write6 are a
+ * bit different than the 10/16 CDBs, handle them separately.
+ */
+ pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
+ CDB = pIO_req->CDB.CDB32;
+
+ /*
+ * Handle 6 byte CDBs.
+ */
+ if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
+ (CDB[0] == WRITE_6))) {
+ /*
+ * Get the transfer size in blocks.
+ */
+ io_size = (cm->cm_length >> sc->DD_block_exponent);
+
+ /*
+ * Get virtual LBA given in the CDB.
+ */
+ virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
+ ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
+
+ /*
+ * Check that LBA range for I/O does not exceed volume's
+ * MaxLBA.
+ */
+ if ((virtLBA + (uint64_t)io_size - 1) <=
+ sc->DD_max_lba) {
+ /*
+ * Check if the I/O crosses a stripe boundary. If not,
+ * translate the virtual LBA to a physical LBA and set
+ * the DevHandle for the PhysDisk to be used. If it
+ * does cross a boundary, do normal I/O. To get the
+ * right DevHandle to use, get the map number for the
+ * column, then use that map number to look up the
+ * DevHandle of the PhysDisk.
+ */
+ stripe_offset = (uint32_t)virtLBA &
+ (sc->DD_stripe_size - 1);
+ if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
+ physLBA = (uint32_t)virtLBA >>
+ sc->DD_stripe_exponent;
+ stripe_unit = physLBA / sc->DD_num_phys_disks;
+ column = physLBA % sc->DD_num_phys_disks;
+ pIO_req->DevHandle =
+ sc->DD_column_map[column].dev_handle;
+ cm->cm_desc.SCSIIO.DevHandle =
+ pIO_req->DevHandle;
+
+ physLBA = (stripe_unit <<
+ sc->DD_stripe_exponent) + stripe_offset;
+ ptrLBA = &pIO_req->CDB.CDB32[1];
+ physLBA_byte = (uint8_t)(physLBA >> 16);
+ *ptrLBA = physLBA_byte;
+ ptrLBA = &pIO_req->CDB.CDB32[2];
+ physLBA_byte = (uint8_t)(physLBA >> 8);
+ *ptrLBA = physLBA_byte;
+ ptrLBA = &pIO_req->CDB.CDB32[3];
+ physLBA_byte = (uint8_t)physLBA;
+ *ptrLBA = physLBA_byte;
+
+ /*
+ * Set flag that Direct Drive I/O is
+ * being done.
+ */
+ cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
+ }
+ }
+ return;
+ }
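+ /*
+ * Worked example of the translation above (hypothetical geometry):
+ * with DD_stripe_exponent = 6 (64-block stripes) and
+ * DD_num_phys_disks = 2, virtual LBA 100 yields stripe_offset =
+ * 100 & 63 = 36, physLBA = 100 >> 6 = 1, stripe_unit = 1 / 2 = 0,
+ * column = 1 % 2 = 1, so the I/O is reissued at LBA (0 << 6) + 36 =
+ * 36 on the physical disk mapped to column 1.
+ */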
+
+ /*
+ * Handle 10 or 16 byte CDBs.
+ */
+ if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
+ (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
+ (CDB[0] == WRITE_16))) {
+ /*
+ * For 16-byte CDBs, verify that the upper 4 bytes of the CDB
+ * are 0. If not, this is accessing beyond 2TB so handle it in
+ * the else section. 10-byte CDBs are OK.
+ */
+ if ((CDB[0] < READ_16) ||
+ !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
+ /*
+ * Get the transfer size in blocks.
+ */
+ io_size = (cm->cm_length >> sc->DD_block_exponent);
+
+ /*
+ * Get virtual LBA. Point to correct lower 4 bytes of
+ * LBA in the CDB depending on command.
+ */
+ lba_idx = (CDB[0] < READ_16) ? 2 : 6;
+ virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
+ ((uint64_t)CDB[lba_idx + 1] << 16) |
+ ((uint64_t)CDB[lba_idx + 2] << 8) |
+ (uint64_t)CDB[lba_idx + 3];
+
+ /*
+ * Check that LBA range for I/O does not exceed volume's
+ * MaxLBA.
+ */
+ if ((virtLBA + (uint64_t)io_size - 1) <=
+ sc->DD_max_lba) {
+ /*
+ * Check if the I/O crosses a stripe boundary.
+ * If not, translate the virtual LBA to a
+ * physical LBA and set the DevHandle for the
+ * PhysDisk to be used. If it does cross a
+ * boundary, do normal I/O. To get the right
+ * DevHandle to use, get the map number for the
+ * column, then use that map number to look up
+ * the DevHandle of the PhysDisk.
+ */
+ stripe_offset = (uint32_t)virtLBA &
+ (sc->DD_stripe_size - 1);
+ if ((stripe_offset + io_size) <=
+ sc->DD_stripe_size) {
+ physLBA = (uint32_t)virtLBA >>
+ sc->DD_stripe_exponent;
+ stripe_unit = physLBA /
+ sc->DD_num_phys_disks;
+ column = physLBA %
+ sc->DD_num_phys_disks;
+ pIO_req->DevHandle =
+ sc->DD_column_map[column].
+ dev_handle;
+ cm->cm_desc.SCSIIO.DevHandle =
+ pIO_req->DevHandle;
+
+ physLBA = (stripe_unit <<
+ sc->DD_stripe_exponent) +
+ stripe_offset;
+ ptrLBA =
+ &pIO_req->CDB.CDB32[lba_idx];
+ physLBA_byte = (uint8_t)(physLBA >> 24);
+ *ptrLBA = physLBA_byte;
+ ptrLBA =
+ &pIO_req->CDB.CDB32[lba_idx + 1];
+ physLBA_byte = (uint8_t)(physLBA >> 16);
+ *ptrLBA = physLBA_byte;
+ ptrLBA =
+ &pIO_req->CDB.CDB32[lba_idx + 2];
+ physLBA_byte = (uint8_t)(physLBA >> 8);
+ *ptrLBA = physLBA_byte;
+ ptrLBA =
+ &pIO_req->CDB.CDB32[lba_idx + 3];
+ physLBA_byte = (uint8_t)physLBA;
+ *ptrLBA = physLBA_byte;
+
+ /*
+ * Set flag that Direct Drive I/O is
+ * being done.
+ */
+ cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
+ }
+ }
+ } else {
+ /*
+ * 16-byte CDB and the upper 4 bytes of the CDB are not
+ * 0. Get the transfer size in blocks.
+ */
+ io_size = (cm->cm_length >> sc->DD_block_exponent);
+
+ /*
+ * Get virtual LBA.
+ */
+ virtLBA = ((uint64_t)CDB[2] << 56) |
+ ((uint64_t)CDB[3] << 48) |
+ ((uint64_t)CDB[4] << 40) |
+ ((uint64_t)CDB[5] << 32) |
+ ((uint64_t)CDB[6] << 24) |
+ ((uint64_t)CDB[7] << 16) |
+ ((uint64_t)CDB[8] << 8) |
+ (uint64_t)CDB[9];
+
+ /*
+ * Check that LBA range for I/O does not exceed volume's
+ * MaxLBA.
+ */
+ if ((virtLBA + (uint64_t)io_size - 1) <=
+ sc->DD_max_lba) {
+ /*
+ * Check if the I/O crosses a stripe boundary.
+ * If not, translate the virtual LBA to a
+ * physical LBA and set the DevHandle for the
+ * PhysDisk to be used. If it does cross a
+ * boundary, do normal I/O. To get the right
+ * DevHandle to use, get the map number for the
+ * column, then use that map number to look up
+ * the DevHandle of the PhysDisk.
+ */
+ stripe_offset = (uint32_t)virtLBA &
+ (sc->DD_stripe_size - 1);
+ if ((stripe_offset + io_size) <=
+ sc->DD_stripe_size) {
+ physLBA = (uint32_t)(virtLBA >>
+ sc->DD_stripe_exponent);
+ stripe_unit = physLBA /
+ sc->DD_num_phys_disks;
+ column = physLBA %
+ sc->DD_num_phys_disks;
+ pIO_req->DevHandle =
+ sc->DD_column_map[column].
+ dev_handle;
+ cm->cm_desc.SCSIIO.DevHandle =
+ pIO_req->DevHandle;
+
+ physLBA = (stripe_unit <<
+ sc->DD_stripe_exponent) +
+ stripe_offset;
+
+ /*
+ * Set upper 4 bytes of LBA to 0. We
+ * assume that the phys disks are less
+ * than 2 TB in size. Then, set the
+ * lower 4 bytes.
+ */
+ pIO_req->CDB.CDB32[2] = 0;
+ pIO_req->CDB.CDB32[3] = 0;
+ pIO_req->CDB.CDB32[4] = 0;
+ pIO_req->CDB.CDB32[5] = 0;
+ ptrLBA = &pIO_req->CDB.CDB32[6];
+ physLBA_byte = (uint8_t)(physLBA >> 24);
+ *ptrLBA = physLBA_byte;
+ ptrLBA = &pIO_req->CDB.CDB32[7];
+ physLBA_byte = (uint8_t)(physLBA >> 16);
+ *ptrLBA = physLBA_byte;
+ ptrLBA = &pIO_req->CDB.CDB32[8];
+ physLBA_byte = (uint8_t)(physLBA >> 8);
+ *ptrLBA = physLBA_byte;
+ ptrLBA = &pIO_req->CDB.CDB32[9];
+ physLBA_byte = (uint8_t)physLBA;
+ *ptrLBA = physLBA_byte;
+
+ /*
+ * Set flag that Direct Drive I/O is
+ * being done.
+ */
+ cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
+ }
+ }
+ }
+ }
+}
+
#if __FreeBSD_version >= 900026
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
@@ -1722,7 +2317,7 @@ mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
__func__, cm->cm_flags);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
goto bailout;
- }
+ }
rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
if (rpl == NULL) {
@@ -1989,7 +2584,9 @@ mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
* the parent device of this device, which is probably the expander.
*/
if (sasaddr == 0) {
+#ifdef OLD_MPS_PROBE
struct mpssas_target *parent_target;
+#endif
if (targ->parent_handle == 0x0) {
mps_printf(sc, "%s: handle %d does not have a valid "
@@ -1997,8 +2594,9 @@ mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
ccb->ccb_h.status = CAM_REQ_INVALID;
goto bailout;
}
- parent_target = mpssas_find_target(sassc, 0,
- targ->parent_handle);
+#ifdef OLD_MPS_PROBE
+ parent_target = mpssas_find_target_by_handle(sassc, 0,
+ targ->parent_handle);
if (parent_target == NULL) {
mps_printf(sc, "%s: handle %d does not have a valid "
@@ -2018,6 +2616,27 @@ mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
}
sasaddr = parent_target->sasaddr;
+#else /* OLD_MPS_PROBE */
+ if ((targ->parent_devinfo &
+ MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
+ mps_printf(sc, "%s: handle %d parent %d does not "
+ "have an SMP target!\n", __func__,
+ targ->handle, targ->parent_handle);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ goto bailout;
+
+ }
+ if (targ->parent_sasaddr == 0x0) {
+ mps_printf(sc, "%s: handle %d parent handle %d does "
+ "not have a valid SAS address!\n",
+ __func__, targ->handle, targ->parent_handle);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ goto bailout;
+ }
+
+ sasaddr = targ->parent_sasaddr;
+#endif /* OLD_MPS_PROBE */
+
}
if (sasaddr == 0) {
@@ -2034,99 +2653,87 @@ bailout:
xpt_done(ccb);
}
-
-#endif /* __FreeBSD_version >= 900026 */
+#endif /* __FreeBSD_version >= 900026 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
+ MPI2_SCSI_TASK_MANAGE_REQUEST *req;
struct mps_softc *sc;
- struct mps_command *cm;
+ struct mps_command *tm;
struct mpssas_target *targ;
- sc = sassc->sc;
- targ = &sassc->targets[ccb->ccb_h.target_id];
-
- if (targ->flags & MPSSAS_TARGET_INRECOVERY) {
- ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
- xpt_done(ccb);
- return;
- }
+ mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
+ mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
- cm = mps_alloc_command(sc);
- if (cm == NULL) {
- mps_printf(sc, "%s: cannot alloc command\n", __func__);
+ sc = sassc->sc;
+ tm = mps_alloc_command(sc);
+ if (tm == NULL) {
+ mps_printf(sc, "comand alloc failure in mpssas_action_resetdev\n");
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
xpt_done(ccb);
return;
}
- cm->cm_targ = targ;
- cm->cm_complete = mpssas_resetdev_complete;
- cm->cm_complete_data = ccb;
-
- mpssas_resetdev(sassc, cm);
-}
-
-static void
-mpssas_resetdev(struct mpssas_softc *sassc, struct mps_command *cm)
-{
- MPI2_SCSI_TASK_MANAGE_REQUEST *req;
- struct mps_softc *sc;
-
- mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
-
- sc = sassc->sc;
-
- req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
- req->DevHandle = cm->cm_targ->handle;
+ targ = &sassc->targets[ccb->ccb_h.target_id];
+ req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+ req->DevHandle = targ->handle;
req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
/* SAS Hard Link Reset / SATA Link Reset */
req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- cm->cm_data = NULL;
- cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
-
- mpssas_issue_tm_request(sc, cm);
+ tm->cm_data = NULL;
+ tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ tm->cm_complete = mpssas_resetdev_complete;
+ tm->cm_complete_data = ccb;
+ mps_map_command(sc, tm);
}
static void
-mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *cm)
+mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
MPI2_SCSI_TASK_MANAGE_REPLY *resp;
union ccb *ccb;
- mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+ mtx_assert(&sc->mps_mtx, MA_OWNED);
- resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)cm->cm_reply;
- ccb = cm->cm_complete_data;
+ resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+ ccb = tm->cm_complete_data;
- if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
+ /*
+ * Currently there should be no way we can hit this case. It only
+ * happens when we have a failure to allocate chain frames, and
+ * task management commands don't have S/G lists.
+ */
+ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
MPI2_SCSI_TASK_MANAGE_REQUEST *req;
- req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
+ req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
- "This should not happen!\n", __func__, cm->cm_flags,
+ "This should not happen!\n", __func__, tm->cm_flags,
req->DevHandle);
-
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
goto bailout;
}
- printf("resetdev complete IOCStatus= 0x%x ResponseCode= 0x%x\n",
+ printf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
resp->IOCStatus, resp->ResponseCode);
- if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE)
+ if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
ccb->ccb_h.status = CAM_REQ_CMP;
+ mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
+ CAM_LUN_WILDCARD);
+ }
else
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
bailout:
- mpssas_complete_tm_request(sc, cm, /*free_cm*/ 1);
+ mpssas_free_tm(sc, tm);
xpt_done(ccb);
}
@@ -2136,16 +2743,502 @@ mpssas_poll(struct cam_sim *sim)
struct mpssas_softc *sassc;
sassc = cam_sim_softc(sim);
+
+ if (sassc->sc->mps_debug & MPS_TRACE) {
+ /* frequent debug messages during a panic just slow
+ * everything down too much.
+ */
+ mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
+ sassc->sc->mps_debug &= ~MPS_TRACE;
+ }
+
mps_intr_locked(sassc->sc);
}
static void
-mpssas_freeze_device(struct mpssas_softc *sassc, struct mpssas_target *targ)
+mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct mpssas_softc *sassc;
+ char path_str[64];
+
+ if (done_ccb == NULL)
+ return;
+
+ sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
+
+ mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
+
+ xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
+ mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
+
+ xpt_free_path(done_ccb->ccb_h.path);
+ xpt_free_ccb(done_ccb);
+
+#if __FreeBSD_version < 1000006
+ /*
+ * Before completing scan, get EEDP stuff for all of the existing
+ * targets.
+ */
+ mpssas_check_eedp(sassc);
+#endif
+
+}
+
+/* thread to handle bus rescans */
+static void
+mpssas_scanner_thread(void *arg)
+{
+ struct mpssas_softc *sassc;
+ struct mps_softc *sc;
+ union ccb *ccb;
+
+ sassc = (struct mpssas_softc *)arg;
+ sc = sassc->sc;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ mps_lock(sc);
+ for (;;) {
+ msleep(&sassc->ccb_scanq, &sc->mps_mtx, PRIBIO,
+ "mps_scanq", 0);
+ if (sassc->flags & MPSSAS_SHUTDOWN) {
+ mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
+ break;
+ }
+ ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
+ if (ccb == NULL)
+ continue;
+ TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
+ xpt_action(ccb);
+ }
+
+ sassc->flags &= ~MPSSAS_SCANTHREAD;
+ wakeup(&sassc->flags);
+ mps_unlock(sc);
+ mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
+ mps_kproc_exit(0);
+}
+
+static void
+mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
+{
+ char path_str[64];
+
+ mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
+
+ mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
+
+ if (ccb == NULL)
+ return;
+
+ xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
+ mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
+
+ /* Prepare request */
+ ccb->ccb_h.ppriv_ptr1 = sassc;
+ ccb->ccb_h.cbfcnp = mpssas_rescan_done;
+ xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
+ TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
+ wakeup(&sassc->ccb_scanq);
+}
+
+#if __FreeBSD_version >= 1000006
+static void
+mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
+ void *arg)
+{
+ struct mps_softc *sc;
+
+ sc = (struct mps_softc *)callback_arg;
+
+ switch (code) {
+ case AC_ADVINFO_CHANGED: {
+ struct mpssas_target *target;
+ struct mpssas_softc *sassc;
+ struct scsi_read_capacity_data_long rcap_buf;
+ struct ccb_dev_advinfo cdai;
+ struct mpssas_lun *lun;
+ lun_id_t lunid;
+ int found_lun;
+ uintptr_t buftype;
+
+ buftype = (uintptr_t)arg;
+
+ found_lun = 0;
+ sassc = sc->sassc;
+
+ /*
+ * We're only interested in read capacity data changes.
+ */
+ if (buftype != CDAI_TYPE_RCAPLONG)
+ break;
+
+ /*
+ * We're only interested in devices that are attached to
+ * this controller.
+ */
+ if (xpt_path_path_id(path) != sassc->sim->path_id)
+ break;
+
+ /*
+ * We should have a handle for this, but check to make sure.
+ */
+ target = &sassc->targets[xpt_path_target_id(path)];
+ if (target->handle == 0)
+ break;
+
+ lunid = xpt_path_lun_id(path);
+
+ SLIST_FOREACH(lun, &target->luns, lun_link) {
+ if (lun->lun_id == lunid) {
+ found_lun = 1;
+ break;
+ }
+ }
+
+ if (found_lun == 0) {
+ lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
+ M_NOWAIT | M_ZERO);
+ if (lun == NULL) {
+ mps_dprint(sc, MPS_FAULT, "Unable to alloc "
+ "LUN for EEDP support.\n");
+ break;
+ }
+ lun->lun_id = lunid;
+ SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
+ }
+
+ bzero(&rcap_buf, sizeof(rcap_buf));
+ xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
+ cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
+ cdai.ccb_h.flags = CAM_DIR_IN;
+ cdai.buftype = CDAI_TYPE_RCAPLONG;
+ cdai.flags = 0;
+ cdai.bufsiz = sizeof(rcap_buf);
+ cdai.buf = (uint8_t *)&rcap_buf;
+ xpt_action((union ccb *)&cdai);
+ if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(cdai.ccb_h.path,
+ 0, 0, 0, FALSE);
+
+ if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
+ && (rcap_buf.prot & SRC16_PROT_EN)) {
+ lun->eedp_formatted = TRUE;
+ lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
+ } else {
+ lun->eedp_formatted = FALSE;
+ lun->eedp_block_size = 0;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+#else /* __FreeBSD_version >= 1000006 */
+
+static void
+mpssas_check_eedp(struct mpssas_softc *sassc)
{
+ struct mps_softc *sc = sassc->sc;
+ struct ccb_scsiio *csio;
+ struct scsi_read_capacity_16 *scsi_cmd;
+ struct scsi_read_capacity_eedp *rcap_buf;
+ union ccb *ccb;
+ path_id_t pathid = cam_sim_path(sassc->sim);
+ target_id_t targetid;
+ lun_id_t lunid;
+ struct cam_periph *found_periph;
+ struct mpssas_target *target;
+ struct mpssas_lun *lun;
+ uint8_t found_lun;
+
+ /*
+ * Issue a READ CAPACITY 16 command to each LUN of each target. This
+ * info is used to determine if the LUN is formatted for EEDP support.
+ */
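+ /*
+ * READ CAPACITY(16) is SERVICE ACTION IN(16) (opcode 0x9E) with
+ * service action 0x10; its allocation length occupies CDB bytes
+ * 10-13, which is why byte 13 (the least significant byte) is set
+ * to the small response size when the CDB is built below.
+ */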
+ for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
+ target = &sassc->targets[targetid];
+ if (target->handle == 0x0) {
+ continue;
+ }
+
+ lunid = 0;
+ do {
+ rcap_buf =
+ malloc(sizeof(struct scsi_read_capacity_eedp),
+ M_MPT2, M_NOWAIT | M_ZERO);
+ if (rcap_buf == NULL) {
+ mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
+ "capacity buffer for EEDP support.\n");
+ return;
+ }
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ mps_dprint(sc, MPS_FAULT, "Unable to alloc CCB "
+ "for EEDP support.\n");
+ free(rcap_buf, M_MPT2);
+ return;
+ }
+
+ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
+ pathid, targetid, lunid) != CAM_REQ_CMP) {
+ mps_dprint(sc, MPS_FAULT, "Unable to create "
+ "path for EEDP support\n");
+ free(rcap_buf, M_MPT2);
+ xpt_free_ccb(ccb);
+ return;
+ }
+
+ /*
+ * If a periph is returned, the LUN exists. Create an
+ * entry in the target's LUN list.
+ */
+ if ((found_periph = cam_periph_find(ccb->ccb_h.path,
+ NULL)) != NULL) {
+ /*
+ * If LUN is already in list, don't create a new
+ * one.
+ */
+ found_lun = FALSE;
+ SLIST_FOREACH(lun, &target->luns, lun_link) {
+ if (lun->lun_id == lunid) {
+ found_lun = TRUE;
+ break;
+ }
+ }
+ if (!found_lun) {
+ lun = malloc(sizeof(struct mpssas_lun),
+ M_MPT2, M_NOWAIT | M_ZERO);
+ if (lun == NULL) {
+ mps_dprint(sc, MPS_FAULT,
+ "Unable to alloc LUN for "
+ "EEDP support.\n");
+ free(rcap_buf, M_MPT2);
+ xpt_free_path(ccb->ccb_h.path);
+ xpt_free_ccb(ccb);
+ return;
+ }
+ lun->lun_id = lunid;
+ SLIST_INSERT_HEAD(&target->luns, lun,
+ lun_link);
+ }
+ lunid++;
+
+ /*
+ * Issue a READ CAPACITY 16 command for the LUN.
+ * The mpssas_read_cap_done function will load
+ * the read cap info into the LUN struct.
+ */
+ csio = &ccb->csio;
+ csio->ccb_h.func_code = XPT_SCSI_IO;
+ csio->ccb_h.flags = CAM_DIR_IN;
+ csio->ccb_h.retry_count = 4;
+ csio->ccb_h.cbfcnp = mpssas_read_cap_done;
+ csio->ccb_h.timeout = 60000;
+ csio->data_ptr = (uint8_t *)rcap_buf;
+ csio->dxfer_len = sizeof(struct
+ scsi_read_capacity_eedp);
+ csio->sense_len = MPS_SENSE_LEN;
+ csio->cdb_len = sizeof(*scsi_cmd);
+ csio->tag_action = MSG_SIMPLE_Q_TAG;
+
+ scsi_cmd = (struct scsi_read_capacity_16 *)
+ &csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = 0x9E;
+ scsi_cmd->service_action = SRC16_SERVICE_ACTION;
+ ((uint8_t *)scsi_cmd)[13] = sizeof(struct
+ scsi_read_capacity_eedp);
+
+ /*
+ * Set the path, target and lun IDs for the READ
+ * CAPACITY request.
+ */
+ ccb->ccb_h.path_id =
+ xpt_path_path_id(ccb->ccb_h.path);
+ ccb->ccb_h.target_id =
+ xpt_path_target_id(ccb->ccb_h.path);
+ ccb->ccb_h.target_lun =
+ xpt_path_lun_id(ccb->ccb_h.path);
+
+ ccb->ccb_h.ppriv_ptr1 = sassc;
+ xpt_action(ccb);
+ } else {
+ free(rcap_buf, M_MPT2);
+ xpt_free_path(ccb->ccb_h.path);
+ xpt_free_ccb(ccb);
+ }
+ } while (found_periph);
+ }
}
+
static void
-mpssas_unfreeze_device(struct mpssas_softc *sassc, struct mpssas_target *targ)
+mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
+ struct mpssas_softc *sassc;
+ struct mpssas_target *target;
+ struct mpssas_lun *lun;
+ struct scsi_read_capacity_eedp *rcap_buf;
+
+ if (done_ccb == NULL)
+ return;
+
+ rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
+
+ /*
+ * Get the LUN ID for the path and look it up in the LUN list for the
+ * target.
+ */
+ sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
+ target = &sassc->targets[done_ccb->ccb_h.target_id];
+ SLIST_FOREACH(lun, &target->luns, lun_link) {
+ if (lun->lun_id != done_ccb->ccb_h.target_lun)
+ continue;
+
+ /*
+ * Got the LUN in the target's LUN list. Fill it in
+ * with EEDP info. If the READ CAP 16 command had some
+ * SCSI error (common if command is not supported), mark
+ * the lun as not supporting EEDP and set the block size
+ * to 0.
+ */
+ if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
+ || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
+ lun->eedp_formatted = FALSE;
+ lun->eedp_block_size = 0;
+ break;
+ }
+
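+ /*
+ * Bit 0 of the protect byte (PROT_EN) is set when the LUN is
+ * formatted with protection information.
+ */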
+ if (rcap_buf->protect & 0x01) {
+ lun->eedp_formatted = TRUE;
+ lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
+ }
+ break;
+ }
+
+ /* Finished with this CCB and path. */
+ free(rcap_buf, M_MPT2);
+ xpt_free_path(done_ccb->ccb_h.path);
+ xpt_free_ccb(done_ccb);
+}
+#endif /* __FreeBSD_version >= 1000006 */
+
+int
+mpssas_startup(struct mps_softc *sc)
+{
+ struct mpssas_softc *sassc;
+
+ /*
+ * Send the port enable message and set the wait_for_port_enable flag.
+ * This flag helps to keep the simq frozen until all discovery events
+ * are processed.
+ */
+ sassc = sc->sassc;
+ mpssas_startup_increment(sassc);
+ sc->wait_for_port_enable = 1;
+ mpssas_send_portenable(sc);
+ return (0);
+}
+
+static int
+mpssas_send_portenable(struct mps_softc *sc)
+{
+ MPI2_PORT_ENABLE_REQUEST *request;
+ struct mps_command *cm;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ if ((cm = mps_alloc_command(sc)) == NULL)
+ return (EBUSY);
+ request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
+ request->Function = MPI2_FUNCTION_PORT_ENABLE;
+ request->MsgFlags = 0;
+ request->VP_ID = 0;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_complete = mpssas_portenable_complete;
+ cm->cm_data = NULL;
+ cm->cm_sge = NULL;
+
+ mps_map_command(sc, cm);
+ mps_dprint(sc, MPS_TRACE,
+ "%s finished cm %p req %p complete %p\n", __func__,
+ cm, cm->cm_req, cm->cm_complete);
+ return (0);
+}
+
+static void
+mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
+{
+ MPI2_PORT_ENABLE_REPLY *reply;
+ struct mpssas_softc *sassc;
+ struct mpssas_target *target;
+ int i;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+ sassc = sc->sassc;
+
+ /*
+ * Currently there should be no way we can hit this case. It only
+ * happens when we have a failure to allocate chain frames, and
+ * port enable commands don't have S/G lists.
+ */
+ if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
+ mps_printf(sc, "%s: cm_flags = %#x for port enable! "
+ "This should not happen!\n", __func__, cm->cm_flags);
+ }
+
+ reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
+ if (reply == NULL)
+ mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
+ else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
+ MPI2_IOCSTATUS_SUCCESS)
+ mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
+
+ mps_free_command(sc, cm);
+ if (sc->mps_ich.ich_arg != NULL) {
+ mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
+ config_intrhook_disestablish(&sc->mps_ich);
+ sc->mps_ich.ich_arg = NULL;
+ }
+
+ /*
+ * Get WarpDrive info after discovery is complete but before the scan
+ * starts. At this point, all devices are ready to be exposed to the
+ * OS. If devices should be hidden instead, take them out of the
+ * 'targets' array before the scan. The devinfo for a disk will have
+ * some info and a volume's will be 0. Use that to remove disks.
+ */
+ mps_wd_config_pages(sc);
+ if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
+ && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
+ || (sc->WD_valid_config && (sc->WD_hide_expose ==
+ MPS_WD_HIDE_IF_VOLUME))) {
+ for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
+ target = &sassc->targets[i];
+ if (target->devinfo) {
+ target->devinfo = 0x0;
+ target->encl_handle = 0x0;
+ target->encl_slot = 0x0;
+ target->handle = 0x0;
+ target->tid = 0x0;
+ target->linkrate = 0x0;
+ target->flags = 0x0;
+ }
+ }
+ }
+
+ /*
+ * Done waiting for port enable to complete. Decrement the refcount.
+ * If refcount is 0, discovery is complete and a rescan of the bus can
+ * take place. Since the simq was explicitly frozen before port
+ * enable, it must be explicitly released here to keep the
+ * freeze/release count in sync.
+ */
+ sc->wait_for_port_enable = 0;
+ sc->port_enable_complete = 1;
+ mpssas_startup_decrement(sassc);
+ xpt_release_simq(sassc->sim, 1);
}
diff --git a/sys/dev/mps/mps_sas.h b/sys/dev/mps/mps_sas.h
new file mode 100644
index 0000000..343247f
--- /dev/null
+++ b/sys/dev/mps/mps_sas.h
@@ -0,0 +1,161 @@
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+struct mps_fw_event_work;
+
+struct mpssas_lun {
+ SLIST_ENTRY(mpssas_lun) lun_link;
+ lun_id_t lun_id;
+ uint8_t eedp_formatted;
+ uint32_t eedp_block_size;
+};
+
+struct mpssas_target {
+ uint16_t handle;
+ uint8_t linkrate;
+ uint64_t devname;
+ uint32_t devinfo;
+ uint16_t encl_handle;
+ uint16_t encl_slot;
+ uint8_t flags;
+#define MPSSAS_TARGET_INABORT (1 << 0)
+#define MPSSAS_TARGET_INRESET (1 << 1)
+#define MPSSAS_TARGET_INDIAGRESET (1 << 2)
+#define MPSSAS_TARGET_INREMOVAL (1 << 3)
+#define MPSSAS_TARGET_INRECOVERY (MPSSAS_TARGET_INABORT | \
+ MPSSAS_TARGET_INRESET | MPSSAS_TARGET_INDIAGRESET)
+#define MPSSAS_TARGET_ADD (1 << 29)
+#define MPSSAS_TARGET_REMOVE (1 << 30)
+ uint16_t tid;
+ SLIST_HEAD(, mpssas_lun) luns;
+ TAILQ_HEAD(, mps_command) commands;
+ struct mps_command *tm;
+ TAILQ_HEAD(, mps_command) timedout_commands;
+ uint16_t exp_dev_handle;
+ uint16_t phy_num;
+ uint64_t sasaddr;
+ uint16_t parent_handle;
+ uint64_t parent_sasaddr;
+ uint32_t parent_devinfo;
+ struct sysctl_ctx_list sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+ TAILQ_ENTRY(mpssas_target) sysctl_link;
+ uint64_t issued;
+ uint64_t completed;
+ unsigned int outstanding;
+ unsigned int timeouts;
+ unsigned int aborts;
+ unsigned int logical_unit_resets;
+ unsigned int target_resets;
+};
+
+struct mpssas_softc {
+ struct mps_softc *sc;
+ u_int flags;
+#define MPSSAS_IN_DISCOVERY (1 << 0)
+#define MPSSAS_IN_STARTUP (1 << 1)
+#define MPSSAS_DISCOVERY_TIMEOUT_PENDING (1 << 2)
+#define MPSSAS_QUEUE_FROZEN (1 << 3)
+#define MPSSAS_SHUTDOWN (1 << 4)
+#define MPSSAS_SCANTHREAD (1 << 5)
+ struct mpssas_target *targets;
+ struct cam_devq *devq;
+ struct cam_sim *sim;
+ struct cam_path *path;
+ struct intr_config_hook sas_ich;
+ struct callout discovery_callout;
+ u_int discovery_timeouts;
+ struct mps_event_handle *mpssas_eh;
+
+ u_int startup_refcount;
+ u_int tm_count;
+ struct proc *sysctl_proc;
+
+ TAILQ_HEAD(, ccb_hdr) ccb_scanq;
+ struct proc *rescan_thread;
+
+ struct taskqueue *ev_tq;
+ struct task ev_task;
+ TAILQ_HEAD(, mps_fw_event_work) ev_queue;
+};
+
+MALLOC_DECLARE(M_MPSSAS);
+
+/*
+ * Abstracted so that the driver can be backwards and forwards compatible
+ * with future versions of CAM that will provide this functionality.
+ */
+#define MPS_SET_LUN(lun, ccblun) \
+ mpssas_set_lun(lun, ccblun)
+
+static __inline int
+mpssas_set_lun(uint8_t *lun, u_int ccblun)
+{
+ uint64_t *newlun;
+
+ newlun = (uint64_t *)lun;
+ *newlun = 0;
+ if (ccblun <= 0xff) {
+ /* Peripheral device address method, LUN is 0 to 255 */
+ lun[1] = ccblun;
+ } else if (ccblun <= 0x3fff) {
+ /* Flat space address method, LUN is <= 16383 */
+ scsi_ulto2b(ccblun, lun);
+ lun[0] |= 0x40;
+ } else if (ccblun <= 0xffffff) {
+ /* Extended flat space address method, LUN is <= 16777215 */
+ scsi_ulto3b(ccblun, &lun[1]);
+ /* Extended Flat space address method */
+ lun[0] = 0xc0;
+ /* Length = 1, i.e. LUN is 3 bytes long */
+ lun[0] |= 0x10;
+ /* Extended Address Method */
+ lun[0] |= 0x02;
+ } else {
+ return (EINVAL);
+ }
+
+ return (0);
+}
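+/*
+ * Worked examples of the encodings above (illustrative):
+ *   ccblun = 5        -> lun[] = { 0x00, 0x05, ... }  peripheral method
+ *   ccblun = 0x1234   -> lun[] = { 0x52, 0x34, ... }  flat space method
+ *   ccblun = 0x123456 -> lun[] = { 0xd2, 0x12, 0x34, 0x56, ... }
+ *                        extended flat space method (0xc0|0x10|0x02 = 0xd2)
+ */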
+
+#define MPS_SET_SINGLE_LUN(req, lun) \
+do { \
+ bzero((req)->LUN, 8); \
+ (req)->LUN[1] = lun; \
+} while(0)
+
+void mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ);
+void mpssas_discovery_end(struct mpssas_softc *sassc);
+void mpssas_startup_increment(struct mpssas_softc *sassc);
+void mpssas_startup_decrement(struct mpssas_softc *sassc);
+
+struct mps_command * mpssas_alloc_tm(struct mps_softc *sc);
+void mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm);
+void mpssas_firmware_event_work(void *arg, int pending);
diff --git a/sys/dev/mps/mps_sas_lsi.c b/sys/dev/mps/mps_sas_lsi.c
new file mode 100644
index 0000000..70c74ea
--- /dev/null
+++ b/sys/dev/mps/mps_sas_lsi.c
@@ -0,0 +1,865 @@
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Communications core for LSI MPT2 */
+
+/* TODO Move headers to mpsvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/selinfo.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/bio.h>
+#include <sys/malloc.h>
+#include <sys/uio.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/sbuf.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <machine/stdarg.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_periph.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+
+#include <dev/mps/mpi/mpi2_type.h>
+#include <dev/mps/mpi/mpi2.h>
+#include <dev/mps/mpi/mpi2_ioc.h>
+#include <dev/mps/mpi/mpi2_sas.h>
+#include <dev/mps/mpi/mpi2_cnfg.h>
+#include <dev/mps/mpi/mpi2_init.h>
+#include <dev/mps/mpi/mpi2_raid.h>
+#include <dev/mps/mpi/mpi2_tool.h>
+#include <dev/mps/mps_ioctl.h>
+#include <dev/mps/mpsvar.h>
+#include <dev/mps/mps_table.h>
+#include <dev/mps/mps_sas.h>
+
+/* For Hashed SAS Address creation for SATA Drives */
+#define MPT2SAS_SN_LEN 20
+#define MPT2SAS_MN_LEN 40
+
+struct mps_fw_event_work {
+ u16 event;
+ void *event_data;
+ TAILQ_ENTRY(mps_fw_event_work) ev_link;
+};
+
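+/*
+ * The hashed SAS address for a SATA disk is accumulated in two 32-bit words
+ * (word.high/word.low) and then read back out byte-by-byte through wwid[];
+ * this union provides both views of the same 8 bytes.
+ */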
+union _sata_sas_address {
+ u8 wwid[8];
+ struct {
+ u32 high;
+ u32 low;
+ } word;
+};
+
+/*
+ * define the IDENTIFY DEVICE structure
+ */
+struct _ata_identify_device_data {
+ u16 reserved1[10]; /* 0-9 */
+ u16 serial_number[10]; /* 10-19 */
+ u16 reserved2[7]; /* 20-26 */
+ u16 model_number[20]; /* 27-46*/
+ u16 reserved3[209]; /* 47-255*/
+};
+
+static void mpssas_fw_work(struct mps_softc *sc,
+ struct mps_fw_event_work *fw_event);
+static void mpssas_fw_event_free(struct mps_softc *,
+ struct mps_fw_event_work *);
+static int mpssas_add_device(struct mps_softc *sc, u16 handle, u8 linkrate);
+static int mpssas_get_sata_identify(struct mps_softc *sc, u16 handle,
+ Mpi2SataPassthroughReply_t *mpi_reply, char *id_buffer, int sz,
+ u32 devinfo);
+int mpssas_get_sas_address_for_sata_disk(struct mps_softc *sc,
+ u64 *sas_address, u16 handle, u32 device_info);
+static int mpssas_volume_add(struct mps_softc *sc,
+ Mpi2EventIrConfigElement_t *element);
+
+void
+mpssas_evt_handler(struct mps_softc *sc, uintptr_t data,
+ MPI2_EVENT_NOTIFICATION_REPLY *event)
+{
+ struct mps_fw_event_work *fw_event;
+ u16 sz;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+ mps_print_evt_sas(sc, event);
+ mpssas_record_event(sc, event);
+
+ fw_event = malloc(sizeof(struct mps_fw_event_work), M_MPT2,
+ M_ZERO|M_NOWAIT);
+ if (!fw_event) {
+ printf("%s: allocate failed for fw_event\n", __func__);
+ return;
+ }
+ sz = le16toh(event->EventDataLength) * 4;
+ fw_event->event_data = malloc(sz, M_MPT2, M_ZERO|M_NOWAIT);
+ if (!fw_event->event_data) {
+ printf("%s: allocate failed for event_data\n", __func__);
+ free(fw_event, M_MPT2);
+ return;
+ }
+
+ bcopy(event->EventData, fw_event->event_data, sz);
+ fw_event->event = event->Event;
+ if ((event->Event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
+ event->Event == MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE ||
+ event->Event == MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST) &&
+ sc->track_mapping_events)
+ sc->pending_map_events++;
+
+ /*
+ * When wait_for_port_enable flag is set, make sure that all the events
+ * are processed. Increment the startup_refcount and decrement it after
+ * events are processed.
+ */
+ if ((event->Event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
+ event->Event == MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST) &&
+ sc->wait_for_port_enable)
+ mpssas_startup_increment(sc->sassc);
+
+ TAILQ_INSERT_TAIL(&sc->sassc->ev_queue, fw_event, ev_link);
+ taskqueue_enqueue(sc->sassc->ev_tq, &sc->sassc->ev_task);
+}
+
+static void
+mpssas_fw_event_free(struct mps_softc *sc, struct mps_fw_event_work *fw_event)
+{
+
+ free(fw_event->event_data, M_MPT2);
+ free(fw_event, M_MPT2);
+}
+
+/**
+ * mpssas_fw_work - delayed task for processing firmware events
+ * @sc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+mpssas_fw_work(struct mps_softc *sc, struct mps_fw_event_work *fw_event)
+{
+ struct mpssas_softc *sassc;
+ sassc = sc->sassc;
+
+ switch (fw_event->event) {
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *data;
+ MPI2_EVENT_SAS_TOPO_PHY_ENTRY *phy;
+ int i;
+
+ data = (MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *)
+ fw_event->event_data;
+
+ mps_mapping_topology_change_event(sc, fw_event->event_data);
+
+ for (i = 0; i < data->NumEntries; i++) {
+ phy = &data->PHY[i];
+ switch (phy->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK) {
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+ if (mpssas_add_device(sc,
+ phy->AttachedDevHandle, phy->LinkRate)) {
+ printf("%s: failed to add device with "
+ "handle 0x%x\n", __func__,
+ phy->AttachedDevHandle);
+ mpssas_prepare_remove(sassc, phy->
+ AttachedDevHandle);
+ }
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ mpssas_prepare_remove(sassc, phy->
+ AttachedDevHandle);
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+ case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
+ case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
+ default:
+ break;
+ }
+ }
+ /*
+ * refcount was incremented for this event in
+ * mpssas_evt_handler. Decrement it here because the event has
+ * been processed.
+ */
+ mpssas_startup_decrement(sassc);
+ break;
+ }
+ case MPI2_EVENT_SAS_DISCOVERY:
+ {
+ MPI2_EVENT_DATA_SAS_DISCOVERY *data;
+
+ data = (MPI2_EVENT_DATA_SAS_DISCOVERY *)fw_event->event_data;
+
+ if (data->ReasonCode & MPI2_EVENT_SAS_DISC_RC_STARTED)
+ mps_dprint(sc, MPS_TRACE,"SAS discovery start event\n");
+ if (data->ReasonCode & MPI2_EVENT_SAS_DISC_RC_COMPLETED) {
+ mps_dprint(sc, MPS_TRACE,"SAS discovery stop event\n");
+ sassc->flags &= ~MPSSAS_IN_DISCOVERY;
+ mpssas_discovery_end(sassc);
+ }
+ break;
+ }
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ {
+ Mpi2EventDataSasEnclDevStatusChange_t *data;
+ data = (Mpi2EventDataSasEnclDevStatusChange_t *)
+ fw_event->event_data;
+ mps_mapping_enclosure_dev_status_change_event(sc,
+ fw_event->event_data);
+ break;
+ }
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ {
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u8 foreign_config;
+ Mpi2EventDataIrConfigChangeList_t *event_data;
+ struct mpssas_target *targ;
+ unsigned int id;
+
+ event_data = fw_event->event_data;
+ foreign_config = (le32toh(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+ element =
+ (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ id = mps_mapping_get_raid_id_from_handle(sc,
+ element->VolDevHandle);
+
+ mps_mapping_ir_config_change_event(sc, event_data);
+
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ if (!foreign_config) {
+ if (mpssas_volume_add(sc, element)) {
+ printf("%s: failed to add RAID "
+ "volume with handle 0x%x\n",
+ __func__, le16toh(element->
+ VolDevHandle));
+ }
+ }
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ /*
+ * Rescan after volume is deleted or removed.
+ */
+ if (!foreign_config) {
+ if (id == MPS_MAP_BAD_ID) {
+ printf("%s: could not get ID "
+ "for volume with handle "
+ "0x%04x\n", __func__,
+ element->VolDevHandle);
+ break;
+ }
+
+ targ = &sassc->targets[id];
+ targ->handle = 0x0;
+ targ->encl_slot = 0x0;
+ targ->encl_handle = 0x0;
+ targ->exp_dev_handle = 0x0;
+ targ->phy_num = 0x0;
+ targ->linkrate = 0x0;
+ mpssas_rescan_target(sc, targ);
+ printf("RAID target id 0x%x removed\n",
+ targ->tid);
+ }
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ /*
+ * Phys Disk of a volume has been created. Hide
+ * it from the OS.
+ */
+ mpssas_prepare_remove(sassc, element->
+ PhysDiskDevHandle);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ /*
+ * Phys Disk of a volume has been deleted.
+ * Expose it to the OS.
+ */
+ if (mpssas_add_device(sc,
+ element->PhysDiskDevHandle, 0)) {
+ printf("%s: failed to add device with "
+ "handle 0x%x\n", __func__,
+ element->PhysDiskDevHandle);
+ mpssas_prepare_remove(sassc, element->
+ PhysDiskDevHandle);
+ }
+ break;
+ }
+ }
+ /*
+ * refcount was incremented for this event in
+ * mpssas_evt_handler. Decrement it here because the event has
+ * been processed.
+ */
+ mpssas_startup_decrement(sassc);
+ break;
+ }
+ case MPI2_EVENT_IR_VOLUME:
+ {
+ Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
+
+ /*
+ * Informational only.
+ */
+ mps_dprint(sc, MPS_INFO, "Received IR Volume event:\n");
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
+ mps_dprint(sc, MPS_INFO, " Volume Settings "
+ "changed from 0x%x to 0x%x for Volome with "
+ "handle 0x%x", event_data->PreviousValue,
+ event_data->NewValue,
+ event_data->VolDevHandle);
+ break;
+ case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
+ mps_dprint(sc, MPS_INFO, " Volume Status "
+ "changed from 0x%x to 0x%x for Volome with "
+ "handle 0x%x", event_data->PreviousValue,
+ event_data->NewValue,
+ event_data->VolDevHandle);
+ break;
+ case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
+ mps_dprint(sc, MPS_INFO, " Volume State "
+ "changed from 0x%x to 0x%x for Volome with "
+ "handle 0x%x", event_data->PreviousValue,
+ event_data->NewValue,
+ event_data->VolDevHandle);
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ {
+ Mpi2EventDataIrPhysicalDisk_t *event_data =
+ fw_event->event_data;
+
+ /*
+ * Informational only.
+ */
+ mps_dprint(sc, MPS_INFO, "Received IR Phys Disk event:\n");
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
+ mps_dprint(sc, MPS_INFO, " Phys Disk Settings "
+ "changed from 0x%x to 0x%x for Phys Disk Number "
+ "%d and handle 0x%x at Enclosure handle 0x%x, Slot "
+ "%d", event_data->PreviousValue,
+ event_data->NewValue, event_data->PhysDiskNum,
+ event_data->PhysDiskDevHandle,
+ event_data->EnclosureHandle, event_data->Slot);
+ break;
+ case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
+ mps_dprint(sc, MPS_INFO, " Phys Disk Status changed "
+ "from 0x%x to 0x%x for Phys Disk Number %d and "
+ "handle 0x%x at Enclosure handle 0x%x, Slot %d",
+ event_data->PreviousValue, event_data->NewValue,
+ event_data->PhysDiskNum,
+ event_data->PhysDiskDevHandle,
+ event_data->EnclosureHandle, event_data->Slot);
+ break;
+ case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
+ mps_dprint(sc, MPS_INFO, " Phys Disk State changed "
+ "from 0x%x to 0x%x for Phys Disk Number %d and "
+ "handle 0x%x at Enclosure handle 0x%x, Slot %d",
+ event_data->PreviousValue, event_data->NewValue,
+ event_data->PhysDiskNum,
+ event_data->PhysDiskDevHandle,
+ event_data->EnclosureHandle, event_data->Slot);
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ {
+ Mpi2EventDataIrOperationStatus_t *event_data =
+ fw_event->event_data;
+
+ /*
+ * Informational only.
+ */
+ mps_dprint(sc, MPS_INFO, "Received IR Op Status event:\n");
+ mps_dprint(sc, MPS_INFO, " RAID Operation of %d is %d "
+ "percent complete for Volume with handle 0x%x",
+ event_data->RAIDOperation, event_data->PercentComplete,
+ event_data->VolDevHandle);
+ break;
+ }
+ case MPI2_EVENT_LOG_ENTRY_ADDED:
+ {
+ pMpi2EventDataLogEntryAdded_t logEntry;
+ uint16_t logQualifier;
+ uint8_t logCode;
+
+ logEntry = (pMpi2EventDataLogEntryAdded_t)fw_event->event_data;
+ logQualifier = logEntry->LogEntryQualifier;
+
+ if (logQualifier == MPI2_WD_LOG_ENTRY) {
+ logCode = logEntry->LogData[0];
+
+ switch (logCode) {
+ case MPI2_WD_SSD_THROTTLING:
+ printf("WarpDrive Warning: IO Throttling has "
+ "occurred in the WarpDrive subsystem. "
+ "Check WarpDrive documentation for "
+ "additional details\n");
+ break;
+ case MPI2_WD_DRIVE_LIFE_WARN:
+ printf("WarpDrive Warning: Program/Erase "
+ "Cycles for the WarpDrive subsystem in "
+ "degraded range. Check WarpDrive "
+ "documentation for additional details\n");
+ break;
+ case MPI2_WD_DRIVE_LIFE_DEAD:
+ printf("WarpDrive Fatal Error: There are no "
+ "Program/Erase Cycles for the WarpDrive "
+ "subsystem. The storage device will be in "
+ "read-only mode. Check WarpDrive "
+ "documentation for additional details\n");
+ break;
+ case MPI2_WD_RAIL_MON_FAIL:
+ printf("WarpDrive Fatal Error: The Backup Rail "
+ "Monitor has failed on the WarpDrive "
+ "subsystem. Check WarpDrive documentation "
+ "for additional details\n");
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ }
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ default:
+ mps_dprint(sc, MPS_TRACE,"Unhandled event 0x%0X\n",
+ fw_event->event);
+ break;
+ }
+ mpssas_fw_event_free(sc, fw_event);
+}
+
+void
+mpssas_firmware_event_work(void *arg, int pending)
+{
+ struct mps_fw_event_work *fw_event;
+ struct mps_softc *sc;
+
+ sc = (struct mps_softc *)arg;
+ mps_lock(sc);
+ while ((fw_event = TAILQ_FIRST(&sc->sassc->ev_queue)) != NULL) {
+ TAILQ_REMOVE(&sc->sassc->ev_queue, fw_event, ev_link);
+ mpssas_fw_work(sc, fw_event);
+ }
+ mps_unlock(sc);
+}
+
+static int
+mpssas_add_device(struct mps_softc *sc, u16 handle, u8 linkrate)
+{
+ char devstring[80];
+ struct mpssas_softc *sassc;
+ struct mpssas_target *targ;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t config_page;
+ uint64_t sas_address, sata_sas_address;
+ uint64_t parent_sas_address = 0;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ u32 device_info, parent_devinfo = 0;
+ unsigned int id;
+ int ret;
+ int error = 0;
+
+ sassc = sc->sassc;
+ mpssas_startup_increment(sassc);
+ if ((mps_config_get_sas_device_pg0(sc, &mpi_reply, &config_page,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ printf("%s: error reading SAS device page0\n", __func__);
+ error = ENXIO;
+ goto out;
+ }
+
+ device_info = le32toh(config_page.DeviceInfo);
+
+ if (((device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0)
+ && (config_page.ParentDevHandle != 0)) {
+ Mpi2ConfigReply_t tmp_mpi_reply;
+ Mpi2SasDevicePage0_t parent_config_page;
+
+ if ((mps_config_get_sas_device_pg0(sc, &tmp_mpi_reply,
+ &parent_config_page, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ le16toh(config_page.ParentDevHandle)))) {
+ printf("%s: error reading SAS device %#x page0\n",
+ __func__, le16toh(config_page.ParentDevHandle));
+ } else {
+ parent_sas_address = parent_config_page.SASAddress.High;
+ parent_sas_address = (parent_sas_address << 32) |
+ parent_config_page.SASAddress.Low;
+ parent_devinfo = le32toh(parent_config_page.DeviceInfo);
+ }
+ }
+ /* TODO Check proper endianness */
+ sas_address = config_page.SASAddress.High;
+ sas_address = (sas_address << 32) |
+ config_page.SASAddress.Low;
+
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE)
+ == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) {
+ ret = mpssas_get_sas_address_for_sata_disk(sc,
+ &sata_sas_address, handle, device_info);
+ if (!ret)
+ id = mps_mapping_get_sas_id(sc,
+ sata_sas_address, handle);
+ else
+ id = mps_mapping_get_sas_id(sc,
+ sas_address, handle);
+ } else
+ id = mps_mapping_get_sas_id(sc, sas_address,
+ handle);
+ } else
+ id = mps_mapping_get_sas_id(sc, sas_address, handle);
+
+ if (id == MPS_MAP_BAD_ID) {
+ printf("failure at %s:%d/%s()! Could not get ID for device "
+ "with handle 0x%04x\n", __FILE__, __LINE__, __func__,
+ handle);
+ error = ENXIO;
+ goto out;
+ }
+ mps_vprintf(sc, "SAS Address from SAS device page0 = %jx\n",
+ sas_address);
+ targ = &sassc->targets[id];
+ targ->devinfo = device_info;
+ targ->devname = le32toh(config_page.DeviceName.High);
+ targ->devname = (targ->devname << 32) |
+ le32toh(config_page.DeviceName.Low);
+ targ->encl_handle = le16toh(config_page.EnclosureHandle);
+ targ->encl_slot = le16toh(config_page.Slot);
+ targ->handle = handle;
+ targ->parent_handle = le16toh(config_page.ParentDevHandle);
+ targ->sasaddr = mps_to_u64(&config_page.SASAddress);
+ targ->parent_sasaddr = le64toh(parent_sas_address);
+ targ->parent_devinfo = parent_devinfo;
+ targ->tid = id;
+ targ->linkrate = (linkrate>>4);
+ targ->flags = 0;
+ TAILQ_INIT(&targ->commands);
+ TAILQ_INIT(&targ->timedout_commands);
+ SLIST_INIT(&targ->luns);
+ mps_describe_devinfo(targ->devinfo, devstring, 80);
+ mps_vprintf(sc, "Found device <%s> <%s> <0x%04x> <%d/%d>\n", devstring,
+ mps_describe_table(mps_linkrate_names, targ->linkrate),
+ targ->handle, targ->encl_handle, targ->encl_slot);
+ if ((sassc->flags & MPSSAS_IN_STARTUP) == 0)
+ mpssas_rescan_target(sc, targ);
+ mps_vprintf(sc, "Target id 0x%x added\n", targ->tid);
+out:
+ mpssas_startup_decrement(sassc);
+ return (error);
+}
+
+int
+mpssas_get_sas_address_for_sata_disk(struct mps_softc *sc,
+ u64 *sas_address, u16 handle, u32 device_info)
+{
+ Mpi2SataPassthroughReply_t mpi_reply;
+ int i, rc, try_count;
+ u32 *bufferptr;
+ union _sata_sas_address hash_address;
+ struct _ata_identify_device_data ata_identify;
+ u8 buffer[MPT2SAS_MN_LEN + MPT2SAS_SN_LEN];
+ u32 ioc_status;
+ u8 sas_status;
+
+ memset(&ata_identify, 0, sizeof(ata_identify));
+ try_count = 0;
+ do {
+ rc = mpssas_get_sata_identify(sc, handle, &mpi_reply,
+ (char *)&ata_identify, sizeof(ata_identify), device_info);
+ try_count++;
+ ioc_status = le16toh(mpi_reply.IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ sas_status = mpi_reply.SASStatus;
+ } while ((rc == -EAGAIN || ioc_status || sas_status) &&
+ (try_count < 5));
+
+ if (rc == 0 && !ioc_status && !sas_status) {
+ mps_dprint(sc, MPS_INFO, "%s: got SATA identify successfully "
+ "for handle = 0x%x with try_count = %d\n",
+ __func__, handle, try_count);
+ } else {
+ mps_dprint(sc, MPS_INFO, "%s: handle = 0x%x failed\n",
+ __func__, handle);
+ return -1;
+ }
+ /* Copy & byteswap the 40 byte model number to a buffer */
+ for (i = 0; i < MPT2SAS_MN_LEN; i += 2) {
+ buffer[i] = ((u8 *)ata_identify.model_number)[i + 1];
+ buffer[i + 1] = ((u8 *)ata_identify.model_number)[i];
+ }
+ /* Copy & byteswap the 20 byte serial number to a buffer */
+ for (i = 0; i < MPT2SAS_SN_LEN; i += 2) {
+ buffer[MPT2SAS_MN_LEN + i] =
+ ((u8 *)ata_identify.serial_number)[i + 1];
+ buffer[MPT2SAS_MN_LEN + i + 1] =
+ ((u8 *)ata_identify.serial_number)[i];
+ }
+ bufferptr = (u32 *)buffer;
+ /* There are 60 bytes to hash down to 8. 60 isn't divisible by 8,
+ * so loop through the first 56 bytes (7*8),
+ * and then add in the last dword.
+ */
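+ /*
+ * Illustrative sketch: with the 60-byte buffer viewed as u32 w[15],
+ * the loop yields low = w0+w2+...+w12 and high = w1+w3+...+w13, and
+ * the tail add below folds w14 into low (all sums mod 2^32).
+ */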
+ hash_address.word.low = 0;
+ hash_address.word.high = 0;
+ for (i = 0; (i < ((MPT2SAS_MN_LEN+MPT2SAS_SN_LEN)/8)); i++) {
+ hash_address.word.low += *bufferptr;
+ bufferptr++;
+ hash_address.word.high += *bufferptr;
+ bufferptr++;
+ }
+ /* Add the last dword */
+ hash_address.word.low += *bufferptr;
+ /* Make sure the hash doesn't start with 5, because it could clash
+ * with a SAS address. Change 5 to a D.
+ */
+ if ((hash_address.word.high & 0x000000F0) == (0x00000050))
+ hash_address.word.high |= 0x00000080;
+ *sas_address = (u64)hash_address.wwid[0] << 56 |
+ (u64)hash_address.wwid[1] << 48 | (u64)hash_address.wwid[2] << 40 |
+ (u64)hash_address.wwid[3] << 32 | (u64)hash_address.wwid[4] << 24 |
+ (u64)hash_address.wwid[5] << 16 | (u64)hash_address.wwid[6] << 8 |
+ (u64)hash_address.wwid[7];
+ return 0;
+}
+
+static int
+mpssas_get_sata_identify(struct mps_softc *sc, u16 handle,
+ Mpi2SataPassthroughReply_t *mpi_reply, char *id_buffer, int sz, u32 devinfo)
+{
+ Mpi2SataPassthroughRequest_t *mpi_request;
+ Mpi2SataPassthroughReply_t *reply;
+ struct mps_command *cm;
+ char *buffer;
+ int error = 0;
+
+ buffer = malloc(sz, M_MPT2, M_NOWAIT | M_ZERO);
+ if (!buffer)
+ return (ENOMEM);
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ free(buffer, M_MPT2);
+ return (EBUSY);
+ }
+ mpi_request = (MPI2_SATA_PASSTHROUGH_REQUEST *)cm->cm_req;
+ bzero(mpi_request,sizeof(MPI2_SATA_PASSTHROUGH_REQUEST));
+ mpi_request->Function = MPI2_FUNCTION_SATA_PASSTHROUGH;
+ mpi_request->VF_ID = 0;
+ mpi_request->DevHandle = htole16(handle);
+ mpi_request->PassthroughFlags = (MPI2_SATA_PT_REQ_PT_FLAGS_PIO |
+ MPI2_SATA_PT_REQ_PT_FLAGS_READ);
+ mpi_request->DataLength = htole32(sz);
+ mpi_request->CommandFIS[0] = 0x27;
+ mpi_request->CommandFIS[1] = 0x80;
+ mpi_request->CommandFIS[2] = (devinfo &
+ MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? 0xA1 : 0xEC;
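+ /*
+ * FIS decode: 0x27 is a Register Host-to-Device FIS, 0x80 sets the
+ * C (command) bit, and the command byte is IDENTIFY PACKET DEVICE
+ * (0xA1) for ATAPI devices or IDENTIFY DEVICE (0xEC) otherwise.
+ */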
+ cm->cm_sge = &mpi_request->SGL;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = buffer;
+ cm->cm_length = htole32(sz);
+ error = mps_request_polled(sc, cm);
+ reply = (Mpi2SataPassthroughReply_t *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /* If the poll returns error then we need to do diag reset */
+ printf("%s: poll for page completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(buffer, id_buffer, sz);
+ bcopy(reply, mpi_reply, sizeof(Mpi2SataPassthroughReply_t));
+ if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
+ MPI2_IOCSTATUS_SUCCESS) {
+ printf("%s: error reading SATA PASSTHRU; iocstatus = 0x%x\n",
+ __func__, reply->IOCStatus);
+ error = ENXIO;
+ goto out;
+ }
+out:
+ mps_free_command(sc, cm);
+ free(buffer, M_MPT2);
+ return (error);
+}
+
+static int
+mpssas_volume_add(struct mps_softc *sc, Mpi2EventIrConfigElement_t *element)
+{
+ struct mpssas_softc *sassc;
+ struct mpssas_target *targ;
+ u64 wwid;
+ u16 handle = le16toh(element->VolDevHandle);
+ unsigned int id;
+ int error = 0;
+
+ sassc = sc->sassc;
+ mpssas_startup_increment(sassc);
+ mps_config_get_volume_wwid(sc, handle, &wwid);
+ if (!wwid) {
+ printf("%s: invalid WWID; cannot add volume to mapping table\n",
+ __func__);
+ error = ENXIO;
+ goto out;
+ }
+
+ id = mps_mapping_get_raid_id(sc, wwid, handle);
+ if (id == MPS_MAP_BAD_ID) {
+ printf("%s: could not get ID for volume with handle 0x%04x and "
+ "WWID 0x%016llx\n", __func__, handle,
+ (unsigned long long)wwid);
+ error = ENXIO;
+ goto out;
+ }
+
+ targ = &sassc->targets[id];
+ targ->tid = id;
+ targ->handle = handle;
+ targ->devname = wwid;
+ TAILQ_INIT(&targ->commands);
+ TAILQ_INIT(&targ->timedout_commands);
+ SLIST_INIT(&targ->luns);
+ if ((sassc->flags & MPSSAS_IN_STARTUP) == 0)
+ mpssas_rescan_target(sc, targ);
+ mps_dprint(sc, MPS_INFO, "RAID target id %d added (WWID = 0x%jx)\n",
+ targ->tid, wwid);
+out:
+ mpssas_startup_decrement(sassc);
+ return (error);
+}
+
+/**
+ * mpssas_ir_shutdown - IR shutdown notification
+ * @sc: per adapter object
+ *
+ * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
+ * the host system is shutting down.
+ *
+ * Return nothing.
+ */
+void
+mpssas_ir_shutdown(struct mps_softc *sc)
+{
+ u16 volume_mapping_flags;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ struct dev_mapping_table *mt_entry;
+ u32 start_idx, end_idx;
+ unsigned int id, found_volume = 0;
+ struct mps_command *cm;
+ Mpi2RaidActionRequest_t *action;
+
+ mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
+
+ /* is IR firmware build loaded? */
+ if (!sc->ir_firmware)
+ return;
+
+ /* are there any volumes? Look at IR target IDs. */
+ /*
+ * TODO-later: this should be looked up in the RAID config structure
+ * when it is implemented.
+ */
+ volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+ start_idx = 0;
+ if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
+ start_idx = 1;
+ } else
+ start_idx = sc->max_devices - sc->max_volumes;
+ end_idx = start_idx + sc->max_volumes - 1;
+
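+ /*
+ * With low-volume mapping the volumes occupy the first target IDs
+ * (possibly skipping reserved ID 0); otherwise they sit at the top
+ * of the device range. Scan [start_idx, end_idx] for a mapped,
+ * non-missing volume.
+ */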
+ for (id = start_idx; id <= end_idx; id++) {
+ mt_entry = &sc->mapping_table[id];
+ if ((mt_entry->physical_id != 0) &&
+ (mt_entry->missing_count == 0)) {
+ found_volume = 1;
+ break;
+ }
+ }
+
+ if (!found_volume)
+ return;
+
+ if ((cm = mps_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed\n", __func__);
+ return;
+ }
+
+ action = (MPI2_RAID_ACTION_REQUEST *)cm->cm_req;
+ action->Function = MPI2_FUNCTION_RAID_ACTION;
+ action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ mps_request_polled(sc, cm);
+
+ /*
+ * Don't check for reply, just leave.
+ */
+ if (cm)
+ mps_free_command(sc, cm);
+}
diff --git a/sys/dev/mps/mps_table.c b/sys/dev/mps/mps_table.c
index 55110a1..c9acefe 100644
--- a/sys/dev/mps/mps_table.c
+++ b/sys/dev/mps/mps_table.c
@@ -29,6 +29,7 @@ __FBSDID("$FreeBSD$");
/* Debugging tables for MPT2 */
+/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
@@ -41,6 +42,9 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
#include <machine/bus.h>
#include <machine/resource.h>
@@ -53,6 +57,8 @@ __FBSDID("$FreeBSD$");
#include <dev/mps/mpi/mpi2_ioc.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
#include <dev/mps/mpi/mpi2_init.h>
+#include <dev/mps/mpi/mpi2_tool.h>
+#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
#include <dev/mps/mps_table.h>
diff --git a/sys/dev/mps/mps_user.c b/sys/dev/mps/mps_user.c
index 7ca90c1..75bb7ad 100644
--- a/sys/dev/mps/mps_user.c
+++ b/sys/dev/mps/mps_user.c
@@ -27,7 +27,36 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * LSI MPS-Fusion Host Adapter FreeBSD userland interface
+ * LSI MPT-Fusion Host Adapter FreeBSD userland interface
+ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
*/
#include <sys/cdefs.h>
@@ -35,6 +64,7 @@ __FBSDID("$FreeBSD$");
#include "opt_compat.h"
+/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
@@ -49,6 +79,9 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/ioccom.h>
#include <sys/endian.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
#include <sys/proc.h>
#include <sys/sysent.h>
@@ -56,15 +89,21 @@ __FBSDID("$FreeBSD$");
#include <machine/resource.h>
#include <sys/rman.h>
+#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <dev/mps/mpi/mpi2_type.h>
#include <dev/mps/mpi/mpi2.h>
#include <dev/mps/mpi/mpi2_ioc.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
+#include <dev/mps/mpi/mpi2_init.h>
+#include <dev/mps/mpi/mpi2_tool.h>
+#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
#include <dev/mps/mps_table.h>
-#include <dev/mps/mps_ioctl.h>
+#include <dev/mps/mps_sas.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
static d_open_t mps_open;
static d_close_t mps_close;
@@ -103,8 +142,52 @@ static int mps_user_setup_request(struct mps_command *,
struct mps_usr_command *);
static int mps_user_command(struct mps_softc *, struct mps_usr_command *);
+static int mps_user_pass_thru(struct mps_softc *sc, mps_pass_thru_t *data);
+static void mps_user_get_adapter_data(struct mps_softc *sc,
+ mps_adapter_data_t *data);
+static void mps_user_read_pci_info(struct mps_softc *sc,
+ mps_pci_info_t *data);
+static uint8_t mps_get_fw_diag_buffer_number(struct mps_softc *sc,
+ uint32_t unique_id);
+static int mps_post_fw_diag_buffer(struct mps_softc *sc,
+ mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
+static int mps_release_fw_diag_buffer(struct mps_softc *sc,
+ mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
+ uint32_t diag_type);
+static int mps_diag_register(struct mps_softc *sc,
+ mps_fw_diag_register_t *diag_register, uint32_t *return_code);
+static int mps_diag_unregister(struct mps_softc *sc,
+ mps_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
+static int mps_diag_query(struct mps_softc *sc, mps_fw_diag_query_t *diag_query,
+ uint32_t *return_code);
+static int mps_diag_read_buffer(struct mps_softc *sc,
+ mps_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
+ uint32_t *return_code);
+static int mps_diag_release(struct mps_softc *sc,
+ mps_fw_diag_release_t *diag_release, uint32_t *return_code);
+static int mps_do_diag_action(struct mps_softc *sc, uint32_t action,
+ uint8_t *diag_action, uint32_t length, uint32_t *return_code);
+static int mps_user_diag_action(struct mps_softc *sc, mps_diag_action_t *data);
+static void mps_user_event_query(struct mps_softc *sc, mps_event_query_t *data);
+static void mps_user_event_enable(struct mps_softc *sc,
+ mps_event_enable_t *data);
+static int mps_user_event_report(struct mps_softc *sc,
+ mps_event_report_t *data);
+static int mps_user_reg_access(struct mps_softc *sc, mps_reg_access_t *data);
+static int mps_user_btdh(struct mps_softc *sc, mps_btdh_mapping_t *data);
+
static MALLOC_DEFINE(M_MPSUSER, "mps_user", "Buffers for mps(4) ioctls");
+/* Macros from compat/freebsd32/freebsd32.h */
+#define PTRIN(v) (void *)(uintptr_t)(v)
+#define PTROUT(v) (uint32_t)(uintptr_t)(v)
+
+#define CP(src,dst,fld) do { (dst).fld = (src).fld; } while (0)
+#define PTRIN_CP(src,dst,fld) \
+ do { (dst).fld = PTRIN((src).fld); } while (0)
+#define PTROUT_CP(src,dst,fld) \
+ do { (dst).fld = PTROUT((src).fld); } while (0)
+
int
mps_attach_user(struct mps_softc *sc)
{
@@ -625,18 +708,17 @@ mps_user_command(struct mps_softc *sc, struct mps_usr_command *cmd)
cm->cm_length = 0;
}
- cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_WAKEUP;
+ cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE;
cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
mps_lock(sc);
- err = mps_map_command(sc, cm);
+ err = mps_wait_command(sc, cm, 0);
- if (err != 0 && err != EINPROGRESS) {
+ if (err) {
mps_printf(sc, "%s: invalid request: error %d\n",
__func__, err);
goto Ret;
}
- msleep(cm, &sc->mps_mtx, 0, "mpsuser", 0);
rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
sz = rpl->MsgLength * 4;
@@ -664,7 +746,1302 @@ Ret:
if (buf != NULL)
free(buf, M_MPSUSER);
return (err);
-}
+}
+
+static int
+mps_user_pass_thru(struct mps_softc *sc, mps_pass_thru_t *data)
+{
+ MPI2_REQUEST_HEADER *hdr, *tmphdr;
+ uint8_t tmpbuf[256]; /* staging copy of the caller's request frame */
+ MPI2_DEFAULT_REPLY *rpl;
+ struct mps_command *cm = NULL;
+ int err = 0, dir = 0, sz;
+ uint8_t function = 0;
+ u_int sense_len;
+
+ /*
+ * Only allow one passthru command at a time. Use the MPS_FLAGS_BUSY
+ * bit to denote that a passthru is being processed.
+ */
+ mps_lock(sc);
+ if (sc->mps_flags & MPS_FLAGS_BUSY) {
+ mps_dprint(sc, MPS_INFO, "%s: Only one passthru command "
+ "allowed at a single time.", __func__);
+ mps_unlock(sc);
+ return (EBUSY);
+ }
+ sc->mps_flags |= MPS_FLAGS_BUSY;
+ mps_unlock(sc);
+
+ /*
+ * Do some validation on data direction. Valid cases are:
+ * 1) DataSize is 0 and direction is NONE
+ * 2) DataSize is non-zero and one of:
+ * a) direction is READ or
+ * b) direction is WRITE or
+ * c) direction is BOTH and DataOutSize is non-zero
+ * If valid and the direction is BOTH, change the direction to READ.
+ * if valid and the direction is not BOTH, make sure DataOutSize is 0.
+ */
+ if (((data->DataSize == 0) &&
+ (data->DataDirection == MPS_PASS_THRU_DIRECTION_NONE)) ||
+ ((data->DataSize != 0) &&
+ ((data->DataDirection == MPS_PASS_THRU_DIRECTION_READ) ||
+ (data->DataDirection == MPS_PASS_THRU_DIRECTION_WRITE) ||
+ ((data->DataDirection == MPS_PASS_THRU_DIRECTION_BOTH) &&
+ (data->DataOutSize != 0))))) {
+ if (data->DataDirection == MPS_PASS_THRU_DIRECTION_BOTH)
+ data->DataDirection = MPS_PASS_THRU_DIRECTION_READ;
+ else
+ data->DataOutSize = 0;
+ } else {
+ err = EINVAL;
+ goto RetFreeUnlocked;
+ }
+
+ mps_dprint(sc, MPS_INFO, "%s: req 0x%jx %d rpl 0x%jx %d "
+ "data in 0x%jx %d data out 0x%jx %d data dir %d\n", __func__,
+ data->PtrRequest, data->RequestSize, data->PtrReply,
+ data->ReplySize, data->PtrData, data->DataSize,
+ data->PtrDataOut, data->DataOutSize, data->DataDirection);
+
+ /*
+ * Validate the request size, then copy the whole request into a local
+ * staging buffer so we know what we're dealing with before we commit
+ * to allocating a command for it. Checking the size first keeps a
+ * bogus RequestSize from overflowing the staging buffer.
+ */
+ if (data->RequestSize < (int)sizeof(MPI2_REQUEST_HEADER) ||
+ data->RequestSize > (int)sc->facts->IOCRequestFrameSize * 4 ||
+ data->RequestSize > (int)sizeof(tmpbuf)) {
+ err = EINVAL;
+ goto RetFreeUnlocked;
+ }
+ err = copyin(PTRIN(data->PtrRequest), tmpbuf, data->RequestSize);
+ if (err != 0)
+ goto RetFreeUnlocked;
+ tmphdr = (MPI2_REQUEST_HEADER *)tmpbuf;
+
+ function = tmphdr->Function;
+ mps_dprint(sc, MPS_INFO, "%s: Function %02X MsgFlags %02X\n", __func__,
+ function, tmphdr->MsgFlags);
+
+ /*
+ * Handle a passthru TM request.
+ */
+ if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ MPI2_SCSI_TASK_MANAGE_REQUEST *task;
+
+ mps_lock(sc);
+ cm = mpssas_alloc_tm(sc);
+ if (cm == NULL) {
+ err = EINVAL;
+ goto Ret;
+ }
+
+ /* Copy the request in. Only a small fixup is needed. */
+ task = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
+ bcopy(tmpbuf, task, data->RequestSize);
+ task->TaskMID = cm->cm_desc.Default.SMID;
+
+ cm->cm_data = NULL;
+ cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ cm->cm_complete = NULL;
+ cm->cm_complete_data = NULL;
+
+ err = mps_wait_command(sc, cm, 0);
+
+ if (err != 0) {
+ err = EIO;
+ mps_dprint(sc, MPS_FAULT, "%s: task management failed",
+ __func__);
+ }
+ /*
+ * Copy the reply data and sense data to user space.
+ */
+ if (cm->cm_reply != NULL) {
+ rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
+ sz = rpl->MsgLength * 4;
+
+ if (sz > data->ReplySize) {
+ mps_printf(sc, "%s: reply buffer too small: %d, "
+ "required: %d\n", __func__, data->ReplySize, sz);
+ err = EINVAL;
+ } else {
+ mps_unlock(sc);
+ copyout(cm->cm_reply, PTRIN(data->PtrReply),
+ data->ReplySize);
+ mps_lock(sc);
+ }
+ }
+ mpssas_free_tm(sc, cm);
+ goto Ret;
+ }
+
+ mps_lock(sc);
+ cm = mps_alloc_command(sc);
+
+ if (cm == NULL) {
+ mps_printf(sc, "%s: no mps requests\n", __func__);
+ err = ENOMEM;
+ goto Ret;
+ }
+ mps_unlock(sc);
+
+ hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
+ bcopy(&tmphdr, hdr, data->RequestSize);
+
+ /*
+ * Do some checking to make sure the IOCTL request contains a valid
+ * request. Then set the SGL info.
+ */
+ mpi_init_sge(cm, hdr, (void *)((uint8_t *)hdr + data->RequestSize));
+
+ /*
+ * Set up for read, write or both. From check above, DataOutSize will
+ * be 0 if direction is READ or WRITE, but it will have some non-zero
+ * value if the direction is BOTH. So, just use the biggest size to get
+ * the cm_data buffer size. If direction is BOTH, 2 SGLs need to be set
+ * up; the first is for the request and the second will contain the
+ * response data. cm_out_len needs to be set here and this will be used
+ * when the SGLs are set up.
+ */
+ cm->cm_data = NULL;
+ cm->cm_length = MAX(data->DataSize, data->DataOutSize);
+ cm->cm_out_len = data->DataOutSize;
+ cm->cm_flags = 0;
+ if (cm->cm_length != 0) {
+ cm->cm_data = malloc(cm->cm_length, M_MPSUSER, M_WAITOK |
+ M_ZERO);
+ if (cm->cm_data == NULL) {
+ mps_dprint(sc, MPS_FAULT, "%s: alloc failed for IOCTL "
+ "passthru length %d\n", __func__, cm->cm_length);
+ } else {
+ cm->cm_flags = MPS_CM_FLAGS_DATAIN;
+ if (data->DataOutSize) {
+ cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
+ err = copyin(PTRIN(data->PtrDataOut),
+ cm->cm_data, data->DataOutSize);
+ } else if (data->DataDirection ==
+ MPS_PASS_THRU_DIRECTION_WRITE) {
+ cm->cm_flags = MPS_CM_FLAGS_DATAOUT;
+ err = copyin(PTRIN(data->PtrData),
+ cm->cm_data, data->DataSize);
+ }
+ if (err != 0)
+ mps_dprint(sc, MPS_FAULT, "%s: failed to copy "
+ "IOCTL data from user space\n", __func__);
+ }
+ }
+ cm->cm_flags |= MPS_CM_FLAGS_SGE_SIMPLE;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+
+ /*
+ * Set up Sense buffer and SGL offset for IO passthru. SCSI IO request
+ * uses SCSI IO descriptor.
+ */
+ if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
+ (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+ MPI2_SCSI_IO_REQUEST *scsi_io_req;
+
+ scsi_io_req = (MPI2_SCSI_IO_REQUEST *)hdr;
+ /*
+ * Put SGE for data and data_out buffer at the end of
+ * scsi_io_request message header (64 bytes in total).
+ * Following above SGEs, the residual space will be used by
+ * sense data.
+ */
+ scsi_io_req->SenseBufferLength = (uint8_t)(data->RequestSize -
+ 64);
+ scsi_io_req->SenseBufferLowAddress = cm->cm_sense_busaddr;
+
+ /*
+ * Set SGLOffset0 value. This is the number of dwords that SGL
+ * is offset from the beginning of MPI2_SCSI_IO_REQUEST struct.
+ */
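+ /* offsetof(MPI2_SCSI_IO_REQUEST, SGL) is 96 bytes, i.e. 24 dwords. */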
+ scsi_io_req->SGLOffset0 = 24;
+
+ /*
+ * Setup descriptor info. RAID passthrough must use the
+ * default request descriptor which is already set, so if this
+ * is a SCSI IO request, change the descriptor to SCSI IO.
+ * Also, if this is a SCSI IO request, handle the reply in the
+ * mpssas_scsio_complete function.
+ */
+ if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
+ cm->cm_desc.SCSIIO.RequestFlags =
+ MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ cm->cm_desc.SCSIIO.DevHandle = scsi_io_req->DevHandle;
+
+ /*
+ * Make sure the DevHandle is not 0 because this is a
+ * likely error.
+ */
+ if (scsi_io_req->DevHandle == 0) {
+ err = EINVAL;
+ goto RetFreeUnlocked;
+ }
+ }
+ }
+
+ mps_lock(sc);
+
+ err = mps_wait_command(sc, cm, 0);
+
+ if (err) {
+ mps_printf(sc, "%s: invalid request: error %d\n", __func__,
+ err);
+ mps_unlock(sc);
+ goto RetFreeUnlocked;
+ }
+
+ /*
+ * Sync the DMA data, if any. Then copy the data to user space.
+ */
+ if (cm->cm_data != NULL) {
+ if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
+ dir = BUS_DMASYNC_POSTREAD;
+ else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
+ dir = BUS_DMASYNC_POSTWRITE;
+ bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
+ bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
+
+ if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) {
+ mps_unlock(sc);
+ err = copyout(cm->cm_data,
+ PTRIN(data->PtrData), data->DataSize);
+ mps_lock(sc);
+ if (err != 0)
+ mps_dprint(sc, MPS_FAULT, "%s: failed to copy "
+ "IOCTL data to user space\n", __func__);
+ }
+ }
+
+ /*
+ * Copy the reply data and sense data to user space.
+ */
+ if (cm->cm_reply != NULL) {
+ rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
+ sz = rpl->MsgLength * 4;
+
+ if (sz > data->ReplySize) {
+ mps_printf(sc, "%s: reply buffer too small: %d, "
+ "required: %d\n", __func__, data->ReplySize, sz);
+ err = EINVAL;
+ } else {
+ mps_unlock(sc);
+ copyout(cm->cm_reply, PTRIN(data->PtrReply),
+ data->ReplySize);
+ mps_lock(sc);
+ }
+
+ if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
+ (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+ if (((MPI2_SCSI_IO_REPLY *)rpl)->SCSIState &
+ MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ sense_len =
+ MIN(((MPI2_SCSI_IO_REPLY *)rpl)->SenseCount,
+ sizeof(struct scsi_sense_data));
+ mps_unlock(sc);
+ /* Sense data follows the reply in the user's buffer. */
+ copyout(cm->cm_sense, PTRIN(data->PtrReply +
+ sizeof(MPI2_SCSI_IO_REPLY)), sense_len);
+ mps_lock(sc);
+ }
+ }
+ }
+ mps_unlock(sc);
+
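+/*
+ * Exit protocol: jump to RetFreeUnlocked with the mps lock released and to
+ * Ret with it held; Ret clears MPS_FLAGS_BUSY and drops the lock.
+ */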
+RetFreeUnlocked:
+ mps_lock(sc);
+
+ if (cm != NULL) {
+ if (cm->cm_data)
+ free(cm->cm_data, M_MPSUSER);
+ mps_free_command(sc, cm);
+ }
+Ret:
+ sc->mps_flags &= ~MPS_FLAGS_BUSY;
+ mps_unlock(sc);
+
+ return (err);
+}
+
+static void
+mps_user_get_adapter_data(struct mps_softc *sc, mps_adapter_data_t *data)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2BiosPage3_t config_page;
+
+ /*
+ * Use the PCI interface functions to get the Bus, Device, and Function
+ * information.
+ */
+ data->PciInformation.u.bits.BusNumber = pci_get_bus(sc->mps_dev);
+ data->PciInformation.u.bits.DeviceNumber = pci_get_slot(sc->mps_dev);
+ data->PciInformation.u.bits.FunctionNumber =
+ pci_get_function(sc->mps_dev);
+
+ /*
+ * Get the FW version that should already be saved in IOC Facts.
+ */
+ data->MpiFirmwareVersion = sc->facts->FWVersion.Word;
+
+ /*
+ * General device info.
+ */
+ data->AdapterType = MPSIOCTL_ADAPTER_TYPE_SAS2;
+ if (sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
+ data->AdapterType = MPSIOCTL_ADAPTER_TYPE_SAS2_SSS6200;
+ data->PCIDeviceHwId = pci_get_device(sc->mps_dev);
+ data->PCIDeviceHwRev = pci_read_config(sc->mps_dev, PCIR_REVID, 1);
+ data->SubSystemId = pci_get_subdevice(sc->mps_dev);
+ data->SubsystemVendorId = pci_get_subvendor(sc->mps_dev);
+
+ /*
+ * Get the driver version.
+ */
+ strcpy((char *)&data->DriverVersion[0], MPS_DRIVER_VERSION);
+
+ /*
+ * Need to get BIOS Config Page 3 for the BIOS Version.
+ */
+ data->BiosVersion = 0;
+ if (mps_config_get_bios_pg3(sc, &mpi_reply, &config_page))
+ printf("%s: Error while retrieving BIOS Version\n", __func__);
+ else
+ data->BiosVersion = config_page.BiosVersion;
+}
+
+static void
+mps_user_read_pci_info(struct mps_softc *sc, mps_pci_info_t *data)
+{
+ int i;
+
+ /*
+ * Use the PCI interface functions to get the Bus, Device, and Function
+ * information.
+ */
+ data->BusNumber = pci_get_bus(sc->mps_dev);
+ data->DeviceNumber = pci_get_slot(sc->mps_dev);
+ data->FunctionNumber = pci_get_function(sc->mps_dev);
+
+ /*
+ * Now get the interrupt vector and the pci header. The vector can
+ * only be 0 right now. The header is the first 256 bytes of config
+ * space.
+ */
+ data->InterruptVector = 0;
+ for (i = 0; i < sizeof (data->PciHeader); i++) {
+ data->PciHeader[i] = pci_read_config(sc->mps_dev, i, 1);
+ }
+}
+
+static uint8_t
+mps_get_fw_diag_buffer_number(struct mps_softc *sc, uint32_t unique_id)
+{
+ uint8_t index;
+
+ for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
+ if (sc->fw_diag_buffer_list[index].unique_id == unique_id) {
+ return (index);
+ }
+ }
+
+ return (MPS_FW_DIAGNOSTIC_UID_NOT_FOUND);
+}
+
+static int
+mps_post_fw_diag_buffer(struct mps_softc *sc,
+ mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
+{
+ MPI2_DIAG_BUFFER_POST_REQUEST *req;
+ MPI2_DIAG_BUFFER_POST_REPLY *reply;
+ struct mps_command *cm = NULL;
+ int i, status;
+
+ /*
+ * If buffer is not enabled, just leave.
+ */
+ *return_code = MPS_FW_DIAG_ERROR_POST_FAILED;
+ if (!pBuffer->enabled) {
+ return (MPS_DIAG_FAILURE);
+ }
+
+ /*
+ * Clear some flags initially.
+ */
+ pBuffer->force_release = FALSE;
+ pBuffer->valid_data = FALSE;
+ pBuffer->owned_by_firmware = FALSE;
+
+ /*
+ * Get a command.
+ */
+ cm = mps_alloc_command(sc);
+ if (cm == NULL) {
+ mps_printf(sc, "%s: no mps requests\n", __func__);
+ return (MPS_DIAG_FAILURE);
+ }
+
+ /*
+	 * Build the request for posting the FW Diag Buffer and send it.
+ */
+ req = (MPI2_DIAG_BUFFER_POST_REQUEST *)cm->cm_req;
+ req->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ req->BufferType = pBuffer->buffer_type;
+ req->ExtendedType = pBuffer->extended_type;
+ req->BufferLength = pBuffer->size;
+ for (i = 0; i < (sizeof(req->ProductSpecific) / 4); i++)
+ req->ProductSpecific[i] = pBuffer->product_specific[i];
+ mps_from_u64(sc->fw_diag_busaddr, &req->BufferAddress);
+ cm->cm_data = NULL;
+ cm->cm_length = 0;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_complete_data = NULL;
+
+ /*
+ * Send command synchronously.
+ */
+ status = mps_wait_command(sc, cm, 0);
+ if (status) {
+ mps_printf(sc, "%s: invalid request: error %d\n", __func__,
+ status);
+ status = MPS_DIAG_FAILURE;
+ goto done;
+ }
+
+ /*
+ * Process POST reply.
+ */
+ reply = (MPI2_DIAG_BUFFER_POST_REPLY *)cm->cm_reply;
+ if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
+ status = MPS_DIAG_FAILURE;
+ mps_dprint(sc, MPS_FAULT, "%s: post of FW Diag Buffer failed "
+ "with IOCStatus = 0x%x, IOCLogInfo = 0x%x and "
+ "TransferLength = 0x%x\n", __func__, reply->IOCStatus,
+ reply->IOCLogInfo, reply->TransferLength);
+ goto done;
+ }
+
+ /*
+ * Post was successful.
+ */
+ pBuffer->valid_data = TRUE;
+ pBuffer->owned_by_firmware = TRUE;
+ *return_code = MPS_FW_DIAG_ERROR_SUCCESS;
+ status = MPS_DIAG_SUCCESS;
+
+done:
+ mps_free_command(sc, cm);
+ return (status);
+}
+
+static int
+mps_release_fw_diag_buffer(struct mps_softc *sc,
+ mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
+ uint32_t diag_type)
+{
+ MPI2_DIAG_RELEASE_REQUEST *req;
+ MPI2_DIAG_RELEASE_REPLY *reply;
+ struct mps_command *cm = NULL;
+ int status;
+
+ /*
+ * If buffer is not enabled, just leave.
+ */
+ *return_code = MPS_FW_DIAG_ERROR_RELEASE_FAILED;
+ if (!pBuffer->enabled) {
+		mps_dprint(sc, MPS_INFO, "%s: this buffer type is not "
+		    "supported by the IOC\n", __func__);
+ return (MPS_DIAG_FAILURE);
+ }
+
+ /*
+ * Clear some flags initially.
+ */
+ pBuffer->force_release = FALSE;
+ pBuffer->valid_data = FALSE;
+ pBuffer->owned_by_firmware = FALSE;
+
+ /*
+ * Get a command.
+ */
+ cm = mps_alloc_command(sc);
+ if (cm == NULL) {
+ mps_printf(sc, "%s: no mps requests\n", __func__);
+ return (MPS_DIAG_FAILURE);
+ }
+
+ /*
+ * Build the request for releasing the FW Diag Buffer and send it.
+ */
+ req = (MPI2_DIAG_RELEASE_REQUEST *)cm->cm_req;
+ req->Function = MPI2_FUNCTION_DIAG_RELEASE;
+ req->BufferType = pBuffer->buffer_type;
+ cm->cm_data = NULL;
+ cm->cm_length = 0;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_complete_data = NULL;
+
+ /*
+ * Send command synchronously.
+ */
+ status = mps_wait_command(sc, cm, 0);
+ if (status) {
+ mps_printf(sc, "%s: invalid request: error %d\n", __func__,
+ status);
+ status = MPS_DIAG_FAILURE;
+ goto done;
+ }
+
+ /*
+ * Process RELEASE reply.
+ */
+ reply = (MPI2_DIAG_RELEASE_REPLY *)cm->cm_reply;
+ if ((reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) ||
+ pBuffer->owned_by_firmware) {
+ status = MPS_DIAG_FAILURE;
+ mps_dprint(sc, MPS_FAULT, "%s: release of FW Diag Buffer "
+ "failed with IOCStatus = 0x%x and IOCLogInfo = 0x%x\n",
+ __func__, reply->IOCStatus, reply->IOCLogInfo);
+ goto done;
+ }
+
+ /*
+ * Release was successful.
+ */
+ *return_code = MPS_FW_DIAG_ERROR_SUCCESS;
+ status = MPS_DIAG_SUCCESS;
+
+ /*
+ * If this was for an UNREGISTER diag type command, clear the unique ID.
+ */
+ if (diag_type == MPS_FW_DIAG_TYPE_UNREGISTER) {
+ pBuffer->unique_id = MPS_FW_DIAG_INVALID_UID;
+ }
+
+done:
+ return (status);
+}
+
+static int
+mps_diag_register(struct mps_softc *sc, mps_fw_diag_register_t *diag_register,
+ uint32_t *return_code)
+{
+ mps_fw_diagnostic_buffer_t *pBuffer;
+ uint8_t extended_type, buffer_type, i;
+ uint32_t buffer_size;
+ uint32_t unique_id;
+ int status;
+
+ extended_type = diag_register->ExtendedType;
+ buffer_type = diag_register->BufferType;
+ buffer_size = diag_register->RequestedBufferSize;
+ unique_id = diag_register->UniqueId;
+
+ /*
+ * Check for valid buffer type
+ */
+ if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
+ return (MPS_DIAG_FAILURE);
+ }
+
+ /*
+ * Get the current buffer and look up the unique ID. The unique ID
+ * should not be found. If it is, the ID is already in use.
+ */
+ i = mps_get_fw_diag_buffer_number(sc, unique_id);
+ pBuffer = &sc->fw_diag_buffer_list[buffer_type];
+ if (i != MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
+ return (MPS_DIAG_FAILURE);
+ }
+
+ /*
+ * The buffer's unique ID should not be registered yet, and the given
+ * unique ID cannot be 0.
+ */
+ if ((pBuffer->unique_id != MPS_FW_DIAG_INVALID_UID) ||
+ (unique_id == MPS_FW_DIAG_INVALID_UID)) {
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
+ return (MPS_DIAG_FAILURE);
+ }
+
+ /*
+ * If this buffer is already posted as immediate, just change owner.
+ */
+ if (pBuffer->immediate && pBuffer->owned_by_firmware &&
+ (pBuffer->unique_id == MPS_FW_DIAG_INVALID_UID)) {
+ pBuffer->immediate = FALSE;
+ pBuffer->unique_id = unique_id;
+ return (MPS_DIAG_SUCCESS);
+ }
+
+ /*
+ * Post a new buffer after checking if it's enabled. The DMA buffer
+ * that is allocated will be contiguous (nsegments = 1).
+ */
+ if (!pBuffer->enabled) {
+ *return_code = MPS_FW_DIAG_ERROR_NO_BUFFER;
+ return (MPS_DIAG_FAILURE);
+ }
+ if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ buffer_size, /* maxsize */
+ 1, /* nsegments */
+ buffer_size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->fw_diag_dmat)) {
+ device_printf(sc->mps_dev, "Cannot allocate FW diag buffer DMA "
+ "tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->fw_diag_dmat, (void **)&sc->fw_diag_buffer,
+ BUS_DMA_NOWAIT, &sc->fw_diag_map)) {
+ device_printf(sc->mps_dev, "Cannot allocate FW diag buffer "
+ "memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->fw_diag_buffer, buffer_size);
+ bus_dmamap_load(sc->fw_diag_dmat, sc->fw_diag_map, sc->fw_diag_buffer,
+ buffer_size, mps_memaddr_cb, &sc->fw_diag_busaddr, 0);
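+	/*
+	 * mps_memaddr_cb is assumed to store the bus address of the single
+	 * contiguous segment (nsegments = 1 in the tag above) in
+	 * fw_diag_busaddr; the POST request later hands that address to
+	 * the IOC.
+	 */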
+ pBuffer->size = buffer_size;
+
+ /*
+ * Copy the given info to the diag buffer and post the buffer.
+ */
+ pBuffer->buffer_type = buffer_type;
+ pBuffer->immediate = FALSE;
+ if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
+ for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
+ i++) {
+ pBuffer->product_specific[i] =
+ diag_register->ProductSpecific[i];
+ }
+ }
+ pBuffer->extended_type = extended_type;
+ pBuffer->unique_id = unique_id;
+ status = mps_post_fw_diag_buffer(sc, pBuffer, return_code);
+
+ /*
+ * In case there was a failure, free the DMA buffer.
+ */
+ if (status == MPS_DIAG_FAILURE) {
+ if (sc->fw_diag_busaddr != 0)
+ bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map);
+ if (sc->fw_diag_buffer != NULL)
+ bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer,
+ sc->fw_diag_map);
+ if (sc->fw_diag_dmat != NULL)
+ bus_dma_tag_destroy(sc->fw_diag_dmat);
+ }
+
+ return (status);
+}
+
+static int
+mps_diag_unregister(struct mps_softc *sc,
+ mps_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
+{
+ mps_fw_diagnostic_buffer_t *pBuffer;
+ uint8_t i;
+ uint32_t unique_id;
+ int status;
+
+ unique_id = diag_unregister->UniqueId;
+
+ /*
+ * Get the current buffer and look up the unique ID. The unique ID
+ * should be there.
+ */
+ i = mps_get_fw_diag_buffer_number(sc, unique_id);
+ if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
+ return (MPS_DIAG_FAILURE);
+ }
+
+ pBuffer = &sc->fw_diag_buffer_list[i];
+
+ /*
+ * Try to release the buffer from FW before freeing it. If release
+ * fails, don't free the DMA buffer in case FW tries to access it
+ * later. If buffer is not owned by firmware, can't release it.
+ */
+ if (!pBuffer->owned_by_firmware) {
+ status = MPS_DIAG_SUCCESS;
+ } else {
+ status = mps_release_fw_diag_buffer(sc, pBuffer, return_code,
+ MPS_FW_DIAG_TYPE_UNREGISTER);
+ }
+
+ /*
+ * At this point, return the current status no matter what happens with
+ * the DMA buffer.
+ */
+ pBuffer->unique_id = MPS_FW_DIAG_INVALID_UID;
+ if (status == MPS_DIAG_SUCCESS) {
+ if (sc->fw_diag_busaddr != 0)
+ bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map);
+ if (sc->fw_diag_buffer != NULL)
+ bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer,
+ sc->fw_diag_map);
+ if (sc->fw_diag_dmat != NULL)
+ bus_dma_tag_destroy(sc->fw_diag_dmat);
+ }
+
+ return (status);
+}
+
+static int
+mps_diag_query(struct mps_softc *sc, mps_fw_diag_query_t *diag_query,
+ uint32_t *return_code)
+{
+ mps_fw_diagnostic_buffer_t *pBuffer;
+ uint8_t i;
+ uint32_t unique_id;
+
+ unique_id = diag_query->UniqueId;
+
+ /*
+ * If ID is valid, query on ID.
+ * If ID is invalid, query on buffer type.
+ */
+ if (unique_id == MPS_FW_DIAG_INVALID_UID) {
+ i = diag_query->BufferType;
+ if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
+ return (MPS_DIAG_FAILURE);
+ }
+ } else {
+ i = mps_get_fw_diag_buffer_number(sc, unique_id);
+ if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
+ return (MPS_DIAG_FAILURE);
+ }
+ }
+
+ /*
+ * Fill query structure with the diag buffer info.
+ */
+ pBuffer = &sc->fw_diag_buffer_list[i];
+ diag_query->BufferType = pBuffer->buffer_type;
+ diag_query->ExtendedType = pBuffer->extended_type;
+ if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
+ for (i = 0; i < (sizeof(diag_query->ProductSpecific) / 4);
+ i++) {
+ diag_query->ProductSpecific[i] =
+ pBuffer->product_specific[i];
+ }
+ }
+ diag_query->TotalBufferSize = pBuffer->size;
+ diag_query->DriverAddedBufferSize = 0;
+ diag_query->UniqueId = pBuffer->unique_id;
+ diag_query->ApplicationFlags = 0;
+ diag_query->DiagnosticFlags = 0;
+
+ /*
+ * Set/Clear application flags
+ */
+ if (pBuffer->immediate) {
+ diag_query->ApplicationFlags &= ~MPS_FW_DIAG_FLAG_APP_OWNED;
+ } else {
+ diag_query->ApplicationFlags |= MPS_FW_DIAG_FLAG_APP_OWNED;
+ }
+ if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
+ diag_query->ApplicationFlags |= MPS_FW_DIAG_FLAG_BUFFER_VALID;
+ } else {
+ diag_query->ApplicationFlags &= ~MPS_FW_DIAG_FLAG_BUFFER_VALID;
+ }
+ if (pBuffer->owned_by_firmware) {
+ diag_query->ApplicationFlags |=
+ MPS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
+ } else {
+ diag_query->ApplicationFlags &=
+ ~MPS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
+ }
+
+ return (MPS_DIAG_SUCCESS);
+}
+
+static int
+mps_diag_read_buffer(struct mps_softc *sc,
+ mps_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
+ uint32_t *return_code)
+{
+ mps_fw_diagnostic_buffer_t *pBuffer;
+ uint8_t i, *pData;
+ uint32_t unique_id;
+ int status;
+
+ unique_id = diag_read_buffer->UniqueId;
+
+ /*
+ * Get the current buffer and look up the unique ID. The unique ID
+ * should be there.
+ */
+ i = mps_get_fw_diag_buffer_number(sc, unique_id);
+ if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
+ return (MPS_DIAG_FAILURE);
+ }
+
+ pBuffer = &sc->fw_diag_buffer_list[i];
+
+ /*
+ * Make sure requested read is within limits
+ */
+ if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
+ pBuffer->size) {
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
+ return (MPS_DIAG_FAILURE);
+ }
+
+ /*
+ * Copy the requested data from DMA to the diag_read_buffer. The DMA
+ * buffer that was allocated is one contiguous buffer.
+ */
+ pData = (uint8_t *)(sc->fw_diag_buffer +
+ diag_read_buffer->StartingOffset);
+ if (copyout(pData, ioctl_buf, diag_read_buffer->BytesToRead) != 0)
+ return (MPS_DIAG_FAILURE);
+ diag_read_buffer->Status = 0;
+
+ /*
+ * Set or clear the Force Release flag.
+ */
+ if (pBuffer->force_release) {
+ diag_read_buffer->Flags |= MPS_FW_DIAG_FLAG_FORCE_RELEASE;
+ } else {
+ diag_read_buffer->Flags &= ~MPS_FW_DIAG_FLAG_FORCE_RELEASE;
+ }
+
+ /*
+ * If buffer is to be reregistered, make sure it's not already owned by
+ * firmware first.
+ */
+ status = MPS_DIAG_SUCCESS;
+ if (!pBuffer->owned_by_firmware) {
+ if (diag_read_buffer->Flags & MPS_FW_DIAG_FLAG_REREGISTER) {
+ status = mps_post_fw_diag_buffer(sc, pBuffer,
+ return_code);
+ }
+ }
+
+ return (status);
+}
+
+static int
+mps_diag_release(struct mps_softc *sc, mps_fw_diag_release_t *diag_release,
+ uint32_t *return_code)
+{
+ mps_fw_diagnostic_buffer_t *pBuffer;
+ uint8_t i;
+ uint32_t unique_id;
+ int status;
+
+ unique_id = diag_release->UniqueId;
+
+ /*
+ * Get the current buffer and look up the unique ID. The unique ID
+ * should be there.
+ */
+ i = mps_get_fw_diag_buffer_number(sc, unique_id);
+ if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
+ return (MPS_DIAG_FAILURE);
+ }
+
+ pBuffer = &sc->fw_diag_buffer_list[i];
+
+ /*
+ * If buffer is not owned by firmware, it's already been released.
+ */
+ if (!pBuffer->owned_by_firmware) {
+ *return_code = MPS_FW_DIAG_ERROR_ALREADY_RELEASED;
+ return (MPS_DIAG_FAILURE);
+ }
+
+ /*
+ * Release the buffer.
+ */
+ status = mps_release_fw_diag_buffer(sc, pBuffer, return_code,
+ MPS_FW_DIAG_TYPE_RELEASE);
+ return (status);
+}
+
+static int
+mps_do_diag_action(struct mps_softc *sc, uint32_t action, uint8_t *diag_action,
+ uint32_t length, uint32_t *return_code)
+{
+ mps_fw_diag_register_t diag_register;
+ mps_fw_diag_unregister_t diag_unregister;
+ mps_fw_diag_query_t diag_query;
+ mps_diag_read_buffer_t diag_read_buffer;
+ mps_fw_diag_release_t diag_release;
+ int status = MPS_DIAG_SUCCESS;
+ uint32_t original_return_code;
+
+ original_return_code = *return_code;
+ *return_code = MPS_FW_DIAG_ERROR_SUCCESS;
+
+ switch (action) {
+ case MPS_FW_DIAG_TYPE_REGISTER:
+		if (length < sizeof(diag_register)) {
+ *return_code =
+ MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPS_DIAG_FAILURE;
+ break;
+ }
+ if (copyin(diag_action, &diag_register,
+ sizeof(diag_register)) != 0)
+ return (MPS_DIAG_FAILURE);
+ status = mps_diag_register(sc, &diag_register,
+ return_code);
+ break;
+
+ case MPS_FW_DIAG_TYPE_UNREGISTER:
+ if (length < sizeof(diag_unregister)) {
+ *return_code =
+ MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPS_DIAG_FAILURE;
+ break;
+ }
+ if (copyin(diag_action, &diag_unregister,
+ sizeof(diag_unregister)) != 0)
+ return (MPS_DIAG_FAILURE);
+ status = mps_diag_unregister(sc, &diag_unregister,
+ return_code);
+ break;
+
+ case MPS_FW_DIAG_TYPE_QUERY:
+ if (length < sizeof (diag_query)) {
+ *return_code =
+ MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPS_DIAG_FAILURE;
+ break;
+ }
+ if (copyin(diag_action, &diag_query, sizeof(diag_query))
+ != 0)
+ return (MPS_DIAG_FAILURE);
+ status = mps_diag_query(sc, &diag_query, return_code);
+ if (status == MPS_DIAG_SUCCESS)
+ if (copyout(&diag_query, diag_action,
+ sizeof (diag_query)) != 0)
+ return (MPS_DIAG_FAILURE);
+ break;
+
+ case MPS_FW_DIAG_TYPE_READ_BUFFER:
+ if (copyin(diag_action, &diag_read_buffer,
+ sizeof(diag_read_buffer)) != 0)
+ return (MPS_DIAG_FAILURE);
+ if (length < diag_read_buffer.BytesToRead) {
+ *return_code =
+ MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPS_DIAG_FAILURE;
+ break;
+ }
+ status = mps_diag_read_buffer(sc, &diag_read_buffer,
+ PTRIN(diag_read_buffer.PtrDataBuffer),
+ return_code);
+ if (status == MPS_DIAG_SUCCESS) {
+ if (copyout(&diag_read_buffer, diag_action,
+ sizeof(diag_read_buffer) -
+ sizeof(diag_read_buffer.PtrDataBuffer)) !=
+ 0)
+ return (MPS_DIAG_FAILURE);
+ }
+ break;
+
+ case MPS_FW_DIAG_TYPE_RELEASE:
+ if (length < sizeof(diag_release)) {
+ *return_code =
+ MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPS_DIAG_FAILURE;
+ break;
+ }
+ if (copyin(diag_action, &diag_release,
+ sizeof(diag_release)) != 0)
+ return (MPS_DIAG_FAILURE);
+ status = mps_diag_release(sc, &diag_release,
+ return_code);
+ break;
+
+ default:
+ *return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPS_DIAG_FAILURE;
+ break;
+ }
+
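+	/*
+	 * Compatibility shim: callers that passed in MPS_FW_DIAG_NEW are
+	 * expected to inspect ReturnCode rather than the ioctl status, so
+	 * report success whenever a meaningful ReturnCode was filled in.
+	 */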
+ if ((status == MPS_DIAG_FAILURE) &&
+ (original_return_code == MPS_FW_DIAG_NEW) &&
+ (*return_code != MPS_FW_DIAG_ERROR_SUCCESS))
+ status = MPS_DIAG_SUCCESS;
+
+ return (status);
+}
+
+static int
+mps_user_diag_action(struct mps_softc *sc, mps_diag_action_t *data)
+{
+ int status;
+
+ /*
+	 * Only allow one diag action at a time.
+ */
+ if (sc->mps_flags & MPS_FLAGS_BUSY) {
+		mps_dprint(sc, MPS_INFO, "%s: only one FW diag command "
+		    "allowed at a time\n", __func__);
+ return (EBUSY);
+ }
+ sc->mps_flags |= MPS_FLAGS_BUSY;
+
+ /*
+ * Send diag action request
+ */
+ if (data->Action == MPS_FW_DIAG_TYPE_REGISTER ||
+ data->Action == MPS_FW_DIAG_TYPE_UNREGISTER ||
+ data->Action == MPS_FW_DIAG_TYPE_QUERY ||
+ data->Action == MPS_FW_DIAG_TYPE_READ_BUFFER ||
+ data->Action == MPS_FW_DIAG_TYPE_RELEASE) {
+ status = mps_do_diag_action(sc, data->Action,
+ PTRIN(data->PtrDiagAction), data->Length,
+ &data->ReturnCode);
+ } else
+ status = EINVAL;
+
+ sc->mps_flags &= ~MPS_FLAGS_BUSY;
+ return (status);
+}
+
+/*
+ * Copy the event recording mask and the event queue size out. For
+ * clarification, the event recording mask (events_to_record) is not the same
+ * thing as the event mask (event_mask). events_to_record has a bit set for
+ * every event type that is to be recorded by the driver, and event_mask has a
+ * bit cleared for every event that is allowed into the driver from the IOC.
+ * They really have nothing to do with each other.
+ */
+static void
+mps_user_event_query(struct mps_softc *sc, mps_event_query_t *data)
+{
+ uint8_t i;
+
+ mps_lock(sc);
+ data->Entries = MPS_EVENT_QUEUE_SIZE;
+
+ for (i = 0; i < 4; i++) {
+ data->Types[i] = sc->events_to_record[i];
+ }
+ mps_unlock(sc);
+}
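+
+/*
+ * Illustrative mapping (mirrors the arithmetic in mpssas_record_event):
+ * event type E is recorded iff bit (E % 32) of events_to_record[E / 32]
+ * is set.  For example, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x1C) maps
+ * to bit 28 of Types[0]/events_to_record[0].
+ */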
+
+/*
+ * Set the driver's event mask according to what's been given. See
+ * mps_user_event_query for explanation of the event recording mask and the IOC
+ * event mask. It's the app's responsibility to enable event logging by setting
+ * the bits in events_to_record. Initially, no events will be logged.
+ */
+static void
+mps_user_event_enable(struct mps_softc *sc, mps_event_enable_t *data)
+{
+ uint8_t i;
+
+ mps_lock(sc);
+ for (i = 0; i < 4; i++) {
+ sc->events_to_record[i] = data->Types[i];
+ }
+ mps_unlock(sc);
+}
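+
+/*
+ * Minimal userland sketch (illustrative only; assumes the Types[] layout
+ * above and an open descriptor on the controller's /dev/mps%d node):
+ *
+ *	mps_event_enable_t ee = { 0 };
+ *	ee.Types[0] = 1U << 28;		(record SAS topology changes)
+ *	ioctl(fd, MPTIOCTL_EVENT_ENABLE, &ee);
+ */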
+
+/*
+ * Copy out the events that have been recorded, up to the max events allowed.
+ */
+static int
+mps_user_event_report(struct mps_softc *sc, mps_event_report_t *data)
+{
+ int status = 0;
+ uint32_t size;
+
+ mps_lock(sc);
+ size = data->Size;
+	if (size >= sizeof(sc->recorded_events)) {
+ mps_unlock(sc);
+ if (copyout((void *)sc->recorded_events,
+ PTRIN(data->PtrEvents), size) != 0)
+ status = EFAULT;
+ mps_lock(sc);
+ } else {
+ /*
+ * data->Size value is not large enough to copy event data.
+ */
+ status = EFAULT;
+ }
+
+ /*
+ * Change size value to match the number of bytes that were copied.
+ */
+ if (status == 0)
+ data->Size = sizeof(sc->recorded_events);
+ mps_unlock(sc);
+
+ return (status);
+}
+
+/*
+ * Record events into the driver from the IOC if they are not masked.
+ */
+void
+mpssas_record_event(struct mps_softc *sc,
+ MPI2_EVENT_NOTIFICATION_REPLY *event_reply)
+{
+ uint32_t event;
+ int i, j;
+ uint16_t event_data_len;
+ boolean_t sendAEN = FALSE;
+
+ event = event_reply->Event;
+
+ /*
+ * Generate a system event to let anyone who cares know that a
+ * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
+ * event mask is set to.
+ */
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
+ sendAEN = TRUE;
+ }
+
+ /*
+ * Record the event only if its corresponding bit is set in
+ * events_to_record. event_index is the index into recorded_events and
+ * event_number is the overall number of an event being recorded since
+ * start-of-day. event_index will roll over; event_number will never
+ * roll over.
+ */
+ i = (uint8_t)(event / 32);
+ j = (uint8_t)(event % 32);
+ if ((i < 4) && ((1 << j) & sc->events_to_record[i])) {
+ i = sc->event_index;
+ sc->recorded_events[i].Type = event;
+ sc->recorded_events[i].Number = ++sc->event_number;
+ bzero(sc->recorded_events[i].Data, MPS_MAX_EVENT_DATA_LENGTH *
+ 4);
+ event_data_len = event_reply->EventDataLength;
+
+ if (event_data_len > 0) {
+ /*
+ * Limit data to size in m_event entry
+ */
+ if (event_data_len > MPS_MAX_EVENT_DATA_LENGTH) {
+ event_data_len = MPS_MAX_EVENT_DATA_LENGTH;
+ }
+ for (j = 0; j < event_data_len; j++) {
+ sc->recorded_events[i].Data[j] =
+ event_reply->EventData[j];
+ }
+
+ /*
+ * check for index wrap-around
+ */
+ if (++i == MPS_EVENT_QUEUE_SIZE) {
+ i = 0;
+ }
+ sc->event_index = (uint8_t)i;
+
+ /*
+ * Set flag to send the event.
+ */
+ sendAEN = TRUE;
+ }
+ }
+
+ /*
+ * Generate a system event if flag is set to let anyone who cares know
+ * that an event has occurred.
+ */
+ if (sendAEN) {
+//SLM-how to send a system event (see kqueue, kevent)
+// (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
+// "SAS", NULL, NULL, DDI_NOSLEEP);
+ }
+}
+
+static int
+mps_user_reg_access(struct mps_softc *sc, mps_reg_access_t *data)
+{
+ int status = 0;
+
+ switch (data->Command) {
+ /*
+ * IO access is not supported.
+ */
+ case REG_IO_READ:
+ case REG_IO_WRITE:
+		mps_dprint(sc, MPS_INFO, "IO access is not supported; "
+		    "use memory access.\n");
+ status = EINVAL;
+ break;
+
+ case REG_MEM_READ:
+ data->RegData = mps_regread(sc, data->RegOffset);
+ break;
+
+ case REG_MEM_WRITE:
+ mps_regwrite(sc, data->RegOffset, data->RegData);
+ break;
+
+ default:
+ status = EINVAL;
+ break;
+ }
+
+ return (status);
+}
+
+static int
+mps_user_btdh(struct mps_softc *sc, mps_btdh_mapping_t *data)
+{
+ uint8_t bt2dh = FALSE;
+ uint8_t dh2bt = FALSE;
+ uint16_t dev_handle, bus, target;
+
+ bus = data->Bus;
+ target = data->TargetID;
+ dev_handle = data->DevHandle;
+
+ /*
+ * When DevHandle is 0xFFFF and Bus/Target are not 0xFFFF, use Bus/
+ * Target to get DevHandle. When Bus/Target are 0xFFFF and DevHandle is
+ * not 0xFFFF, use DevHandle to get Bus/Target. Anything else is
+ * invalid.
+ */
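+	/*
+	 * Example (illustrative): Bus = 0, TargetID = 3, DevHandle = 0xFFFF
+	 * asks for the DevHandle of target 3; DevHandle = 0x0009 with Bus
+	 * and TargetID both 0xFFFF asks for the reverse translation.
+	 */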
+ if ((bus == 0xFFFF) && (target == 0xFFFF) && (dev_handle != 0xFFFF))
+ dh2bt = TRUE;
+ if ((dev_handle == 0xFFFF) && (bus != 0xFFFF) && (target != 0xFFFF))
+ bt2dh = TRUE;
+ if (!dh2bt && !bt2dh)
+ return (EINVAL);
+
+ /*
+ * Only handle bus of 0. Make sure target is within range.
+ */
+ if (bt2dh) {
+ if (bus != 0)
+ return (EINVAL);
+
+		if (target >= sc->max_devices) {
+ mps_dprint(sc, MPS_FAULT, "Target ID is out of range "
+ "for Bus/Target to DevHandle mapping.");
+ return (EINVAL);
+ }
+ dev_handle = sc->mapping_table[target].dev_handle;
+ if (dev_handle)
+ data->DevHandle = dev_handle;
+ } else {
+ bus = 0;
+ target = mps_mapping_get_sas_id_from_handle(sc, dev_handle);
+ data->Bus = bus;
+ data->TargetID = target;
+ }
+
+ return (0);
+}
static int
mps_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag,
@@ -674,7 +2051,7 @@ mps_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag,
struct mps_cfg_page_req *page_req;
struct mps_ext_cfg_page_req *ext_page_req;
void *mps_page;
- int error;
+ int error, reset_loop;
mps_page = NULL;
sc = dev->si_drv1;
@@ -730,6 +2107,98 @@ mps_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag,
case MPSIO_MPS_COMMAND:
error = mps_user_command(sc, (struct mps_usr_command *)arg);
break;
+ case MPTIOCTL_PASS_THRU:
+ /*
+ * The user has requested to pass through a command to be
+ * executed by the MPT firmware. Call our routine which does
+	 * this. Only allow one passthru IOCTL at a time.
+ */
+ error = mps_user_pass_thru(sc, (mps_pass_thru_t *)arg);
+ break;
+ case MPTIOCTL_GET_ADAPTER_DATA:
+ /*
+ * The user has requested to read adapter data. Call our
+ * routine which does this.
+ */
+ error = 0;
+ mps_user_get_adapter_data(sc, (mps_adapter_data_t *)arg);
+ break;
+ case MPTIOCTL_GET_PCI_INFO:
+ /*
+ * The user has requested to read pci info. Call
+ * our routine which does this.
+ */
+ mps_lock(sc);
+ error = 0;
+ mps_user_read_pci_info(sc, (mps_pci_info_t *)arg);
+ mps_unlock(sc);
+ break;
+ case MPTIOCTL_RESET_ADAPTER:
+ mps_lock(sc);
+ sc->port_enable_complete = 0;
+ error = mps_reinit(sc);
+ mps_unlock(sc);
+ /*
+ * Wait no more than 5 minutes for Port Enable to complete
+ */
+ for (reset_loop = 0; (reset_loop < MPS_DIAG_RESET_TIMEOUT) &&
+ (!sc->port_enable_complete); reset_loop++) {
+ DELAY(1000);
+ }
+ if (reset_loop == MPS_DIAG_RESET_TIMEOUT) {
+ printf("Port Enable did not complete after Diag "
+ "Reset.\n");
+ }
+ break;
+ case MPTIOCTL_DIAG_ACTION:
+ /*
+ * The user has done a diag buffer action. Call our routine
+	 * which does this. Only allow one diag action at a time.
+ */
+ mps_lock(sc);
+ error = mps_user_diag_action(sc, (mps_diag_action_t *)arg);
+ mps_unlock(sc);
+ break;
+ case MPTIOCTL_EVENT_QUERY:
+ /*
+ * The user has done an event query. Call our routine which does
+ * this.
+ */
+ error = 0;
+ mps_user_event_query(sc, (mps_event_query_t *)arg);
+ break;
+ case MPTIOCTL_EVENT_ENABLE:
+ /*
+ * The user has done an event enable. Call our routine which
+ * does this.
+ */
+ error = 0;
+ mps_user_event_enable(sc, (mps_event_enable_t *)arg);
+ break;
+ case MPTIOCTL_EVENT_REPORT:
+ /*
+ * The user has done an event report. Call our routine which
+ * does this.
+ */
+ error = mps_user_event_report(sc, (mps_event_report_t *)arg);
+ break;
+ case MPTIOCTL_REG_ACCESS:
+ /*
+ * The user has requested register access. Call our routine
+ * which does this.
+ */
+ mps_lock(sc);
+ error = mps_user_reg_access(sc, (mps_reg_access_t *)arg);
+ mps_unlock(sc);
+ break;
+ case MPTIOCTL_BTDH_MAPPING:
+ /*
+ * The user has requested to translate a bus/target to a
+ * DevHandle or a DevHandle to a bus/target. Call our routine
+ * which does this.
+ */
+ error = mps_user_btdh(sc, (mps_btdh_mapping_t *)arg);
+ break;
default:
error = ENOIOCTL;
break;
@@ -743,16 +2212,6 @@ mps_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag,
#ifdef COMPAT_FREEBSD32
-/* Macros from compat/freebsd32/freebsd32.h */
-#define PTRIN(v) (void *)(uintptr_t)(v)
-#define PTROUT(v) (uint32_t)(uintptr_t)(v)
-
-#define CP(src,dst,fld) do { (dst).fld = (src).fld; } while (0)
-#define PTRIN_CP(src,dst,fld) \
- do { (dst).fld = PTRIN((src).fld); } while (0)
-#define PTROUT_CP(src,dst,fld) \
- do { (dst).fld = PTROUT((src).fld); } while (0)
-
struct mps_cfg_page_req32 {
MPI2_CONFIG_PAGE_HEADER header;
uint32_t page_address;
diff --git a/sys/dev/mps/mpsvar.h b/sys/dev/mps/mpsvar.h
index 0de654e..c6f83df 100644
--- a/sys/dev/mps/mpsvar.h
+++ b/sys/dev/mps/mpsvar.h
@@ -22,13 +22,44 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
+ * $FreeBSD$
+ */
+/*-
+ * Copyright (c) 2011 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
* $FreeBSD$
*/
#ifndef _MPSVAR_H
#define _MPSVAR_H
+#define MPS_DRIVER_VERSION "11.255.03.00-fbsd"
+
#define MPS_DB_MAX_WAIT 2500
#define MPS_REQ_FRAMES 1024
@@ -41,12 +72,137 @@
#define MPS_SGE32_SIZE 8
#define MPS_SGC_SIZE 8
+#define CAN_SLEEP 1
+#define NO_SLEEP 0
+
#define MPS_PERIODIC_DELAY 1 /* 1 second heartbeat/watchdog check */
+#define MPS_SCSI_RI_INVALID_FRAME (0x00000002)
+
+/*
+ * host mapping related macro definitions
+ */
+#define MPS_MAPTABLE_BAD_IDX 0xFFFFFFFF
+#define MPS_DPM_BAD_IDX 0xFFFF
+#define MPS_ENCTABLE_BAD_IDX 0xFF
+#define MPS_MAX_MISSING_COUNT 0x0F
+#define MPS_DEV_RESERVED 0x20000000
+#define MPS_MAP_IN_USE 0x10000000
+#define MPS_RAID_CHANNEL 1
+#define MPS_MAP_BAD_ID 0xFFFFFFFF
+
+/*
+ * WarpDrive controller
+ */
+#define MPS_CHIP_WD_DEVICE_ID 0x007E
+#define MPS_WD_LSI_OEM 0x80
+#define MPS_WD_HIDE_EXPOSE_MASK 0x03
+#define MPS_WD_HIDE_ALWAYS 0x00
+#define MPS_WD_EXPOSE_ALWAYS 0x01
+#define MPS_WD_HIDE_IF_VOLUME 0x02
+#define MPS_WD_RETRY 0x01
+#define MPS_MAN_PAGE10_SIZE 0x5C /* Hardcode for now */
+#define MPS_MAX_DISKS_IN_VOL 10
+
+/*
+ * WarpDrive Event Logging
+ */
+#define MPI2_WD_LOG_ENTRY 0x8002
+#define MPI2_WD_SSD_THROTTLING 0x0041
+#define MPI2_WD_DRIVE_LIFE_WARN 0x0043
+#define MPI2_WD_DRIVE_LIFE_DEAD 0x0044
+#define MPI2_WD_RAIL_MON_FAIL 0x004D
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+/**
+ * struct dev_mapping_table - device mapping information
+ * @physical_id: SAS address for drives or WWID for RAID volumes
+ * @device_info: bitfield provides detailed info about the device
+ * @phy_bits: bitfields indicating controller phys
+ * @dpm_entry_num: index of this device in device persistent map table
+ * @dev_handle: device handle for the device pointed by this entry
+ * @channel: target channel
+ * @id: target id
+ * @missing_count: number of times the device not detected by driver
+ * @hide_flag: Hide this physical disk/not (foreign configuration)
+ * @init_complete: Whether the start of the day checks completed or not
+ */
+struct dev_mapping_table {
+ u64 physical_id;
+ u32 device_info;
+ u32 phy_bits;
+ u16 dpm_entry_num;
+ u16 dev_handle;
+ u8 reserved1;
+ u8 channel;
+ u16 id;
+ u8 missing_count;
+ u8 init_complete;
+ u8 TLR_bits;
+ u8 reserved2;
+};
+
+/**
+ * struct enc_mapping_table - mapping information about an enclosure
+ * @enclosure_id: Logical ID of this enclosure
+ * @start_index: index to the entry in dev_mapping_table
+ * @phy_bits: bitfields indicating controller phys
+ * @dpm_entry_num: index of this enclosure in device persistent map table
+ * @enc_handle: device handle for the enclosure pointed by this entry
+ * @num_slots: number of slots in the enclosure
+ * @start_slot: Starting slot id
+ * @missing_count: number of times the device not detected by driver
+ * @removal_flag: used to mark the device for removal
+ * @skip_search: used as a flag to include/exclude enclosure for search
+ * @init_complete: Whether the start of the day checks completed or not
+ */
+struct enc_mapping_table {
+ u64 enclosure_id;
+ u32 start_index;
+ u32 phy_bits;
+ u16 dpm_entry_num;
+ u16 enc_handle;
+ u16 num_slots;
+ u16 start_slot;
+ u8 missing_count;
+ u8 removal_flag;
+ u8 skip_search;
+ u8 init_complete;
+};
+
+/**
+ * struct map_removal_table - entries to be removed from mapping table
+ * @dpm_entry_num: index of this device in device persistent map table
+ * @dev_handle: device handle for the device pointed by this entry
+ */
+struct map_removal_table{
+ u16 dpm_entry_num;
+ u16 dev_handle;
+};
+
+typedef struct mps_fw_diagnostic_buffer {
+ size_t size;
+ uint8_t extended_type;
+ uint8_t buffer_type;
+ uint8_t force_release;
+ uint32_t product_specific[23];
+ uint8_t immediate;
+ uint8_t enabled;
+ uint8_t valid_data;
+ uint8_t owned_by_firmware;
+ uint32_t unique_id;
+} mps_fw_diagnostic_buffer_t;
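+
+/*
+ * Illustrative lifecycle (not normative): mps_diag_register() fills in an
+ * entry and posts it to the IOC (owned_by_firmware = TRUE);
+ * mps_diag_release()/mps_diag_unregister() return ownership to the host,
+ * and unregister also resets unique_id to MPS_FW_DIAG_INVALID_UID.
+ */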
+
struct mps_softc;
struct mps_command;
struct mpssas_softc;
+union ccb;
struct mpssas_target;
+struct mps_column_map;
MALLOC_DECLARE(M_MPT2);
@@ -63,13 +219,16 @@ struct mps_chain {
/*
* This needs to be at least 2 to support SMP passthrough.
*/
-#define MPS_IOVEC_COUNT 2
+#define MPS_IOVEC_COUNT 2
struct mps_command {
TAILQ_ENTRY(mps_command) cm_link;
+ TAILQ_ENTRY(mps_command) cm_recovery;
struct mps_softc *cm_sc;
+ union ccb *cm_ccb;
void *cm_data;
u_int cm_length;
+ u_int cm_out_len;
struct uio cm_uio;
struct iovec cm_iovec[MPS_IOVEC_COUNT];
u_int cm_max_segs;
@@ -82,6 +241,7 @@ struct mps_command {
void *cm_complete_data;
struct mpssas_target *cm_targ;
MPI2_REQUEST_DESCRIPTOR_UNION cm_desc;
+ u_int cm_lun;
u_int cm_flags;
#define MPS_CM_FLAGS_POLLED (1 << 0)
#define MPS_CM_FLAGS_COMPLETE (1 << 1)
@@ -89,7 +249,7 @@ struct mps_command {
#define MPS_CM_FLAGS_DATAOUT (1 << 3)
#define MPS_CM_FLAGS_DATAIN (1 << 4)
#define MPS_CM_FLAGS_WAKEUP (1 << 5)
-#define MPS_CM_FLAGS_ACTIVE (1 << 6)
+#define MPS_CM_FLAGS_DD_IO (1 << 6)
#define MPS_CM_FLAGS_USE_UIO (1 << 7)
#define MPS_CM_FLAGS_SMP_PASS (1 << 8)
#define MPS_CM_FLAGS_CHAIN_FAILED (1 << 9)
@@ -106,6 +266,11 @@ struct mps_command {
struct callout cm_callout;
};
+struct mps_column_map {
+ uint16_t dev_handle;
+ uint8_t phys_disk_num;
+};
+
struct mps_event_handle {
TAILQ_ENTRY(mps_event_handle) eh_list;
mps_evt_callback_t *callback;
@@ -121,17 +286,24 @@ struct mps_softc {
#define MPS_FLAGS_MSI (1 << 1)
#define MPS_FLAGS_BUSY (1 << 2)
#define MPS_FLAGS_SHUTDOWN (1 << 3)
-#define MPS_FLAGS_ATTACH_DONE (1 << 4)
+#define MPS_FLAGS_DIAGRESET (1 << 4)
+#define MPS_FLAGS_ATTACH_DONE (1 << 5)
+#define MPS_FLAGS_WD_AVAILABLE (1 << 6)
u_int mps_debug;
- u_int allow_multiple_tm_cmds;
+ u_int disable_msix;
+ u_int disable_msi;
int tm_cmds_active;
int io_cmds_active;
int io_cmds_highwater;
int chain_free;
+ int max_chains;
int chain_free_lowwater;
+#if __FreeBSD_version >= 900030
uint64_t chain_alloc_fail;
+#endif
struct sysctl_ctx_list sysctl_ctx;
struct sysctl_oid *sysctl_tree;
+ char fw_version[16];
struct mps_command *commands;
struct mps_chain *chains;
struct callout periodic;
@@ -139,9 +311,9 @@ struct mps_softc {
struct mpssas_softc *sassc;
TAILQ_HEAD(, mps_command) req_list;
+ TAILQ_HEAD(, mps_command) high_priority_req_list;
TAILQ_HEAD(, mps_chain) chain_list;
TAILQ_HEAD(, mps_command) tm_list;
- TAILQ_HEAD(, mps_command) io_list;
int replypostindex;
int replyfreeindex;
@@ -196,6 +368,73 @@ struct mps_softc {
bus_addr_t free_busaddr;
bus_dma_tag_t queues_dmat;
bus_dmamap_t queues_map;
+
+ uint8_t *fw_diag_buffer;
+ bus_addr_t fw_diag_busaddr;
+ bus_dma_tag_t fw_diag_dmat;
+ bus_dmamap_t fw_diag_map;
+
+ uint8_t ir_firmware;
+
+ /* static config pages */
+ Mpi2IOCPage8_t ioc_pg8;
+
+ /* host mapping support */
+ struct dev_mapping_table *mapping_table;
+ struct enc_mapping_table *enclosure_table;
+ struct map_removal_table *removal_table;
+ uint8_t *dpm_entry_used;
+ uint8_t *dpm_flush_entry;
+ Mpi2DriverMappingPage0_t *dpm_pg0;
+ uint16_t max_devices;
+ uint16_t max_enclosures;
+ uint16_t max_expanders;
+ uint8_t max_volumes;
+ uint8_t num_enc_table_entries;
+ uint8_t num_rsvd_entries;
+ uint8_t num_channels;
+ uint16_t max_dpm_entries;
+ uint8_t is_dpm_enable;
+ uint8_t track_mapping_events;
+ uint32_t pending_map_events;
+ uint8_t mt_full_retry;
+ uint8_t mt_add_device_failed;
+
+ /* FW diag Buffer List */
+ mps_fw_diagnostic_buffer_t
+ fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_COUNT];
+
+ /* Event Recording IOCTL support */
+ uint32_t events_to_record[4];
+ mps_event_entry_t recorded_events[MPS_EVENT_QUEUE_SIZE];
+ uint8_t event_index;
+ uint32_t event_number;
+
+ /* EEDP and TLR support */
+ uint8_t eedp_enabled;
+ uint8_t control_TLR;
+
+ /* Shutdown Event Handler */
+ eventhandler_tag shutdown_eh;
+
+ /* To track topo events during reset */
+#define MPS_DIAG_RESET_TIMEOUT 300000
+ uint8_t wait_for_port_enable;
+ uint8_t port_enable_complete;
+
+ /* WD controller */
+ uint8_t WD_valid_config;
+ uint8_t WD_hide_expose;
+
+ /* Direct Drive for WarpDrive */
+ uint8_t DD_num_phys_disks;
+ uint16_t DD_dev_handle;
+ uint32_t DD_stripe_size;
+ uint32_t DD_stripe_exponent;
+ uint32_t DD_block_size;
+ uint16_t DD_block_exponent;
+ uint64_t DD_max_lba;
+ struct mps_column_map DD_column_map[MPS_MAX_DISKS_IN_VOL];
};
struct mps_config_params {
@@ -210,6 +449,13 @@ struct mps_config_params {
void *cbdata;
};
+struct scsi_read_capacity_eedp
+{
+ uint8_t addr[8];
+ uint8_t length[4];
+ uint8_t protect;
+};
+
static __inline uint32_t
mps_regread(struct mps_softc *sc, uint32_t offset)
{
@@ -225,7 +471,6 @@ mps_regwrite(struct mps_softc *sc, uint32_t offset, uint32_t val)
static __inline void
mps_free_reply(struct mps_softc *sc, uint32_t busaddr)
{
-
if (++sc->replyfreeindex >= sc->fqdepth)
sc->replyfreeindex = 0;
sc->free_queue[sc->replyfreeindex] = busaddr;
@@ -242,8 +487,11 @@ mps_alloc_chain(struct mps_softc *sc)
sc->chain_free--;
if (sc->chain_free < sc->chain_free_lowwater)
sc->chain_free_lowwater = sc->chain_free;
- } else
+ }
+#if __FreeBSD_version >= 900030
+ else
sc->chain_alloc_fail++;
+#endif
return (chain);
}
@@ -262,15 +510,16 @@ mps_free_command(struct mps_softc *sc, struct mps_command *cm)
{
struct mps_chain *chain, *chain_temp;
- if (cm->cm_reply != NULL) {
+ if (cm->cm_reply != NULL)
mps_free_reply(sc, cm->cm_reply_data);
- cm->cm_reply = NULL;
- }
+ cm->cm_reply = NULL;
cm->cm_flags = 0;
cm->cm_complete = NULL;
cm->cm_complete_data = NULL;
- cm->cm_targ = 0;
+ cm->cm_ccb = NULL;
+ cm->cm_targ = NULL;
cm->cm_max_segs = 0;
+ cm->cm_lun = 0;
cm->cm_state = MPS_CM_STATE_FREE;
TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, chain_temp) {
TAILQ_REMOVE(&cm->cm_chain_list, chain, chain_link);
@@ -295,6 +544,43 @@ mps_alloc_command(struct mps_softc *sc)
}
static __inline void
+mps_free_high_priority_command(struct mps_softc *sc, struct mps_command *cm)
+{
+ struct mps_chain *chain, *chain_temp;
+
+ if (cm->cm_reply != NULL)
+ mps_free_reply(sc, cm->cm_reply_data);
+ cm->cm_reply = NULL;
+ cm->cm_flags = 0;
+ cm->cm_complete = NULL;
+ cm->cm_complete_data = NULL;
+ cm->cm_ccb = NULL;
+ cm->cm_targ = NULL;
+ cm->cm_lun = 0;
+ cm->cm_state = MPS_CM_STATE_FREE;
+ TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, chain_temp) {
+ TAILQ_REMOVE(&cm->cm_chain_list, chain, chain_link);
+ mps_free_chain(sc, chain);
+ }
+ TAILQ_INSERT_TAIL(&sc->high_priority_req_list, cm, cm_link);
+}
+
+static __inline struct mps_command *
+mps_alloc_high_priority_command(struct mps_softc *sc)
+{
+ struct mps_command *cm;
+
+ cm = TAILQ_FIRST(&sc->high_priority_req_list);
+ if (cm == NULL)
+ return (NULL);
+
+ TAILQ_REMOVE(&sc->high_priority_req_list, cm, cm_link);
+ KASSERT(cm->cm_state == MPS_CM_STATE_FREE, ("mps: Allocating busy command\n"));
+ cm->cm_state = MPS_CM_STATE_BUSY;
+ return (cm);
+}
+
+static __inline void
mps_lock(struct mps_softc *sc)
{
mtx_lock(&sc->mps_mtx);
@@ -315,6 +601,12 @@ mps_unlock(struct mps_softc *sc)
#define mps_printf(sc, args...) \
device_printf((sc)->mps_dev, ##args)
+#define mps_vprintf(sc, args...) \
+do { \
+ if (bootverbose) \
+ mps_printf(sc, ##args); \
+} while (0)
+
#define mps_dprint(sc, level, msg, args...) \
do { \
if (sc->mps_debug & level) \
@@ -375,7 +667,9 @@ mps_unmask_intr(struct mps_softc *sc)
mps_regwrite(sc, MPI2_HOST_INTERRUPT_MASK_OFFSET, mask);
}
-int mps_pci_setup_interrupts(struct mps_softc *);
+int mps_pci_setup_interrupts(struct mps_softc *sc);
+int mps_pci_restore(struct mps_softc *sc);
+
int mps_attach(struct mps_softc *sc);
int mps_free(struct mps_softc *sc);
void mps_intr(void *);
@@ -383,23 +677,97 @@ void mps_intr_msi(void *);
void mps_intr_locked(void *);
int mps_register_events(struct mps_softc *, uint8_t *, mps_evt_callback_t *,
void *, struct mps_event_handle **);
+int mps_restart(struct mps_softc *);
int mps_update_events(struct mps_softc *, struct mps_event_handle *, uint8_t *);
int mps_deregister_events(struct mps_softc *, struct mps_event_handle *);
-int mps_request_polled(struct mps_softc *sc, struct mps_command *cm);
-void mps_enqueue_request(struct mps_softc *, struct mps_command *);
int mps_push_sge(struct mps_command *, void *, size_t, int);
int mps_add_dmaseg(struct mps_command *, vm_paddr_t, size_t, u_int, int);
int mps_attach_sas(struct mps_softc *sc);
int mps_detach_sas(struct mps_softc *sc);
-int mps_map_command(struct mps_softc *sc, struct mps_command *cm);
int mps_read_config_page(struct mps_softc *, struct mps_config_params *);
int mps_write_config_page(struct mps_softc *, struct mps_config_params *);
void mps_memaddr_cb(void *, bus_dma_segment_t *, int , int );
void mpi_init_sge(struct mps_command *cm, void *req, void *sge);
int mps_attach_user(struct mps_softc *);
void mps_detach_user(struct mps_softc *);
+void mpssas_record_event(struct mps_softc *sc,
+ MPI2_EVENT_NOTIFICATION_REPLY *event_reply);
+
+int mps_map_command(struct mps_softc *sc, struct mps_command *cm);
+int mps_wait_command(struct mps_softc *sc, struct mps_command *cm, int timeout);
+int mps_request_polled(struct mps_softc *sc, struct mps_command *cm);
+
+int mps_config_get_bios_pg3(struct mps_softc *sc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page);
+int mps_config_get_raid_volume_pg0(struct mps_softc *sc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 page_address);
+int mps_config_get_ioc_pg8(struct mps_softc *sc, Mpi2ConfigReply_t *,
+ Mpi2IOCPage8_t *);
+int mps_config_get_man_pg10(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply);
+int mps_config_get_sas_device_pg0(struct mps_softc *, Mpi2ConfigReply_t *,
+ Mpi2SasDevicePage0_t *, u32 , u16 );
+int mps_config_get_dpm_pg0(struct mps_softc *, Mpi2ConfigReply_t *,
+ Mpi2DriverMappingPage0_t *, u16 );
+int mps_config_get_raid_volume_pg1(struct mps_softc *sc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u16 handle);
+int mps_config_get_volume_wwid(struct mps_softc *sc, u16 volume_handle,
+ u64 *wwid);
+int mps_config_get_raid_pd_pg0(struct mps_softc *sc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page,
+ u32 page_address);
+void mpssas_ir_shutdown(struct mps_softc *sc);
+
+int mps_reinit(struct mps_softc *sc);
+void mpssas_handle_reinit(struct mps_softc *sc);
+
+void mps_base_static_config_pages(struct mps_softc *sc);
+void mps_wd_config_pages(struct mps_softc *sc);
+
+int mps_mapping_initialize(struct mps_softc *);
+void mps_mapping_topology_change_event(struct mps_softc *,
+ Mpi2EventDataSasTopologyChangeList_t *);
+int mps_mapping_is_reinit_required(struct mps_softc *);
+void mps_mapping_free_memory(struct mps_softc *sc);
+int mps_config_set_dpm_pg0(struct mps_softc *, Mpi2ConfigReply_t *,
+ Mpi2DriverMappingPage0_t *, u16 );
+void mps_mapping_exit(struct mps_softc *);
+void mps_mapping_check_devices(struct mps_softc *, int);
+int mps_mapping_allocate_memory(struct mps_softc *sc);
+unsigned int mps_mapping_get_sas_id(struct mps_softc *, uint64_t , u16);
+unsigned int mps_mapping_get_sas_id_from_handle(struct mps_softc *sc,
+ u16 handle);
+unsigned int mps_mapping_get_raid_id(struct mps_softc *sc, u64 wwid,
+ u16 handle);
+unsigned int mps_mapping_get_raid_id_from_handle(struct mps_softc *sc,
+ u16 volHandle);
+void mps_mapping_enclosure_dev_status_change_event(struct mps_softc *,
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data);
+void mps_mapping_ir_config_change_event(struct mps_softc *sc,
+ Mpi2EventDataIrConfigChangeList_t *event_data);
+
+void mpssas_evt_handler(struct mps_softc *sc, uintptr_t data,
+ MPI2_EVENT_NOTIFICATION_REPLY *event);
+void mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle);
+int mpssas_startup(struct mps_softc *sc);
SYSCTL_DECL(_hw_mps);
+/* Compatibility shims for different OS versions */
+#if __FreeBSD_version >= 800001
+#define mps_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+ kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
+#define mps_kproc_exit(arg) kproc_exit(arg)
+#else
+#define mps_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+ kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
+#define mps_kproc_exit(arg) kthread_exit(arg)
+#endif
+
+#if defined(CAM_PRIORITY_XPT)
+#define MPS_PRIORITY_XPT CAM_PRIORITY_XPT
+#else
+#define MPS_PRIORITY_XPT 5
+#endif
#endif
diff --git a/sys/dev/netmap/ixgbe_netmap.h b/sys/dev/netmap/ixgbe_netmap.h
index f456044..8e5fe1b 100644
--- a/sys/dev/netmap/ixgbe_netmap.h
+++ b/sys/dev/netmap/ixgbe_netmap.h
@@ -191,6 +191,10 @@ fail:
* (this is also true for every use of ring in the kernel).
*
* ring->avail is never used, only checked for bogus values.
+ *
+ * do_lock is set iff the function is called from the ioctl handler.
+ * In this case, grab a lock around the body, and also reclaim transmitted
+ * buffers irrespective of interrupt mitigation.
*/
static int
ixgbe_netmap_txsync(void *a, u_int ring_nr, int do_lock)
@@ -292,10 +296,11 @@ ring_reset:
* need this.
*/
curr->read.buffer_addr = htole64(paddr);
- curr->read.olinfo_status = 0;
+ curr->read.olinfo_status = htole32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
curr->read.cmd_type_len =
htole32(txr->txd_cmd | len |
(IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
IXGBE_ADVTXD_DCMD_IFCS |
IXGBE_TXD_CMD_EOP | flags) );
/* If the buffer has changed, unload and reload map
@@ -328,15 +333,41 @@ ring_reset:
}
/*
- * If no packets are sent, or there is no room in the tx ring,
- * Check whether there are completed transmissions.
- * Because this is expensive (we need a register etc.)
- * we only do it if absolutely necessary, i.e. there is no room
- * in the tx ring, or where were no completed transmissions
- * (meaning that probably the caller really wanted to check
- * for completed transmissions).
+ * Reclaim buffers for completed transmissions.
+ * Because this is expensive (we read a NIC register etc.)
+ * we only do it in specific cases (see below).
+ * In all cases kring->nr_kflags indicates which slot will be
+ * checked upon a tx interrupt (nkr_num_slots means none).
*/
- if (n == 0 || kring->nr_hwavail < 1) {
+ if (do_lock) {
+ j = 1; /* forced reclaim, ignore interrupts */
+ kring->nr_kflags = kring->nkr_num_slots;
+ } else if (kring->nr_hwavail > 0) {
+ j = 0; /* buffers still available: no reclaim, ignore intr. */
+ kring->nr_kflags = kring->nkr_num_slots;
+ } else {
+ /*
+ * no buffers available, locate a slot for which we request
+ * ReportStatus (approximately half ring after next_to_clean)
+ * and record it in kring->nr_kflags.
+ * If the slot has DD set, do the reclaim looking at TDH,
+ * otherwise we go to sleep (in netmap_poll()) and will be
+	 * woken up when slot nr_kflags is ready.
+ */
+ struct ixgbe_legacy_tx_desc *txd = (struct ixgbe_legacy_tx_desc *)txr->tx_base;
+
+ j = txr->next_to_clean + kring->nkr_num_slots/2;
+ if (j >= kring->nkr_num_slots)
+ j -= kring->nkr_num_slots;
+		// round to the closest slot with DD set
+		j = (j < kring->nkr_num_slots / 4 || j >= kring->nkr_num_slots*3/4) ?
+ 0 : report_frequency;
+ kring->nr_kflags = j; /* the slot to check */
+ j = txd[j].upper.fields.status & IXGBE_TXD_STAT_DD;
+ }
+ if (!j) {
+ netmap_skip_txsync++;
+ } else {
int delta;
/*
@@ -391,6 +422,8 @@ ring_reset:
* We must subtract the newly consumed slots (cur - nr_hwcur)
* from nr_hwavail, make the descriptors available for the next reads,
* and set kring->nr_hwcur = ring->cur and ring->avail = kring->nr_hwavail.
+ *
+ * do_lock has a special meaning: please refer to txsync.
*/
static int
ixgbe_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
@@ -401,6 +434,7 @@ ixgbe_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
struct netmap_kring *kring = &na->rx_rings[ring_nr];
struct netmap_ring *ring = kring->ring;
int j, k, l, n, lim = kring->nkr_num_slots - 1;
+ int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
k = ring->cur; /* cache and check value, same as in txsync */
n = k - kring->nr_hwcur;
@@ -437,6 +471,7 @@ ixgbe_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
if (j > lim)
j -= lim + 1;
+ if (force_update) {
for (n = 0; ; n++) {
union ixgbe_adv_rx_desc *curr = &rxr->rx_base[l];
uint32_t staterr = le32toh(curr->wb.upper.status_error);
@@ -453,6 +488,8 @@ ixgbe_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
rxr->next_to_check = l;
kring->nr_hwavail += n;
}
+ kring->nr_kflags &= ~NKR_PENDINTR;
+ }
/*
* Skip past packets that userspace has already processed
diff --git a/sys/dev/netmap/netmap.c b/sys/dev/netmap/netmap.c
index e5695b3..1f282ce 100644
--- a/sys/dev/netmap/netmap.c
+++ b/sys/dev/netmap/netmap.c
@@ -146,6 +146,12 @@ SYSCTL_INT(_dev_netmap, OID_AUTO, total_buffers,
CTLFLAG_RD, &nm_buf_pool.total_buffers, 0, "total_buffers");
SYSCTL_INT(_dev_netmap, OID_AUTO, free_buffers,
CTLFLAG_RD, &nm_buf_pool.free, 0, "free_buffers");
+int netmap_mitigate = 1;
+SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, "");
+int netmap_skip_txsync;
+SYSCTL_INT(_dev_netmap, OID_AUTO, skip_txsync, CTLFLAG_RW, &netmap_skip_txsync, 0, "");
+int netmap_skip_rxsync;
+SYSCTL_INT(_dev_netmap, OID_AUTO, skip_rxsync, CTLFLAG_RW, &netmap_skip_rxsync, 0, "");
/*
* Allocate n buffers from the ring, and fill the slot.
diff --git a/sys/dev/netmap/netmap_kern.h b/sys/dev/netmap/netmap_kern.h
index ff4b151..08f11fe 100644
--- a/sys/dev/netmap/netmap_kern.h
+++ b/sys/dev/netmap/netmap_kern.h
@@ -65,13 +65,14 @@ struct netmap_kring {
struct netmap_ring *ring;
u_int nr_hwcur;
int nr_hwavail;
- u_int nr_kflags;
+ u_int nr_kflags; /* private driver flags */
+#define NKR_PENDINTR 0x1 // Pending interrupt.
u_int nkr_num_slots;
int nkr_hwofs; /* offset between NIC and netmap ring */
struct netmap_adapter *na; // debugging
struct selinfo si; /* poll/select wait queue */
-};
+} __attribute__((__aligned__(64)));
/*
* This struct is part of and extends the 'struct adapter' (or
@@ -171,6 +172,8 @@ struct netmap_slot *netmap_reset(struct netmap_adapter *na,
enum txrx tx, int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);
+extern int netmap_mitigate;
+extern int netmap_skip_txsync, netmap_skip_rxsync;
extern u_int netmap_total_buffers;
extern char *netmap_buffer_base;
extern int netmap_verbose; // XXX debugging
diff --git a/sys/dev/ofw/openfirm.c b/sys/dev/ofw/openfirm.c
index af6ee80..4be2d2b 100644
--- a/sys/dev/ofw/openfirm.c
+++ b/sys/dev/ofw/openfirm.c
@@ -72,6 +72,8 @@ __FBSDID("$FreeBSD$");
#include "ofw_if.h"
+static void OF_putchar(int c, void *arg);
+
MALLOC_DEFINE(M_OFWPROP, "openfirm", "Open Firmware properties");
static ihandle_t stdout;
@@ -82,7 +84,7 @@ static struct ofw_kobj ofw_kernel_obj;
static struct kobj_ops ofw_kernel_kops;
/*
- * OFW install routines. Highest priority wins, equal priority also
+ * OFW install routines. Highest priority wins, equal priority also
* overrides allowing last-set to win.
*/
SET_DECLARE(ofw_set, ofw_def_t);
@@ -138,15 +140,27 @@ OF_init(void *cookie)
return (rv);
}
+static void
+OF_putchar(int c, void *arg __unused)
+{
+ char cbuf;
+
+ if (c == '\n') {
+ cbuf = '\r';
+ OF_write(stdout, &cbuf, 1);
+ }
+
+ cbuf = c;
+ OF_write(stdout, &cbuf, 1);
+}
+
void
OF_printf(const char *fmt, ...)
{
va_list va;
- char buf[1024];
va_start(va, fmt);
- vsprintf(buf, fmt, va);
- OF_write(stdout, buf, strlen(buf));
+ (void)kvprintf(fmt, OF_putchar, NULL, 10, va);
va_end(va);
}
diff --git a/sys/dev/pccbb/pccbb.c b/sys/dev/pccbb/pccbb.c
index b116b54..cd3e3cf 100644
--- a/sys/dev/pccbb/pccbb.c
+++ b/sys/dev/pccbb/pccbb.c
@@ -460,6 +460,13 @@ cbb_event_thread(void *arg)
int err;
int not_a_card = 0;
+ /*
+ * We need to act as a power sequencer on startup. Delay 2s/channel
+ * to ensure the other channels have had a chance to come up. We likely
+ * should add a lock that's shared on a per-slot basis so that only
+ * one power event can happen per slot at a time.
+ */
+ pause("cbbstart", hz * device_get_unit(sc->dev) * 2);
mtx_lock(&sc->mtx);
sc->flags |= CBB_KTHREAD_RUNNING;
while ((sc->flags & CBB_KTHREAD_DONE) == 0) {
diff --git a/sys/dev/pci/pcireg.h b/sys/dev/pci/pcireg.h
index 7951d13..0ee2615 100644
--- a/sys/dev/pci/pcireg.h
+++ b/sys/dev/pci/pcireg.h
@@ -697,6 +697,7 @@
#define PCIR_AER_UC_STATUS 0x04
#define PCIM_AER_UC_TRAINING_ERROR 0x00000001
#define PCIM_AER_UC_DL_PROTOCOL_ERROR 0x00000010
+#define PCIM_AER_UC_SUPRISE_LINK_DOWN 0x00000020
#define PCIM_AER_UC_POISONED_TLP 0x00001000
#define PCIM_AER_UC_FC_PROTOCOL_ERROR 0x00002000
#define PCIM_AER_UC_COMPLETION_TIMEOUT 0x00004000
diff --git a/sys/dev/sound/pci/csa.c b/sys/dev/sound/pci/csa.c
index 04ea4ec..a767993 100644
--- a/sys/dev/sound/pci/csa.c
+++ b/sys/dev/sound/pci/csa.c
@@ -861,7 +861,7 @@ static int
csa_downloadimage(csa_res *resp)
{
int i;
- u_int32_t tmp, src, dst, count, data;
+ u_int32_t tmp, src, dst, count;
for (i = 0; i < CLEAR__COUNT; i++) {
dst = ClrStat[i].BA1__DestByteOffset;
@@ -875,8 +875,7 @@ csa_downloadimage(csa_res *resp)
dst = FillStat[i].Offset;
count = FillStat[i].Size;
for (tmp = 0; tmp < count; tmp += 4) {
- data = FillStat[i].pFill[src];
- csa_writemem(resp, dst + tmp, data);
+ csa_writemem(resp, dst + tmp, FillStat[i].pFill[src]);
src++;
}
}
diff --git a/sys/dev/sound/pci/csareg.h b/sys/dev/sound/pci/csareg.h
index c6767fb..54b64e0 100644
--- a/sys/dev/sound/pci/csareg.h
+++ b/sys/dev/sound/pci/csareg.h
@@ -1949,24 +1949,4 @@
#define CS_AC97_POWER_CONTROL_MIXVON_ON 0x0004
#define CS_AC97_POWER_CONTROL_MIXVOFF_ON 0x0008
-/* The following struct holds the initialization array. */
-
-/*
- * this is 3*1024 for parameter, 3.5*1024 for sample and 2*3.5*1024 for code since
- * each instruction is 40 bits and takes two dwords
- */
-#define INKY_BA1_DWORD_SIZE (13 * 1024 + 512)
-#define INKY_MEMORY_COUNT 3
-
-struct BA1struct
-{
- struct
- {
- u_long ulDestByteOffset,
- ulSourceByteSize;
- } MemoryStat[INKY_MEMORY_COUNT];
-
- u_long BA1Array[INKY_BA1_DWORD_SIZE];
-};
-
#endif /* _CSA_REG_H */
diff --git a/sys/dev/sound/pci/hda/hdaa.c b/sys/dev/sound/pci/hda/hdaa.c
index 9d7ff0d..82a6676 100644
--- a/sys/dev/sound/pci/hda/hdaa.c
+++ b/sys/dev/sound/pci/hda/hdaa.c
@@ -1564,7 +1564,7 @@ hdaa_audio_setup(struct hdaa_chan *ch)
HDA_PARAM_PIN_CAP_HBR(wp->wclass.pin.cap)) {
wp->wclass.pin.ctrl &=
~HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK;
- if ((ch->fmt & AFMT_AC3) && (cchn == 8))
+ if ((ch->fmt & AFMT_AC3) && (cchn == 7))
wp->wclass.pin.ctrl |= 0x03;
hda_command(ch->devinfo->dev,
HDA_CMD_SET_PIN_WIDGET_CTRL(0, nid,
@@ -6485,9 +6485,12 @@ hdaa_chan_formula(struct hdaa_devinfo *devinfo, int asid,
c = devinfo->chans[as->chans[0]].channels;
if (c == 1)
snprintf(buf, buflen, "mono");
- else if (c == 2)
- buf[0] = 0;
- else if (as->pinset == 0x0003)
+ else if (c == 2) {
+ if (as->hpredir < 0)
+ buf[0] = 0;
+ else
+ snprintf(buf, buflen, "2.0");
+ } else if (as->pinset == 0x0003)
snprintf(buf, buflen, "3.1");
else if (as->pinset == 0x0005 || as->pinset == 0x0011)
snprintf(buf, buflen, "4.0");
@@ -6497,6 +6500,32 @@ hdaa_chan_formula(struct hdaa_devinfo *devinfo, int asid,
snprintf(buf, buflen, "7.1");
else
snprintf(buf, buflen, "%dch", c);
+ if (as->hpredir >= 0)
+ strlcat(buf, "+HP", buflen);
+}
+
+static int
+hdaa_chan_type(struct hdaa_devinfo *devinfo, int asid)
+{
+ struct hdaa_audio_as *as;
+ struct hdaa_widget *w;
+ int i, t = -1, t1;
+
+ as = &devinfo->as[asid];
+ for (i = 0; i < 16; i++) {
+ w = hdaa_widget_get(devinfo, as->pins[i]);
+ if (w == NULL || w->enable == 0 || w->type !=
+ HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
+ continue;
+ t1 = HDA_CONFIG_DEFAULTCONF_DEVICE(w->wclass.pin.config);
+ if (t == -1)
+ t = t1;
+ else if (t != t1) {
+ t = -2;
+ break;
+ }
+ }
+ return (t);
}
static int
@@ -6537,7 +6566,7 @@ hdaa_pcm_probe(device_t dev)
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
char chans1[8], chans2[8];
char buf[128];
- int loc1, loc2;
+ int loc1, loc2, t1, t2;
if (pdevinfo->playas >= 0)
loc1 = devinfo->as[pdevinfo->playas].location;
@@ -6553,12 +6582,17 @@ hdaa_pcm_probe(device_t dev)
loc1 = -2;
chans1[0] = 0;
chans2[0] = 0;
- if (pdevinfo->playas >= 0)
+ t1 = t2 = -1;
+ if (pdevinfo->playas >= 0) {
hdaa_chan_formula(devinfo, pdevinfo->playas,
chans1, sizeof(chans1));
- if (pdevinfo->recas >= 0)
+ t1 = hdaa_chan_type(devinfo, pdevinfo->playas);
+ }
+ if (pdevinfo->recas >= 0) {
hdaa_chan_formula(devinfo, pdevinfo->recas,
chans2, sizeof(chans2));
+ t2 = hdaa_chan_type(devinfo, pdevinfo->recas);
+ }
if (chans1[0] != 0 || chans2[0] != 0) {
if (chans1[0] == 0 && pdevinfo->playas >= 0)
snprintf(chans1, sizeof(chans1), "2.0");
@@ -6567,7 +6601,15 @@ hdaa_pcm_probe(device_t dev)
if (strcmp(chans1, chans2) == 0)
chans2[0] = 0;
}
- snprintf(buf, sizeof(buf), "%s PCM (%s%s%s%s%s%s%s)",
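+ /*
+ * Merge the playback and record device types; if they disagree, or
+ * the association is digital, omit the jack-type suffix.
+ */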
+ if (t1 == -1)
+ t1 = t2;
+ else if (t2 == -1)
+ t2 = t1;
+ if (t1 != t2)
+ t1 = -2;
+ if (pdevinfo->digital)
+ t1 = -2;
+ snprintf(buf, sizeof(buf), "%s PCM (%s%s%s%s%s%s%s%s%s)",
device_get_desc(device_get_parent(device_get_parent(dev))),
loc1 >= 0 ? HDA_LOCS[loc1] : "", loc1 >= 0 ? " " : "",
(pdevinfo->digital == 0x7)?"HDMI/DP":
@@ -6575,7 +6617,8 @@ hdaa_pcm_probe(device_t dev)
((pdevinfo->digital == 0x3)?"HDMI":
((pdevinfo->digital)?"Digital":"Analog"))),
chans1[0] ? " " : "", chans1,
- chans2[0] ? "/" : "", chans2);
+ chans2[0] ? "/" : "", chans2,
+ t1 >= 0 ? " " : "", t1 >= 0 ? HDA_DEVS[t1] : "");
device_set_desc_copy(dev, buf);
return (BUS_PROBE_SPECIFIC);
}
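For illustration only: with these changes a stereo association with headphone
redirection is labeled "2.0" instead of being left blank, "+HP" is appended
when redirection is active, and a trailing jack type (from HDA_DEVS) is added
when every pin of the association agrees, yielding descriptions of the
general shape "<codec> PCM (Analog 7.1/2.0 Speaker)"; the exact text depends
on the codec description and pin configuration.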
diff --git a/sys/dev/sound/pci/hda/hdac.c b/sys/dev/sound/pci/hda/hdac.c
index 9c32cff..cdbd650 100644
--- a/sys/dev/sound/pci/hda/hdac.c
+++ b/sys/dev/sound/pci/hda/hdac.c
@@ -46,7 +46,7 @@
#include <dev/sound/pci/hda/hda_reg.h>
#include <dev/sound/pci/hda/hdac.h>
-#define HDA_DRV_TEST_REV "20120111_0001"
+#define HDA_DRV_TEST_REV "20120126_0002"
SND_DECLARE_FILE("$FreeBSD$");
diff --git a/sys/dev/sound/pci/hda/hdac.h b/sys/dev/sound/pci/hda/hdac.h
index 71fae4f..71e6a67 100644
--- a/sys/dev/sound/pci/hda/hdac.h
+++ b/sys/dev/sound/pci/hda/hdac.h
@@ -305,6 +305,8 @@
#define HDA_CODEC_ALC662 HDA_CODEC_CONSTRUCT(REALTEK, 0x0662)
#define HDA_CODEC_ALC663 HDA_CODEC_CONSTRUCT(REALTEK, 0x0663)
#define HDA_CODEC_ALC665 HDA_CODEC_CONSTRUCT(REALTEK, 0x0665)
+#define HDA_CODEC_ALC670 HDA_CODEC_CONSTRUCT(REALTEK, 0x0670)
+#define HDA_CODEC_ALC680 HDA_CODEC_CONSTRUCT(REALTEK, 0x0680)
#define HDA_CODEC_ALC861 HDA_CODEC_CONSTRUCT(REALTEK, 0x0861)
#define HDA_CODEC_ALC861VD HDA_CODEC_CONSTRUCT(REALTEK, 0x0862)
#define HDA_CODEC_ALC880 HDA_CODEC_CONSTRUCT(REALTEK, 0x0880)
@@ -318,6 +320,18 @@
#define HDA_CODEC_ALC899 HDA_CODEC_CONSTRUCT(REALTEK, 0x0899)
#define HDA_CODEC_ALCXXXX HDA_CODEC_CONSTRUCT(REALTEK, 0xffff)
+/* Motorola */
+#define MOTO_VENDORID 0x1057
+#define HDA_CODEC_MOTOXXXX HDA_CODEC_CONSTRUCT(MOTO, 0xffff)
+
+/* Creative */
+#define CREATIVE_VENDORID 0x1102
+#define HDA_CODEC_CA0110 HDA_CODEC_CONSTRUCT(CREATIVE, 0x000a)
+#define HDA_CODEC_CA0110_2 HDA_CODEC_CONSTRUCT(CREATIVE, 0x000b)
+#define HDA_CODEC_SB0880 HDA_CODEC_CONSTRUCT(CREATIVE, 0x000d)
+#define HDA_CODEC_CA0132 HDA_CODEC_CONSTRUCT(CREATIVE, 0x0011)
+#define HDA_CODEC_CAXXXX HDA_CODEC_CONSTRUCT(CREATIVE, 0xffff)
+
/* Analog Devices */
#define ANALOGDEVICES_VENDORID 0x11d4
#define HDA_CODEC_AD1884A HDA_CODEC_CONSTRUCT(ANALOGDEVICES, 0x184a)
@@ -339,10 +353,14 @@
#define HDA_CODEC_ADXXXX HDA_CODEC_CONSTRUCT(ANALOGDEVICES, 0xffff)
/* CMedia */
-#define CMEDIA_VENDORID 0x434d
-#define HDA_CODEC_CMI9880 HDA_CODEC_CONSTRUCT(CMEDIA, 0x4980)
+#define CMEDIA_VENDORID 0x13f6
+#define HDA_CODEC_CMI9880 HDA_CODEC_CONSTRUCT(CMEDIA, 0x9880)
#define HDA_CODEC_CMIXXXX HDA_CODEC_CONSTRUCT(CMEDIA, 0xffff)
+#define CMEDIA2_VENDORID 0x434d
+#define HDA_CODEC_CMI98802 HDA_CODEC_CONSTRUCT(CMEDIA2, 0x4980)
+#define HDA_CODEC_CMIXXXX2 HDA_CODEC_CONSTRUCT(CMEDIA2, 0xffff)
+
/* Sigmatel */
#define SIGMATEL_VENDORID 0x8384
#define HDA_CODEC_STAC9230X HDA_CODEC_CONSTRUCT(SIGMATEL, 0x7612)
@@ -384,6 +402,10 @@
#define HDA_CODEC_STAC9205D HDA_CODEC_CONSTRUCT(SIGMATEL, 0x76a1)
#define HDA_CODEC_STAC9204X HDA_CODEC_CONSTRUCT(SIGMATEL, 0x76a2)
#define HDA_CODEC_STAC9204D HDA_CODEC_CONSTRUCT(SIGMATEL, 0x76a3)
+#define HDA_CODEC_STAC9255 HDA_CODEC_CONSTRUCT(SIGMATEL, 0x76a4)
+#define HDA_CODEC_STAC9255D HDA_CODEC_CONSTRUCT(SIGMATEL, 0x76a5)
+#define HDA_CODEC_STAC9254 HDA_CODEC_CONSTRUCT(SIGMATEL, 0x76a6)
+#define HDA_CODEC_STAC9254D HDA_CODEC_CONSTRUCT(SIGMATEL, 0x76a7)
#define HDA_CODEC_STAC9220_A2 HDA_CODEC_CONSTRUCT(SIGMATEL, 0x7880)
#define HDA_CODEC_STAC9220_A1 HDA_CODEC_CONSTRUCT(SIGMATEL, 0x7882)
#define HDA_CODEC_STACXXXX HDA_CODEC_CONSTRUCT(SIGMATEL, 0xffff)
@@ -398,10 +420,49 @@
#define HDA_CODEC_IDT92HD73C1 HDA_CODEC_CONSTRUCT(IDT, 0x7675)
#define HDA_CODEC_IDT92HD73E1 HDA_CODEC_CONSTRUCT(IDT, 0x7676)
#define HDA_CODEC_IDT92HD71B8 HDA_CODEC_CONSTRUCT(IDT, 0x76b0)
+#define HDA_CODEC_IDT92HD71B8_2 HDA_CODEC_CONSTRUCT(IDT, 0x76b1)
#define HDA_CODEC_IDT92HD71B7 HDA_CODEC_CONSTRUCT(IDT, 0x76b2)
+#define HDA_CODEC_IDT92HD71B7_2 HDA_CODEC_CONSTRUCT(IDT, 0x76b3)
+#define HDA_CODEC_IDT92HD71B6 HDA_CODEC_CONSTRUCT(IDT, 0x76b4)
+#define HDA_CODEC_IDT92HD71B6_2 HDA_CODEC_CONSTRUCT(IDT, 0x76b5)
#define HDA_CODEC_IDT92HD71B5 HDA_CODEC_CONSTRUCT(IDT, 0x76b6)
+#define HDA_CODEC_IDT92HD71B5_2 HDA_CODEC_CONSTRUCT(IDT, 0x76b7)
+#define HDA_CODEC_IDT92HD89C3 HDA_CODEC_CONSTRUCT(IDT, 0x76c0)
+#define HDA_CODEC_IDT92HD89C2 HDA_CODEC_CONSTRUCT(IDT, 0x76c1)
+#define HDA_CODEC_IDT92HD89C1 HDA_CODEC_CONSTRUCT(IDT, 0x76c2)
+#define HDA_CODEC_IDT92HD89B3 HDA_CODEC_CONSTRUCT(IDT, 0x76c3)
+#define HDA_CODEC_IDT92HD89B2 HDA_CODEC_CONSTRUCT(IDT, 0x76c4)
+#define HDA_CODEC_IDT92HD89B1 HDA_CODEC_CONSTRUCT(IDT, 0x76c5)
+#define HDA_CODEC_IDT92HD89E3 HDA_CODEC_CONSTRUCT(IDT, 0x76c6)
+#define HDA_CODEC_IDT92HD89E2 HDA_CODEC_CONSTRUCT(IDT, 0x76c7)
+#define HDA_CODEC_IDT92HD89E1 HDA_CODEC_CONSTRUCT(IDT, 0x76c8)
+#define HDA_CODEC_IDT92HD89D3 HDA_CODEC_CONSTRUCT(IDT, 0x76c9)
+#define HDA_CODEC_IDT92HD89D2 HDA_CODEC_CONSTRUCT(IDT, 0x76ca)
+#define HDA_CODEC_IDT92HD89D1 HDA_CODEC_CONSTRUCT(IDT, 0x76cb)
+#define HDA_CODEC_IDT92HD89F3 HDA_CODEC_CONSTRUCT(IDT, 0x76cc)
+#define HDA_CODEC_IDT92HD89F2 HDA_CODEC_CONSTRUCT(IDT, 0x76cd)
+#define HDA_CODEC_IDT92HD89F1 HDA_CODEC_CONSTRUCT(IDT, 0x76ce)
+#define HDA_CODEC_IDT92HD87B1_3 HDA_CODEC_CONSTRUCT(IDT, 0x76d1)
#define HDA_CODEC_IDT92HD83C1C HDA_CODEC_CONSTRUCT(IDT, 0x76d4)
#define HDA_CODEC_IDT92HD81B1C HDA_CODEC_CONSTRUCT(IDT, 0x76d5)
+#define HDA_CODEC_IDT92HD87B2_4 HDA_CODEC_CONSTRUCT(IDT, 0x76d9)
+#define HDA_CODEC_IDT92HD93BXX HDA_CODEC_CONSTRUCT(IDT, 0x76df)
+#define HDA_CODEC_IDT92HD91BXX HDA_CODEC_CONSTRUCT(IDT, 0x76e0)
+#define HDA_CODEC_IDT92HD98BXX HDA_CODEC_CONSTRUCT(IDT, 0x76e3)
+#define HDA_CODEC_IDT92HD99BXX HDA_CODEC_CONSTRUCT(IDT, 0x76e5)
+#define HDA_CODEC_IDT92HD90BXX HDA_CODEC_CONSTRUCT(IDT, 0x76e7)
+#define HDA_CODEC_IDT92HD66B1X5 HDA_CODEC_CONSTRUCT(IDT, 0x76e8)
+#define HDA_CODEC_IDT92HD66B2X5 HDA_CODEC_CONSTRUCT(IDT, 0x76e9)
+#define HDA_CODEC_IDT92HD66B3X5 HDA_CODEC_CONSTRUCT(IDT, 0x76ea)
+#define HDA_CODEC_IDT92HD66C1X5 HDA_CODEC_CONSTRUCT(IDT, 0x76eb)
+#define HDA_CODEC_IDT92HD66C2X5 HDA_CODEC_CONSTRUCT(IDT, 0x76ec)
+#define HDA_CODEC_IDT92HD66C3X5 HDA_CODEC_CONSTRUCT(IDT, 0x76ed)
+#define HDA_CODEC_IDT92HD66B1X3 HDA_CODEC_CONSTRUCT(IDT, 0x76ee)
+#define HDA_CODEC_IDT92HD66B2X3 HDA_CODEC_CONSTRUCT(IDT, 0x76ef)
+#define HDA_CODEC_IDT92HD66B3X3 HDA_CODEC_CONSTRUCT(IDT, 0x76f0)
+#define HDA_CODEC_IDT92HD66C1X3 HDA_CODEC_CONSTRUCT(IDT, 0x76f1)
+#define HDA_CODEC_IDT92HD66C2X3 HDA_CODEC_CONSTRUCT(IDT, 0x76f2)
+#define HDA_CODEC_IDT92HD66C3_65 HDA_CODEC_CONSTRUCT(IDT, 0x76f3)
#define HDA_CODEC_IDTXXXX HDA_CODEC_CONSTRUCT(IDT, 0xffff)
/* Silicon Image */
@@ -495,7 +556,9 @@
/* NVIDIA */
#define HDA_CODEC_NVIDIAMCP78 HDA_CODEC_CONSTRUCT(NVIDIA, 0x0002)
-#define HDA_CODEC_NVIDIAMCP78_2 HDA_CODEC_CONSTRUCT(NVIDIA, 0x0006)
+#define HDA_CODEC_NVIDIAMCP78_2 HDA_CODEC_CONSTRUCT(NVIDIA, 0x0003)
+#define HDA_CODEC_NVIDIAMCP78_3 HDA_CODEC_CONSTRUCT(NVIDIA, 0x0005)
+#define HDA_CODEC_NVIDIAMCP78_4 HDA_CODEC_CONSTRUCT(NVIDIA, 0x0006)
#define HDA_CODEC_NVIDIAMCP7A HDA_CODEC_CONSTRUCT(NVIDIA, 0x0007)
#define HDA_CODEC_NVIDIAGT220 HDA_CODEC_CONSTRUCT(NVIDIA, 0x000a)
#define HDA_CODEC_NVIDIAGT21X HDA_CODEC_CONSTRUCT(NVIDIA, 0x000b)
@@ -505,6 +568,10 @@
#define HDA_CODEC_NVIDIAMCP73 HDA_CODEC_CONSTRUCT(NVIDIA, 0x8001)
#define HDA_CODEC_NVIDIAXXXX HDA_CODEC_CONSTRUCT(NVIDIA, 0xffff)
+/* Chrontel */
+#define CHRONTEL_VENDORID 0x17e8
+#define HDA_CODEC_CHXXXX HDA_CODEC_CONSTRUCT(CHRONTEL, 0xffff)
+
/* INTEL */
#define HDA_CODEC_INTELIP HDA_CODEC_CONSTRUCT(INTEL, 0x0054)
#define HDA_CODEC_INTELBL HDA_CODEC_CONSTRUCT(INTEL, 0x2801)
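
For illustration, assuming the usual HDA_CODEC_CONSTRUCT definition in this
header (vendor ID in the upper 16 bits, device ID in the lower 16), the two
CMedia entries above expand as:

	HDA_CODEC_CONSTRUCT(CMEDIA, 0x9880)	/* 0x13f69880, PCI-SIG vendor ID */
	HDA_CODEC_CONSTRUCT(CMEDIA2, 0x4980)	/* 0x434d4980, legacy "CM" ASCII ID */

Keeping both lets the driver match CMI9880 codecs that report either identity.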
diff --git a/sys/dev/sound/pci/hda/hdacc.c b/sys/dev/sound/pci/hda/hdacc.c
index c415e0f..5e5a8dd 100644
--- a/sys/dev/sound/pci/hda/hdacc.c
+++ b/sys/dev/sound/pci/hda/hdacc.c
@@ -92,6 +92,8 @@ static const struct {
{ HDA_CODEC_ALC662, 0, "Realtek ALC662" },
{ HDA_CODEC_ALC663, 0, "Realtek ALC663" },
{ HDA_CODEC_ALC665, 0, "Realtek ALC665" },
+ { HDA_CODEC_ALC670, 0, "Realtek ALC670" },
+ { HDA_CODEC_ALC680, 0, "Realtek ALC680" },
{ HDA_CODEC_ALC861, 0x0340, "Realtek ALC660" },
{ HDA_CODEC_ALC861, 0, "Realtek ALC861" },
{ HDA_CODEC_ALC861VD, 0, "Realtek ALC861-VD" },
@@ -123,7 +125,12 @@ static const struct {
{ HDA_CODEC_AD1988B, 0, "Analog Devices AD1988B" },
{ HDA_CODEC_AD1989A, 0, "Analog Devices AD1989A" },
{ HDA_CODEC_AD1989B, 0, "Analog Devices AD1989B" },
+ { HDA_CODEC_CA0110, 0, "Creative CA0110-IBG" },
+ { HDA_CODEC_CA0110_2, 0, "Creative CA0110-IBG" },
+ { HDA_CODEC_CA0132, 0, "Creative CA0132" },
+ { HDA_CODEC_SB0880, 0, "Creative SB0880 X-Fi" },
{ HDA_CODEC_CMI9880, 0, "CMedia CMI9880" },
+ { HDA_CODEC_CMI98802, 0, "CMedia CMI9880" },
{ HDA_CODEC_CXD9872RDK, 0, "Sigmatel CXD9872RD/K" },
{ HDA_CODEC_CXD9872AKD, 0, "Sigmatel CXD9872AKD" },
{ HDA_CODEC_STAC9200D, 0, "Sigmatel STAC9200D" },
@@ -148,6 +155,10 @@ static const struct {
{ HDA_CODEC_STAC9230D, 0, "Sigmatel STAC9230D" },
{ HDA_CODEC_STAC9250, 0, "Sigmatel STAC9250" },
{ HDA_CODEC_STAC9251, 0, "Sigmatel STAC9251" },
+ { HDA_CODEC_STAC9255, 0, "Sigmatel STAC9255" },
+ { HDA_CODEC_STAC9255D, 0, "Sigmatel STAC9255D" },
+ { HDA_CODEC_STAC9254, 0, "Sigmatel STAC9254" },
+ { HDA_CODEC_STAC9254D, 0, "Sigmatel STAC9254D" },
{ HDA_CODEC_STAC9271X, 0, "Sigmatel STAC9271X" },
{ HDA_CODEC_STAC9271D, 0, "Sigmatel STAC9271D" },
{ HDA_CODEC_STAC9272X, 0, "Sigmatel STAC9272X" },
@@ -163,11 +174,28 @@ static const struct {
{ HDA_CODEC_IDT92HD005D, 0, "IDT 92HD005D" },
{ HDA_CODEC_IDT92HD206X, 0, "IDT 92HD206X" },
{ HDA_CODEC_IDT92HD206D, 0, "IDT 92HD206D" },
+ { HDA_CODEC_IDT92HD66B1X5, 0, "IDT 92HD66B1X5" },
+ { HDA_CODEC_IDT92HD66B2X5, 0, "IDT 92HD66B2X5" },
+ { HDA_CODEC_IDT92HD66B3X5, 0, "IDT 92HD66B3X5" },
+ { HDA_CODEC_IDT92HD66C1X5, 0, "IDT 92HD66C1X5" },
+ { HDA_CODEC_IDT92HD66C2X5, 0, "IDT 92HD66C2X5" },
+ { HDA_CODEC_IDT92HD66C3X5, 0, "IDT 92HD66C3X5" },
+ { HDA_CODEC_IDT92HD66B1X3, 0, "IDT 92HD66B1X3" },
+ { HDA_CODEC_IDT92HD66B2X3, 0, "IDT 92HD66B2X3" },
+ { HDA_CODEC_IDT92HD66B3X3, 0, "IDT 92HD66B3X3" },
+ { HDA_CODEC_IDT92HD66C1X3, 0, "IDT 92HD66C1X3" },
+ { HDA_CODEC_IDT92HD66C2X3, 0, "IDT 92HD66C2X3" },
+ { HDA_CODEC_IDT92HD66C3_65, 0, "IDT 92HD66C3_65" },
{ HDA_CODEC_IDT92HD700X, 0, "IDT 92HD700X" },
{ HDA_CODEC_IDT92HD700D, 0, "IDT 92HD700D" },
{ HDA_CODEC_IDT92HD71B5, 0, "IDT 92HD71B5" },
+ { HDA_CODEC_IDT92HD71B5_2, 0, "IDT 92HD71B5" },
+ { HDA_CODEC_IDT92HD71B6, 0, "IDT 92HD71B6" },
+ { HDA_CODEC_IDT92HD71B6_2, 0, "IDT 92HD71B6" },
{ HDA_CODEC_IDT92HD71B7, 0, "IDT 92HD71B7" },
+ { HDA_CODEC_IDT92HD71B7_2, 0, "IDT 92HD71B7" },
{ HDA_CODEC_IDT92HD71B8, 0, "IDT 92HD71B8" },
+ { HDA_CODEC_IDT92HD71B8_2, 0, "IDT 92HD71B8" },
{ HDA_CODEC_IDT92HD73C1, 0, "IDT 92HD73C1" },
{ HDA_CODEC_IDT92HD73D1, 0, "IDT 92HD73D1" },
{ HDA_CODEC_IDT92HD73E1, 0, "IDT 92HD73E1" },
@@ -177,6 +205,28 @@ static const struct {
{ HDA_CODEC_IDT92HD81B1X, 0, "IDT 92HD81B1X" },
{ HDA_CODEC_IDT92HD83C1C, 0, "IDT 92HD83C1C" },
{ HDA_CODEC_IDT92HD83C1X, 0, "IDT 92HD83C1X" },
+ { HDA_CODEC_IDT92HD87B1_3, 0, "IDT 92HD87B1/3" },
+ { HDA_CODEC_IDT92HD87B2_4, 0, "IDT 92HD87B2/4" },
+ { HDA_CODEC_IDT92HD89C3, 0, "IDT 92HD89C3" },
+ { HDA_CODEC_IDT92HD89C2, 0, "IDT 92HD89C2" },
+ { HDA_CODEC_IDT92HD89C1, 0, "IDT 92HD89C1" },
+ { HDA_CODEC_IDT92HD89B3, 0, "IDT 92HD89B3" },
+ { HDA_CODEC_IDT92HD89B2, 0, "IDT 92HD89B2" },
+ { HDA_CODEC_IDT92HD89B1, 0, "IDT 92HD89B1" },
+ { HDA_CODEC_IDT92HD89E3, 0, "IDT 92HD89E3" },
+ { HDA_CODEC_IDT92HD89E2, 0, "IDT 92HD89E2" },
+ { HDA_CODEC_IDT92HD89E1, 0, "IDT 92HD89E1" },
+ { HDA_CODEC_IDT92HD89D3, 0, "IDT 92HD89D3" },
+ { HDA_CODEC_IDT92HD89D2, 0, "IDT 92HD89D2" },
+ { HDA_CODEC_IDT92HD89D1, 0, "IDT 92HD89D1" },
+ { HDA_CODEC_IDT92HD89F3, 0, "IDT 92HD89F3" },
+ { HDA_CODEC_IDT92HD89F2, 0, "IDT 92HD89F2" },
+ { HDA_CODEC_IDT92HD89F1, 0, "IDT 92HD89F1" },
+ { HDA_CODEC_IDT92HD90BXX, 0, "IDT 92HD90BXX" },
+ { HDA_CODEC_IDT92HD91BXX, 0, "IDT 92HD91BXX" },
+ { HDA_CODEC_IDT92HD93BXX, 0, "IDT 92HD93BXX" },
+ { HDA_CODEC_IDT92HD98BXX, 0, "IDT 92HD98BXX" },
+ { HDA_CODEC_IDT92HD99BXX, 0, "IDT 92HD99BXX" },
{ HDA_CODEC_CX20549, 0, "Conexant CX20549 (Venice)" },
{ HDA_CODEC_CX20551, 0, "Conexant CX20551 (Waikiki)" },
{ HDA_CODEC_CX20561, 0, "Conexant CX20561 (Hermosa)" },
@@ -250,6 +300,8 @@ static const struct {
{ HDA_CODEC_NVIDIAMCP73, 0, "NVIDIA MCP73" },
{ HDA_CODEC_NVIDIAMCP78, 0, "NVIDIA MCP78" },
{ HDA_CODEC_NVIDIAMCP78_2, 0, "NVIDIA MCP78" },
+ { HDA_CODEC_NVIDIAMCP78_3, 0, "NVIDIA MCP78" },
+ { HDA_CODEC_NVIDIAMCP78_4, 0, "NVIDIA MCP78" },
{ HDA_CODEC_NVIDIAMCP7A, 0, "NVIDIA MCP7A" },
{ HDA_CODEC_NVIDIAGT220, 0, "NVIDIA GT220" },
{ HDA_CODEC_NVIDIAGT21X, 0, "NVIDIA GT21x" },
@@ -266,19 +318,23 @@ static const struct {
{ HDA_CODEC_SII1390, 0, "Silicon Image SiI1390" },
{ HDA_CODEC_SII1392, 0, "Silicon Image SiI1392" },
/* Unknown CODECs */
- { HDA_CODEC_ALCXXXX, 0, "Realtek" },
{ HDA_CODEC_ADXXXX, 0, "Analog Devices" },
- { HDA_CODEC_CSXXXX, 0, "Cirrus Logic" },
- { HDA_CODEC_CMIXXXX, 0, "CMedia" },
- { HDA_CODEC_STACXXXX, 0, "Sigmatel" },
- { HDA_CODEC_SIIXXXX, 0, "Silicon Image" },
{ HDA_CODEC_AGEREXXXX, 0, "Lucent/Agere Systems" },
- { HDA_CODEC_CXXXXX, 0, "Conexant" },
- { HDA_CODEC_VTXXXX, 0, "VIA" },
+ { HDA_CODEC_ALCXXXX, 0, "Realtek" },
{ HDA_CODEC_ATIXXXX, 0, "ATI" },
- { HDA_CODEC_NVIDIAXXXX, 0, "NVIDIA" },
- { HDA_CODEC_INTELXXXX, 0, "Intel" },
+ { HDA_CODEC_CAXXXX, 0, "Creative" },
+ { HDA_CODEC_CMIXXXX, 0, "CMedia" },
+ { HDA_CODEC_CMIXXXX2, 0, "CMedia" },
+ { HDA_CODEC_CSXXXX, 0, "Cirrus Logic" },
+ { HDA_CODEC_CXXXXX, 0, "Conexant" },
+ { HDA_CODEC_CHXXXX, 0, "Chrontel" },
{ HDA_CODEC_IDTXXXX, 0, "IDT" },
+ { HDA_CODEC_INTELXXXX, 0, "Intel" },
+ { HDA_CODEC_MOTOXXXX, 0, "Motorola" },
+ { HDA_CODEC_NVIDIAXXXX, 0, "NVIDIA" },
+ { HDA_CODEC_SIIXXXX, 0, "Silicon Image" },
+ { HDA_CODEC_STACXXXX, 0, "Sigmatel" },
+ { HDA_CODEC_VTXXXX, 0, "VIA" },
};
#define HDACC_CODECS_LEN (sizeof(hdacc_codecs) / sizeof(hdacc_codecs[0]))
diff --git a/sys/dev/usb/usb_transfer.c b/sys/dev/usb/usb_transfer.c
index 4a5cfbd..e948299 100644
--- a/sys/dev/usb/usb_transfer.c
+++ b/sys/dev/usb/usb_transfer.c
@@ -42,6 +42,7 @@
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>
+#include <sys/proc.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
diff --git a/sys/dev/wi/if_wi.c b/sys/dev/wi/if_wi.c
index d31bdba..8e70dd6 100644
--- a/sys/dev/wi/if_wi.c
+++ b/sys/dev/wi/if_wi.c
@@ -62,6 +62,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_wlan.h"
+
#define WI_HERMES_STATS_WAR /* Work around stats counter bug. */
#include <sys/param.h>
diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c
index c42bfd9..c6ab562 100644
--- a/sys/dev/xen/blkback/blkback.c
+++ b/sys/dev/xen/blkback/blkback.c
@@ -3434,6 +3434,10 @@ xbb_shutdown(struct xbb_softc *xbb)
DPRINTF("\n");
+ /*
+ * Before unlocking the mutex, set this flag to prevent other
+ * threads from entering this function.
+ */
xbb->flags |= XBBF_IN_SHUTDOWN;
mtx_unlock(&xbb->lock);
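
For illustration, a minimal sketch of the guard idiom the new comment
describes (names from this driver, simplified; the real function does more
work than shown here):

	mtx_lock(&xbb->lock);
	if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0) {
		mtx_unlock(&xbb->lock);
		return (EAGAIN);	/* shutdown already in progress */
	}
	xbb->flags |= XBBF_IN_SHUTDOWN;
	mtx_unlock(&xbb->lock);
	/* ... long-running teardown that must not hold the lock ... */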
diff --git a/sys/dev/xen/netback/netback.c b/sys/dev/xen/netback/netback.c
index b2be6e4..ef7b074 100644
--- a/sys/dev/xen/netback/netback.c
+++ b/sys/dev/xen/netback/netback.c
@@ -1,1595 +1,2535 @@
-/*
- * Copyright (c) 2006, Cisco Systems, Inc.
+/*-
+ * Copyright (c) 2009-2011 Spectra Logic Corporation
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
* are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
*
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Cisco Systems, Inc. nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Authors: Justin T. Gibbs (Spectra Logic Corporation)
+ * Alan Somers (Spectra Logic Corporation)
+ * John Suykerbuyk (Spectra Logic Corporation)
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+
+/**
+ * \file netback.c
+ *
+ * \brief Device driver supporting the vending of network access
+ * from this FreeBSD domain to other domains.
+ */
+#include "opt_inet.h"
+#include "opt_global.h"
+
#include "opt_sctp.h"
#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/sockio.h>
-#include <sys/mbuf.h>
-#include <sys/malloc.h>
#include <sys/kernel.h>
-#include <sys/socket.h>
-#include <sys/queue.h>
-#include <sys/taskqueue.h>
-#include <sys/module.h>
#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_arp.h>
-#include <net/if_types.h>
#include <net/ethernet.h>
-#include <net/if_bridgevar.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
-#include <netinet/in_systm.h>
#include <netinet/in.h>
-#include <netinet/in_var.h>
#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
-#include <netinet/udp.h>
-#ifdef SCTP
-#include <netinet/sctp.h>
-#include <netinet/sctp_crc32.h>
#endif
+#include <netinet/ip_icmp.h>
+#include <netinet/udp.h>
+#include <machine/in_cksum.h>
-#include <vm/vm_extern.h>
-#include <vm/vm_kern.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
-#include <machine/in_cksum.h>
-#include <machine/xen-os.h>
-#include <machine/hypervisor.h>
-#include <machine/hypervisor-ifs.h>
-#include <machine/xen_intr.h>
-#include <machine/evtchn.h>
-#include <machine/xenbus.h>
-#include <machine/gnttab.h>
-#include <machine/xen-public/memory.h>
-#include <dev/xen/xenbus/xenbus_comms.h>
-
-
-#ifdef XEN_NETBACK_DEBUG
-#define DPRINTF(fmt, args...) \
- printf("netback (%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
-#else
-#define DPRINTF(fmt, args...) ((void)0)
-#endif
+#include <machine/_inttypes.h>
+#include <machine/xen/xen-os.h>
+#include <machine/xen/xenvar.h>
-#ifdef XEN_NETBACK_DEBUG_LOTS
-#define DDPRINTF(fmt, args...) \
- printf("netback (%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
-#define DPRINTF_MBUF(_m) print_mbuf(_m, 0)
-#define DPRINTF_MBUF_LEN(_m, _len) print_mbuf(_m, _len)
-#else
-#define DDPRINTF(fmt, args...) ((void)0)
-#define DPRINTF_MBUF(_m) ((void)0)
-#define DPRINTF_MBUF_LEN(_m, _len) ((void)0)
-#endif
+#include <xen/evtchn.h>
+#include <xen/xen_intr.h>
+#include <xen/interface/io/netif.h>
+#include <xen/xenbus/xenbusvar.h>
-#define WPRINTF(fmt, args...) \
- printf("netback (%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
+/*--------------------------- Compile-time Tunables --------------------------*/
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
-#define BUG_ON PANIC_IF
+/*---------------------------------- Macros ----------------------------------*/
+/**
+ * Custom malloc type for all driver allocations.
+ */
+static MALLOC_DEFINE(M_XENNETBACK, "xnb", "Xen Net Back Driver Data");
-#define IFNAME(_np) (_np)->ifp->if_xname
+#define XNB_SG 1 /* netback driver supports feature-sg */
+#define XNB_GSO_TCPV4 1 /* netback driver supports feature-gso-tcpv4 */
+#define XNB_RX_COPY 1 /* netback driver supports feature-rx-copy */
+#define XNB_RX_FLIP 0 /* netback driver does not support feature-rx-flip */
-#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
-#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
+#undef XNB_DEBUG
+#define XNB_DEBUG /* hardcode on during development */
-struct ring_ref {
- vm_offset_t va;
- grant_handle_t handle;
- uint64_t bus_addr;
-};
+#ifdef XNB_DEBUG
+#define DPRINTF(fmt, args...) \
+ printf("xnb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
+#else
+#define DPRINTF(fmt, args...) do {} while (0)
+#endif
-typedef struct netback_info {
+/* Default length for stack-allocated grant tables */
+#define GNTTAB_LEN (64)
- /* Schedule lists */
- STAILQ_ENTRY(netback_info) next_tx;
- STAILQ_ENTRY(netback_info) next_rx;
- int on_tx_sched_list;
- int on_rx_sched_list;
+/* Features supported by all backends. TSO and LRO can be negotiated */
+#define XNB_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
- struct xenbus_device *xdev;
- XenbusState frontend_state;
+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
- domid_t domid;
- int handle;
- char *bridge;
+/**
+ * Two-argument version of the standard macro. The second argument is a
+ * tentative value of req_cons.
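+ * For example, with RING_SIZE(_r) == 256, sring->req_prod == 10,
+ * cons == 4 and rsp_prod_pvt == 2, req == 6 and rsp == 254, so the
+ * macro yields 6.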
+ */
+#define RING_HAS_UNCONSUMED_REQUESTS_2(_r, cons) ({ \
+ unsigned int req = (_r)->sring->req_prod - cons; \
+ unsigned int rsp = RING_SIZE(_r) - \
+ (cons - (_r)->rsp_prod_pvt); \
+ req < rsp ? req : rsp; \
+})
- int rings_connected;
- struct ring_ref tx_ring_ref;
- struct ring_ref rx_ring_ref;
- netif_tx_back_ring_t tx;
- netif_rx_back_ring_t rx;
- evtchn_port_t evtchn;
- int irq;
- void *irq_cookie;
+#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
+#define virt_to_offset(x) ((x) & (PAGE_SIZE - 1))
- struct ifnet *ifp;
- int ref_cnt;
+/**
+ * Predefined array type of grant table copy descriptors. Used to pass around
+ * statically allocated memory structures.
+ */
+typedef struct gnttab_copy gnttab_copy_table[GNTTAB_LEN];
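+/*
+ * Note that, as an array typedef, this decays to a pointer when used as
+ * a function parameter, while a local variable declaration reserves
+ * storage for GNTTAB_LEN (64) descriptors on the stack.
+ */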
+
+/*--------------------------- Forward Declarations ---------------------------*/
+struct xnb_softc;
+struct xnb_pkt;
+
+static void xnb_attach_failed(struct xnb_softc *xnb,
+ int err, const char *fmt, ...)
+ __printflike(3,4);
+static int xnb_shutdown(struct xnb_softc *xnb);
+static int create_netdev(device_t dev);
+static int xnb_detach(device_t dev);
+static int xen_net_read_mac(device_t dev, uint8_t mac[]);
+static int xnb_ifmedia_upd(struct ifnet *ifp);
+static void xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
+static void xnb_intr(void *arg);
+static int xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend,
+ const struct mbuf *mbufc, gnttab_copy_table gnttab);
+static int xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend,
+ struct mbuf **mbufc, struct ifnet *ifnet,
+ gnttab_copy_table gnttab);
+static int xnb_ring2pkt(struct xnb_pkt *pkt,
+ const netif_tx_back_ring_t *tx_ring,
+ RING_IDX start);
+static void xnb_txpkt2rsp(const struct xnb_pkt *pkt,
+ netif_tx_back_ring_t *ring, int error);
+static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp);
+static int xnb_txpkt2gnttab(const struct xnb_pkt *pkt,
+ const struct mbuf *mbufc,
+ gnttab_copy_table gnttab,
+ const netif_tx_back_ring_t *txb,
+ domid_t otherend_id);
+static void xnb_update_mbufc(struct mbuf *mbufc,
+ const gnttab_copy_table gnttab, int n_entries);
+static int xnb_mbufc2pkt(const struct mbuf *mbufc,
+ struct xnb_pkt *pkt,
+ RING_IDX start, int space);
+static int xnb_rxpkt2gnttab(const struct xnb_pkt *pkt,
+ const struct mbuf *mbufc,
+ gnttab_copy_table gnttab,
+ const netif_rx_back_ring_t *rxb,
+ domid_t otherend_id);
+static int xnb_rxpkt2rsp(const struct xnb_pkt *pkt,
+ const gnttab_copy_table gnttab, int n_entries,
+ netif_rx_back_ring_t *ring);
+static void xnb_add_mbuf_cksum(struct mbuf *mbufc);
+static void xnb_stop(struct xnb_softc*);
+static int xnb_ioctl(struct ifnet*, u_long, caddr_t);
+static void xnb_start_locked(struct ifnet*);
+static void xnb_start(struct ifnet*);
+static void xnb_ifinit_locked(struct xnb_softc*);
+static void xnb_ifinit(void*);
+#ifdef XNB_DEBUG
+static int xnb_unit_test_main(SYSCTL_HANDLER_ARGS);
+static int xnb_dump_rings(SYSCTL_HANDLER_ARGS);
+#endif
+/*------------------------------ Data Structures -----------------------------*/
- device_t ndev;
- int attached;
-} netif_t;
+/**
+ * Representation of a xennet packet. Simplified version of a packet as
+ * stored in the Xen tx ring. Applicable to both RX and TX packets
+ */
+struct xnb_pkt{
+ /**
+ * Array index of the first data-bearing (e.g., not extra info) entry
+ * for this packet
+ */
+ RING_IDX car;
-#define MAX_PENDING_REQS 256
-#define PKT_PROT_LEN 64
+ /**
+ * Array index of the second data-bearing entry for this packet.
+ * Invalid if the packet has only one data-bearing entry. If the
+ * packet has more than two data-bearing entries, then the second
+ * through the last will be sequential modulo the ring size
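+ * (e.g. a packet spanning ring slots 5, 6 and 7 has car == 5 and
+ * cdr == 6).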
+ */
+ RING_IDX cdr;
-static struct {
- netif_tx_request_t req;
- netif_t *netif;
-} pending_tx_info[MAX_PENDING_REQS];
-static uint16_t pending_ring[MAX_PENDING_REQS];
-typedef unsigned int PEND_RING_IDX;
-#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-static PEND_RING_IDX pending_prod, pending_cons;
-#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
+ /**
+ * Optional extra info. Only valid if flags contains
+ * NETTXF_extra_info. Note that extra.type will always be
+ * XEN_NETIF_EXTRA_TYPE_GSO. Currently, no known netfront or netback
+ * driver will ever set XEN_NETIF_EXTRA_TYPE_MCAST_*
+ */
+ netif_extra_info_t extra;
-static unsigned long mmap_vstart;
-#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
+ /** Size of entire packet in bytes. */
+ uint16_t size;
-/* Freed TX mbufs get batched on this ring before return to pending_ring. */
-static uint16_t dealloc_ring[MAX_PENDING_REQS];
-static PEND_RING_IDX dealloc_prod, dealloc_cons;
+ /** The size of the first entry's data in bytes */
+ uint16_t car_size;
-static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
-static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-static gnttab_transfer_t grant_rx_op[NET_RX_RING_SIZE];
+ /**
+ * Either NETTXF_ or NETRXF_ flags. Note that the flag values are
+ * not the same for TX and RX packets
+ */
+ uint16_t flags;
-static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
-static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
-static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
+ /**
+ * The number of valid data-bearing entries (either netif_tx_request's
+ * or netif_rx_response's) in the packet. If this is 0, it means the
+ * entire packet is invalid.
+ */
+ uint16_t list_len;
-static struct task net_tx_task, net_rx_task;
-static struct callout rx_task_callout;
+ /** There was an error processing the packet */
+ uint8_t error;
+};
-static STAILQ_HEAD(netback_tx_sched_list, netback_info) tx_sched_list =
- STAILQ_HEAD_INITIALIZER(tx_sched_list);
-static STAILQ_HEAD(netback_rx_sched_list, netback_info) rx_sched_list =
- STAILQ_HEAD_INITIALIZER(rx_sched_list);
-static struct mtx tx_sched_list_lock;
-static struct mtx rx_sched_list_lock;
+/** xnb_pkt method: initialize it */
+static inline void
+xnb_pkt_initialize(struct xnb_pkt *pxnb)
+{
+ bzero(pxnb, sizeof(*pxnb));
+}
-static int vif_unit_maker = 0;
+/** xnb_pkt method: mark the packet as valid */
+static inline void
+xnb_pkt_validate(struct xnb_pkt *pxnb)
+{
+ pxnb->error = 0;
+}
-/* Protos */
-static void netback_start(struct ifnet *ifp);
-static int netback_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
-static int vif_add_dev(struct xenbus_device *xdev);
-static void disconnect_rings(netif_t *netif);
+/** xnb_pkt method: mark the packet as invalid */
+static inline void
+xnb_pkt_invalidate(struct xnb_pkt *pxnb)
+{
+ pxnb->error = 1;
+}
+
+/** xnb_pkt method: Check whether the packet is valid */
+static inline int
+xnb_pkt_is_valid(const struct xnb_pkt *pxnb)
+{
+ return (! pxnb->error);
+}
+
+#ifdef XNB_DEBUG
+/** xnb_pkt method: print the packet's contents in human-readable format*/
+static void __unused
+xnb_dump_pkt(const struct xnb_pkt *pkt) {
+ if (pkt == NULL) {
+ DPRINTF("Was passed a null pointer.\n");
+ return;
+ }
+ DPRINTF("pkt address= %p\n", pkt);
+ DPRINTF("pkt->size=%d\n", pkt->size);
+ DPRINTF("pkt->car_size=%d\n", pkt->car_size);
+ DPRINTF("pkt->flags=0x%04x\n", pkt->flags);
+ DPRINTF("pkt->list_len=%d\n", pkt->list_len);
+ /* DPRINTF("pkt->extra"); TODO */
+ DPRINTF("pkt->car=%d\n", pkt->car);
+ DPRINTF("pkt->cdr=%d\n", pkt->cdr);
+ DPRINTF("pkt->error=%d\n", pkt->error);
+}
+#endif /* XNB_DEBUG */
-#ifdef XEN_NETBACK_DEBUG_LOTS
-/* Debug code to display the contents of an mbuf */
static void
-print_mbuf(struct mbuf *m, int max)
+xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq)
{
- int i, j=0;
- printf("mbuf %08x len = %d", (unsigned int)m, m->m_pkthdr.len);
- for (; m; m = m->m_next) {
- unsigned char *d = m->m_data;
- for (i=0; i < m->m_len; i++) {
- if (max && j == max)
- break;
- if ((j++ % 16) == 0)
- printf("\n%04x:", j);
- printf(" %02x", d[i]);
- }
+ if (txreq != NULL) {
+ DPRINTF("netif_tx_request index =%u\n", idx);
+ DPRINTF("netif_tx_request.gref =%u\n", txreq->gref);
+ DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset);
+ DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags);
+ DPRINTF("netif_tx_request.id =%hu\n", txreq->id);
+ DPRINTF("netif_tx_request.size =%hu\n", txreq->size);
}
- printf("\n");
}
-#endif
-#define MAX_MFN_ALLOC 64
-static unsigned long mfn_list[MAX_MFN_ALLOC];
-static unsigned int alloc_index = 0;
+/**
+ * \brief Configuration data for a shared memory request ring
+ * used to communicate with the front-end client of
+ * this driver.
+ */
+struct xnb_ring_config {
+ /**
+ * Runtime structures for ring access. Unfortunately, TX and RX rings
+ * use different data structures, and that cannot be changed since it
+ * is part of the interdomain protocol.
+ */
+ union{
+ netif_rx_back_ring_t rx_ring;
+ netif_tx_back_ring_t tx_ring;
+ } back_ring;
+
+ /**
+ * The device bus address returned by the hypervisor when
+ * mapping the ring and required to unmap it when a connection
+ * is torn down.
+ */
+ uint64_t bus_addr;
-static unsigned long
-alloc_mfn(void)
-{
- unsigned long mfn = 0;
- struct xen_memory_reservation reservation = {
- .extent_start = mfn_list,
- .nr_extents = MAX_MFN_ALLOC,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
- if ( unlikely(alloc_index == 0) )
- alloc_index = HYPERVISOR_memory_op(
- XENMEM_increase_reservation, &reservation);
- if ( alloc_index != 0 )
- mfn = mfn_list[--alloc_index];
- return mfn;
-}
+ /** The pseudo-physical address where ring memory is mapped.*/
+ uint64_t gnt_addr;
+
+ /** KVA address where ring memory is mapped. */
+ vm_offset_t va;
+
+ /**
+ * Grant table handles, one per-ring page, returned by the
+ * hypervisor upon mapping of the ring and required to
+ * unmap it when a connection is torn down.
+ */
+ grant_handle_t handle;
+
+ /** The number of ring pages mapped for the current connection. */
+ unsigned ring_pages;
-static unsigned long
-alloc_empty_page_range(unsigned long nr_pages)
+ /**
+ * The grant references, one per-ring page, supplied by the
+ * front-end, allowing us to reference the ring pages in the
+ * front-end's domain and to map these pages into our own domain.
+ */
+ grant_ref_t ring_ref;
+};
+
+/**
+ * Per-instance connection state flags.
+ */
+typedef enum
{
- void *pages;
- int i = 0, j = 0;
- multicall_entry_t mcl[17];
- unsigned long mfn_list[16];
- struct xen_memory_reservation reservation = {
- .extent_start = mfn_list,
- .nr_extents = 0,
- .address_bits = 0,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
+ /** Communication with the front-end has been established. */
+ XNBF_RING_CONNECTED = 0x01,
- pages = malloc(nr_pages*PAGE_SIZE, M_DEVBUF, M_NOWAIT);
- if (pages == NULL)
- return 0;
+ /**
+ * Front-end requests exist in the ring and are waiting for
+ * xnb_xen_req objects to free up.
+ */
+ XNBF_RESOURCE_SHORTAGE = 0x02,
- memset(mcl, 0, sizeof(mcl));
+ /** Connection teardown has started. */
+ XNBF_SHUTDOWN = 0x04,
- while (i < nr_pages) {
- unsigned long va = (unsigned long)pages + (i++ * PAGE_SIZE);
+ /** A thread is already performing shutdown processing. */
+ XNBF_IN_SHUTDOWN = 0x08
+} xnb_flag_t;
- mcl[j].op = __HYPERVISOR_update_va_mapping;
- mcl[j].args[0] = va;
+/**
+ * Types of rings. Used for array indices and to identify a ring's control
+ * data structure type
+ */
+typedef enum{
+ XNB_RING_TYPE_TX = 0, /* ID of TX rings, used for array indices */
+ XNB_RING_TYPE_RX = 1, /* ID of RX rings, used for array indices */
+ XNB_NUM_RING_TYPES
+} xnb_ring_type_t;
- mfn_list[j++] = vtomach(va) >> PAGE_SHIFT;
+/**
+ * Per-instance configuration data.
+ */
+struct xnb_softc {
+ /** NewBus device corresponding to this instance. */
+ device_t dev;
- xen_phys_machine[(vtophys(va) >> PAGE_SHIFT)] = INVALID_P2M_ENTRY;
+ /* Media related fields */
- if (j == 16 || i == nr_pages) {
- mcl[j-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_LOCAL;
+ /** Generic network media state */
+ struct ifmedia sc_media;
- reservation.nr_extents = j;
+ /** Media carrier info */
+ struct ifnet *xnb_ifp;
- mcl[j].op = __HYPERVISOR_memory_op;
- mcl[j].args[0] = XENMEM_decrease_reservation;
- mcl[j].args[1] = (unsigned long)&reservation;
-
- (void)HYPERVISOR_multicall(mcl, j+1);
+ /** Our own private carrier state */
+ unsigned carrier;
- mcl[j-1].args[MULTI_UVMFLAGS_INDEX] = 0;
- j = 0;
- }
- }
+ /** Device MAC Address */
+ uint8_t mac[ETHER_ADDR_LEN];
- return (unsigned long)pages;
-}
+ /* Xen related fields */
-#ifdef XEN_NETBACK_FIXUP_CSUM
-static void
-fixup_checksum(struct mbuf *m)
-{
- struct ether_header *eh = mtod(m, struct ether_header *);
- struct ip *ip = (struct ip *)(eh + 1);
- int iphlen = ip->ip_hl << 2;
- int iplen = ntohs(ip->ip_len);
-
- if ((m->m_pkthdr.csum_flags & CSUM_TCP)) {
- struct tcphdr *th = (struct tcphdr *)((caddr_t)ip + iphlen);
- th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
- htons(IPPROTO_TCP + (iplen - iphlen)));
- th->th_sum = in_cksum_skip(m, iplen + sizeof(*eh), sizeof(*eh) + iphlen);
- m->m_pkthdr.csum_flags &= ~CSUM_TCP;
-#ifdef SCTP
- } else if (sw_csum & CSUM_SCTP) {
- sctp_delayed_cksum(m, iphlen);
- sw_csum &= ~CSUM_SCTP;
-#endif
- } else {
- u_short csum;
- struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
- uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
- htons(IPPROTO_UDP + (iplen - iphlen)));
- if ((csum = in_cksum_skip(m, iplen + sizeof(*eh), sizeof(*eh) + iphlen)) == 0)
- csum = 0xffff;
- uh->uh_sum = csum;
- m->m_pkthdr.csum_flags &= ~CSUM_UDP;
- }
-}
+ /**
+ * \brief The netif protocol abi in effect.
+ *
+ * There are situations where the back and front ends can
+ * have a different, native abi (e.g. intel x86_64 and
+ * 32bit x86 domains on the same machine). The back-end
+ * always accommodates the front-end's native abi. That
+ * value is pulled from the XenStore and recorded here.
+ */
+ int abi;
+
+ /**
+ * Name of the bridge to which this VIF is connected, if any
+ * This field is dynamically allocated by xenbus and must be free()ed
+ * when no longer needed
+ */
+ char *bridge;
+
+ /** The interrupt-driven event channel used to signal ring events. */
+ evtchn_port_t evtchn;
+
+ /** Xen device handle.*/
+ long handle;
+
+ /** IRQ mapping for the communication ring event channel. */
+ int irq;
+
+ /**
+ * \brief Cached value of the front-end's domain id.
+ *
+ * This value is used once for each mapped page in
+ * a transaction. We cache it to avoid incurring the
+ * cost of an ivar access every time it is needed.
+ */
+ domid_t otherend_id;
+
+ /**
+ * Undocumented frontend feature. Has something to do with
+ * scatter/gather IO
+ */
+ uint8_t can_sg;
+ /** Undocumented frontend feature */
+ uint8_t gso;
+ /** Undocumented frontend feature */
+ uint8_t gso_prefix;
+ /** Can checksum TCP/UDP over IPv4 */
+ uint8_t ip_csum;
+
+ /* Implementation related fields */
+ /**
+ * Preallocated grant table copy descriptor for RX operations.
+ * Access must be protected by rx_lock
+ */
+ gnttab_copy_table rx_gnttab;
+
+ /**
+ * Preallocated grant table copy descriptor for TX operations.
+ * Access must be protected by tx_lock
+ */
+ gnttab_copy_table tx_gnttab;
+
+#ifdef XENHVM
+ /**
+ * Resource representing allocated physical address space
+ * associated with our per-instance kva region.
+ */
+ struct resource *pseudo_phys_res;
+
+ /** Resource id for allocated physical address space. */
+ int pseudo_phys_res_id;
#endif
-/* Add the interface to the specified bridge */
-static int
-add_to_bridge(struct ifnet *ifp, char *bridge)
-{
- struct ifdrv ifd;
- struct ifbreq ifb;
- struct ifnet *ifp_bridge = ifunit(bridge);
+ /** Ring mapping and interrupt configuration data. */
+ struct xnb_ring_config ring_configs[XNB_NUM_RING_TYPES];
- if (!ifp_bridge)
- return ENOENT;
+ /**
+ * Global pool of kva used for mapping remote domain ring
+ * and I/O transaction data.
+ */
+ vm_offset_t kva;
- bzero(&ifd, sizeof(ifd));
- bzero(&ifb, sizeof(ifb));
+ /** Pseudo-physical address corresponding to kva. */
+ uint64_t gnt_base_addr;
- strcpy(ifb.ifbr_ifsname, ifp->if_xname);
- strcpy(ifd.ifd_name, ifp->if_xname);
- ifd.ifd_cmd = BRDGADD;
- ifd.ifd_len = sizeof(ifb);
- ifd.ifd_data = &ifb;
+ /** Various configuration and state bit flags. */
+ xnb_flag_t flags;
- return bridge_ioctl_kern(ifp_bridge, SIOCSDRVSPEC, &ifd);
-
-}
+ /** Mutex protecting per-instance data in the receive path. */
+ struct mtx rx_lock;
-static int
-netif_create(int handle, struct xenbus_device *xdev, char *bridge)
-{
- netif_t *netif;
- struct ifnet *ifp;
+ /** Mutex protecting per-instance data in the softc structure. */
+ struct mtx sc_lock;
- netif = (netif_t *)malloc(sizeof(*netif), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (!netif)
- return ENOMEM;
+ /** Mutex protecting per-instance data in the transmit path. */
+ struct mtx tx_lock;
- netif->ref_cnt = 1;
- netif->handle = handle;
- netif->domid = xdev->otherend_id;
- netif->xdev = xdev;
- netif->bridge = bridge;
- xdev->data = netif;
-
- /* Set up ifnet structure */
- ifp = netif->ifp = if_alloc(IFT_ETHER);
- if (!ifp) {
- if (bridge)
- free(bridge, M_DEVBUF);
- free(netif, M_DEVBUF);
- return ENOMEM;
+ /** The size of the global kva pool. */
+ int kva_size;
+};
+
+/*---------------------------- Debugging functions ---------------------------*/
+#ifdef XNB_DEBUG
+static void __unused
+xnb_dump_gnttab_copy(const struct gnttab_copy *entry)
+{
+ if (entry == NULL) {
+ printf("NULL grant table pointer\n");
+ return;
}
- ifp->if_softc = netif;
- if_initname(ifp, "vif",
- atomic_fetchadd_int(&vif_unit_maker, 1) /* ifno */ );
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
- ifp->if_output = ether_output;
- ifp->if_start = netback_start;
- ifp->if_ioctl = netback_ioctl;
- ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;
-
- DPRINTF("Created %s for domid=%d handle=%d\n", IFNAME(netif), netif->domid, netif->handle);
+ if (entry->flags & GNTCOPY_dest_gref)
+ printf("gnttab dest ref=\t%u\n", entry->dest.u.ref);
+ else
+ printf("gnttab dest gmfn=\t%lu\n", entry->dest.u.gmfn);
+ printf("gnttab dest offset=\t%hu\n", entry->dest.offset);
+ printf("gnttab dest domid=\t%hu\n", entry->dest.domid);
+ if (entry->flags & GNTCOPY_source_gref)
+ printf("gnttab source ref=\t%u\n", entry->source.u.ref);
+ else
+ printf("gnttab source gmfn=\t%lu\n", entry->source.u.gmfn);
+ printf("gnttab source offset=\t%hu\n", entry->source.offset);
+ printf("gnttab source domid=\t%hu\n", entry->source.domid);
+ printf("gnttab len=\t%hu\n", entry->len);
+ printf("gnttab flags=\t%hu\n", entry->flags);
+ printf("gnttab status=\t%hd\n", entry->status);
+}
- return 0;
+static int
+xnb_dump_rings(SYSCTL_HANDLER_ARGS)
+{
+ static char results[720];
+ struct xnb_softc const* xnb = (struct xnb_softc*)arg1;
+ netif_rx_back_ring_t const* rxb =
+ &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;
+ netif_tx_back_ring_t const* txb =
+ &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;
+
+ /* empty the result strings */
+ results[0] = 0;
+
+ if ( !txb || !txb->sring || !rxb || !rxb->sring )
+ return (SYSCTL_OUT(req, results, strnlen(results, 720)));
+
+ snprintf(results, 720,
+ "\n\t%35s %18s\n" /* TX, RX */
+ "\t%16s %18d %18d\n" /* req_cons */
+ "\t%16s %18d %18d\n" /* nr_ents */
+ "\t%16s %18d %18d\n" /* rsp_prod_pvt */
+ "\t%16s %18p %18p\n" /* sring */
+ "\t%16s %18d %18d\n" /* req_prod */
+ "\t%16s %18d %18d\n" /* req_event */
+ "\t%16s %18d %18d\n" /* rsp_prod */
+ "\t%16s %18d %18d\n", /* rsp_event */
+ "TX", "RX",
+ "req_cons", txb->req_cons, rxb->req_cons,
+ "nr_ents", txb->nr_ents, rxb->nr_ents,
+ "rsp_prod_pvt", txb->rsp_prod_pvt, rxb->rsp_prod_pvt,
+ "sring", txb->sring, rxb->sring,
+ "sring->req_prod", txb->sring->req_prod, rxb->sring->req_prod,
+ "sring->req_event", txb->sring->req_event, rxb->sring->req_event,
+ "sring->rsp_prod", txb->sring->rsp_prod, rxb->sring->rsp_prod,
+ "sring->rsp_event", txb->sring->rsp_event, rxb->sring->rsp_event);
+
+ return (SYSCTL_OUT(req, results, strnlen(results, 720)));
}
-static void
-netif_get(netif_t *netif)
+static void __unused
+xnb_dump_mbuf(const struct mbuf *m)
{
- atomic_add_int(&netif->ref_cnt, 1);
+ int len;
+ uint8_t *d;
+ if (m == NULL)
+ return;
+
+ printf("xnb_dump_mbuf:\n");
+ if (m->m_flags & M_PKTHDR) {
+ printf(" flowid=%10d, csum_flags=%#8x, csum_data=%#8x, "
+ "tso_segsz=%5hd\n",
+ m->m_pkthdr.flowid, m->m_pkthdr.csum_flags,
+ m->m_pkthdr.csum_data, m->m_pkthdr.tso_segsz);
+ printf(" rcvif=%16p, header=%18p, len=%19d\n",
+ m->m_pkthdr.rcvif, m->m_pkthdr.header, m->m_pkthdr.len);
+ }
+ printf(" m_next=%16p, m_nextpk=%16p, m_data=%16p\n",
+ m->m_next, m->m_nextpkt, m->m_data);
+ printf(" m_len=%17d, m_flags=%#15x, m_type=%18hd\n",
+ m->m_len, m->m_flags, m->m_type);
+
+ len = m->m_len;
+ d = mtod(m, uint8_t*);
+ while (len > 0) {
+ int i;
+ printf(" ");
+ for (i = 0; (i < 16) && (len > 0); i++, len--) {
+ printf("%02hhx ", *(d++));
+ }
+ printf("\n");
+ }
}
+#endif /* XNB_DEBUG */
+/*------------------------ Inter-Domain Communication ------------------------*/
+/**
+ * Free dynamically allocated KVA or pseudo-physical address allocations.
+ *
+ * \param xnb Per-instance xnb configuration structure.
+ */
static void
-netif_put(netif_t *netif)
+xnb_free_communication_mem(struct xnb_softc *xnb)
{
- if (atomic_fetchadd_int(&netif->ref_cnt, -1) == 1) {
- DPRINTF("%s\n", IFNAME(netif));
- disconnect_rings(netif);
- if (netif->ifp) {
- if_free(netif->ifp);
- netif->ifp = NULL;
+ if (xnb->kva != 0) {
+#ifndef XENHVM
+ kmem_free(kernel_map, xnb->kva, xnb->kva_size);
+#else
+ if (xnb->pseudo_phys_res != NULL) {
+ bus_release_resource(xnb->dev, SYS_RES_MEMORY,
+ xnb->pseudo_phys_res_id,
+ xnb->pseudo_phys_res);
+ xnb->pseudo_phys_res = NULL;
}
- if (netif->bridge)
- free(netif->bridge, M_DEVBUF);
- free(netif, M_DEVBUF);
+#endif /* XENHVM */
}
+ xnb->kva = 0;
+ xnb->gnt_base_addr = 0;
}
+/**
+ * Cleanup all inter-domain communication mechanisms.
+ *
+ * \param xnb Per-instance xnb configuration structure.
+ */
static int
-netback_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+xnb_disconnect(struct xnb_softc *xnb)
{
- switch (cmd) {
- case SIOCSIFFLAGS:
- DDPRINTF("%s cmd=SIOCSIFFLAGS flags=%x\n",
- IFNAME((struct netback_info *)ifp->if_softc), ((struct ifreq *)data)->ifr_flags);
- return 0;
+ struct gnttab_unmap_grant_ref gnts[XNB_NUM_RING_TYPES];
+ int error;
+ int i;
+
+ if (xnb->irq != 0) {
+ unbind_from_irqhandler(xnb->irq);
+ xnb->irq = 0;
}
- DDPRINTF("%s cmd=%lx\n", IFNAME((struct netback_info *)ifp->if_softc), cmd);
+ /*
+ * We may still have another thread currently processing requests. We
+ * must acquire the rx and tx locks to make sure those threads are done,
+ * but we can release those locks as soon as we acquire them, because no
+ * more interrupts will be arriving.
+ */
+ mtx_lock(&xnb->tx_lock);
+ mtx_unlock(&xnb->tx_lock);
+ mtx_lock(&xnb->rx_lock);
+ mtx_unlock(&xnb->rx_lock);
+
+ /* Free malloc'd softc member variables */
+ if (xnb->bridge != NULL)
+ free(xnb->bridge, M_XENSTORE);
+
+ /* All request processing has stopped, so unmap the rings */
+ for (i=0; i < XNB_NUM_RING_TYPES; i++) {
+ gnts[i].host_addr = xnb->ring_configs[i].gnt_addr;
+ gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr;
+ gnts[i].handle = xnb->ring_configs[i].handle;
+ }
+ error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, gnts,
+ XNB_NUM_RING_TYPES);
+ KASSERT(error == 0, ("Grant table unmap op failed (%d)", error));
- return ether_ioctl(ifp, cmd, data);
-}
+ xnb_free_communication_mem(xnb);
+ /*
+ * Zero the ring config structs because the pointers, handles, and
+ * grant refs contained therein are no longer valid.
+ */
+ bzero(&xnb->ring_configs[XNB_RING_TYPE_TX],
+ sizeof(struct xnb_ring_config));
+ bzero(&xnb->ring_configs[XNB_RING_TYPE_RX],
+ sizeof(struct xnb_ring_config));
-static inline void
-maybe_schedule_tx_action(void)
-{
- smp_mb();
- if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) && !STAILQ_EMPTY(&tx_sched_list))
- taskqueue_enqueue(taskqueue_swi, &net_tx_task);
+ xnb->flags &= ~XNBF_RING_CONNECTED;
+ return (0);
}
-/* Removes netif from front of list and does not call netif_put() (caller must) */
-static netif_t *
-remove_from_tx_schedule_list(void)
+/**
+ * Map a single shared memory ring into domain local address space and
+ * initialize its control structure.
+ *
+ * \param xnb Per-instance xnb configuration structure
+ * \param ring_type Array index of this ring in the xnb's array of rings
+ * \return An errno
+ */
+static int
+xnb_connect_ring(struct xnb_softc *xnb, xnb_ring_type_t ring_type)
{
- netif_t *netif;
+ struct gnttab_map_grant_ref gnt;
+ struct xnb_ring_config *ring = &xnb->ring_configs[ring_type];
+ int error;
- mtx_lock(&tx_sched_list_lock);
-
- if ((netif = STAILQ_FIRST(&tx_sched_list))) {
- STAILQ_REMOVE(&tx_sched_list, netif, netback_info, next_tx);
- STAILQ_NEXT(netif, next_tx) = NULL;
- netif->on_tx_sched_list = 0;
- }
+ /* TX ring type = 0, RX =1 */
+ ring->va = xnb->kva + ring_type * PAGE_SIZE;
+ ring->gnt_addr = xnb->gnt_base_addr + ring_type * PAGE_SIZE;
- mtx_unlock(&tx_sched_list_lock);
+ gnt.host_addr = ring->gnt_addr;
+ gnt.flags = GNTMAP_host_map;
+ gnt.ref = ring->ring_ref;
+ gnt.dom = xnb->otherend_id;
- return netif;
-}
-
-/* Adds netif to end of list and calls netif_get() */
-static void
-add_to_tx_schedule_list_tail(netif_t *netif)
-{
- if (netif->on_tx_sched_list)
- return;
+ error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &gnt, 1);
+ if (error != 0)
+ panic("netback: Ring page grant table op failed (%d)", error);
- mtx_lock(&tx_sched_list_lock);
- if (!netif->on_tx_sched_list && (netif->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- netif_get(netif);
- STAILQ_INSERT_TAIL(&tx_sched_list, netif, next_tx);
- netif->on_tx_sched_list = 1;
+ if (gnt.status != 0) {
+ ring->va = 0;
+ error = EACCES;
+ xenbus_dev_fatal(xnb->dev, error,
+ "Ring shared page mapping failed. "
+ "Status %d.", gnt.status);
+ } else {
+ ring->handle = gnt.handle;
+ ring->bus_addr = gnt.dev_bus_addr;
+
+ if (ring_type == XNB_RING_TYPE_TX) {
+ BACK_RING_INIT(&ring->back_ring.tx_ring,
+ (netif_tx_sring_t*)ring->va,
+ ring->ring_pages * PAGE_SIZE);
+ } else if (ring_type == XNB_RING_TYPE_RX) {
+ BACK_RING_INIT(&ring->back_ring.rx_ring,
+ (netif_rx_sring_t*)ring->va,
+ ring->ring_pages * PAGE_SIZE);
+ } else {
+ xenbus_dev_fatal(xnb->dev, error,
+ "Unknown ring type %d", ring_type);
+ }
}
- mtx_unlock(&tx_sched_list_lock);
+
+ return error;
}
-/*
- * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
- * If this driver is pipelining transmit requests then we can be very
- * aggressive in avoiding new-packet notifications -- frontend only needs to
- * send a notification if there are no outstanding unreceived responses.
- * If we may be buffer transmit buffers for any reason then we must be rather
- * more conservative and treat this as the final check for pending work.
+/**
+ * Setup the shared memory rings and bind an interrupt to the event channel
+ * used to notify us of ring changes.
+ *
+ * \param xnb Per-instance xnb configuration structure.
*/
-static void
-netif_schedule_tx_work(netif_t *netif)
+static int
+xnb_connect_comms(struct xnb_softc *xnb)
{
- int more_to_do;
+ int error;
+ xnb_ring_type_t i;
-#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
- more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
-#else
- RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-#endif
+ if ((xnb->flags & XNBF_RING_CONNECTED) != 0)
+ return (0);
- if (more_to_do) {
- DDPRINTF("Adding %s to tx sched list\n", IFNAME(netif));
- add_to_tx_schedule_list_tail(netif);
- maybe_schedule_tx_action();
+ /*
+ * The kva for our rings is at the tail of the region of kva
+ * allocated by xnb_alloc_communication_mem().
+ */
+ for (i=0; i < XNB_NUM_RING_TYPES; i++) {
+ error = xnb_connect_ring(xnb, i);
+ if (error != 0)
+ return error;
}
-}
-static struct mtx dealloc_lock;
-MTX_SYSINIT(netback_dealloc, &dealloc_lock, "DEALLOC LOCK", MTX_SPIN | MTX_NOWITNESS);
+ xnb->flags |= XNBF_RING_CONNECTED;
+
+ error =
+ bind_interdomain_evtchn_to_irqhandler(xnb->otherend_id,
+ xnb->evtchn,
+ device_get_nameunit(xnb->dev),
+ xnb_intr, /*arg*/xnb,
+ INTR_TYPE_BIO | INTR_MPSAFE,
+ &xnb->irq);
+ if (error != 0) {
+ (void)xnb_disconnect(xnb);
+ xenbus_dev_fatal(xnb->dev, error, "binding event channel");
+ return (error);
+ }
-static void
-netif_idx_release(uint16_t pending_idx)
-{
- mtx_lock_spin(&dealloc_lock);
- dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
- mtx_unlock_spin(&dealloc_lock);
+ DPRINTF("rings connected!\n");
- taskqueue_enqueue(taskqueue_swi, &net_tx_task);
+ return (0);
}
-static void
-make_tx_response(netif_t *netif,
- uint16_t id,
- int8_t st)
+/**
+ * Size KVA and pseudo-physical address allocations based on negotiated
+ * values for the size and number of I/O requests, and the size of our
+ * communication ring.
+ *
+ * \param xnb Per-instance xnb configuration structure.
+ *
+ * These address spaces are used to dynamically map pages in the
+ * front-end's domain into our own.
+ */
+static int
+xnb_alloc_communication_mem(struct xnb_softc *xnb)
{
- RING_IDX i = netif->tx.rsp_prod_pvt;
- netif_tx_response_t *resp;
- int notify;
-
- resp = RING_GET_RESPONSE(&netif->tx, i);
- resp->id = id;
- resp->status = st;
-
- netif->tx.rsp_prod_pvt = ++i;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
- if (notify)
- notify_remote_via_irq(netif->irq);
-
-#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
- if (i == netif->tx.req_cons) {
- int more_to_do;
- RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
- if (more_to_do)
- add_to_tx_schedule_list_tail(netif);
+ xnb_ring_type_t i;
+
+ xnb->kva_size = 0;
+ for (i=0; i < XNB_NUM_RING_TYPES; i++) {
+ xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE;
}
-#endif
+#ifndef XENHVM
+ xnb->kva = kmem_alloc_nofault(kernel_map, xnb->kva_size);
+ if (xnb->kva == 0)
+ return (ENOMEM);
+ xnb->gnt_base_addr = xnb->kva;
+#else /* defined XENHVM */
+ /*
+ * Reserve a range of pseudo physical memory that we can map
+ * into kva. These pages will only be backed by machine
+ * pages ("real memory") during the lifetime of front-end requests
+ * via grant table operations. We will map the netif tx and rx rings
+ * into this space.
+ */
+ xnb->pseudo_phys_res_id = 0;
+ xnb->pseudo_phys_res = bus_alloc_resource(xnb->dev, SYS_RES_MEMORY,
+ &xnb->pseudo_phys_res_id,
+ 0, ~0, xnb->kva_size,
+ RF_ACTIVE);
+ if (xnb->pseudo_phys_res == NULL) {
+ xnb->kva = 0;
+ return (ENOMEM);
+ }
+ xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res);
+ xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res);
+#endif /* !defined XENHVM */
+ return (0);
}
-static inline void
-net_tx_action_dealloc(void)
+/**
+ * Collect information from the XenStore related to our device and its frontend.
+ *
+ * \param xnb Per-instance xnb configuration structure.
+ */
+static int
+xnb_collect_xenstore_info(struct xnb_softc *xnb)
{
- gnttab_unmap_grant_ref_t *gop;
- uint16_t pending_idx;
- PEND_RING_IDX dc, dp;
- netif_t *netif;
- int ret;
+ /**
+ * \todo Linux collects the following info. We should collect most
+ * of this, too:
+ * "feature-rx-notify"
+ */
+ const char *otherend_path;
+ const char *our_path;
+ int err;
+ unsigned int rx_copy, bridge_len;
+ uint8_t no_csum_offload;
+
+ otherend_path = xenbus_get_otherend_path(xnb->dev);
+ our_path = xenbus_get_node(xnb->dev);
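+ /*
+ * Typically /local/domain/<front-id>/device/vif/<n> and
+ * /local/domain/0/backend/vif/<front-id>/<n>, respectively.
+ */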
+
+ /* Collect the critical communication parameters */
+ err = xs_gather(XST_NIL, otherend_path,
+ "tx-ring-ref", "%l" PRIu32,
+ &xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref,
+ "rx-ring-ref", "%l" PRIu32,
+ &xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref,
+ "event-channel", "%" PRIu32, &xnb->evtchn,
+ NULL);
+ if (err != 0) {
+ xenbus_dev_fatal(xnb->dev, err,
+ "Unable to retrieve ring information from "
+ "frontend %s. Unable to connect.",
+ otherend_path);
+ return (err);
+ }
- dc = dealloc_cons;
- dp = dealloc_prod;
+ /* Collect the handle from xenstore */
+ err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle);
+ if (err != 0) {
+ xenbus_dev_fatal(xnb->dev, err,
+ "Error reading handle from frontend %s. "
+ "Unable to connect.", otherend_path);
+ }
/*
- * Free up any grants we have finished using
+ * Collect the bridge name, if any. We do not need bridge_len; we
+ * just throw it away.
*/
- gop = tx_unmap_ops;
- while (dc != dp) {
- pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
- gop->host_addr = MMAP_VADDR(pending_idx);
- gop->dev_bus_addr = 0;
- gop->handle = grant_tx_handle[pending_idx];
- gop++;
- }
- ret = HYPERVISOR_grant_table_op(
- GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
- BUG_ON(ret);
+ err = xs_read(XST_NIL, our_path, "bridge", &bridge_len,
+ (void**)&xnb->bridge);
+ if (err != 0)
+ xnb->bridge = NULL;
- while (dealloc_cons != dp) {
- pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
+ /*
+ * Does the frontend request that we use rx copy? If not, return an
+ * error because this driver only supports rx copy.
+ */
+ err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL,
+ "%" PRIu32, &rx_copy);
+ if (err == ENOENT) {
+ err = 0;
+ rx_copy = 0;
+ }
+ if (err < 0) {
+ xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy",
+ otherend_path);
+ return err;
+ }
+ /**
+ * \todo: figure out the exact meaning of this feature, and when
+ * the frontend will set it to true. It should be set to true
+ * at some point
+ */
+/* if (!rx_copy)*/
+/* return EOPNOTSUPP;*/
- netif = pending_tx_info[pending_idx].netif;
+ /** \todo Collect the rx notify feature */
- make_tx_response(netif, pending_tx_info[pending_idx].req.id,
- NETIF_RSP_OKAY);
-
- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+ /* Collect the feature-sg. */
+ if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL,
+ "%hhu", &xnb->can_sg) < 0)
+ xnb->can_sg = 0;
- netif_put(netif);
- }
-}
+ /* Collect remaining frontend features */
+ if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL,
+ "%hhu", &xnb->gso) < 0)
+ xnb->gso = 0;
-static void
-netif_page_release(void *buf, void *args)
-{
- uint16_t pending_idx = (unsigned int)args;
-
- DDPRINTF("pending_idx=%u\n", pending_idx);
+ if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL,
+ "%hhu", &xnb->gso_prefix) < 0)
+ xnb->gso_prefix = 0;
- KASSERT(pending_idx < MAX_PENDING_REQS, ("%s: bad index %u", __func__, pending_idx));
+ if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL,
+ "%hhu", &no_csum_offload) < 0)
+ no_csum_offload = 0;
+ xnb->ip_csum = (no_csum_offload == 0);
- netif_idx_release(pending_idx);
+ return (0);
}
-static void
-net_tx_action(void *context, int pending)
+/**
+ * Supply information about the physical device to the frontend
+ * via XenBus.
+ *
+ * \param xnb Per-instance xnb configuration structure.
+ */
+static int
+xnb_publish_backend_info(struct xnb_softc *xnb)
{
- struct mbuf *m;
- netif_t *netif;
- netif_tx_request_t txreq;
- uint16_t pending_idx;
- RING_IDX i;
- gnttab_map_grant_ref_t *mop;
- int ret, work_to_do;
- struct mbuf *txq = NULL, *txq_last = NULL;
-
- if (dealloc_cons != dealloc_prod)
- net_tx_action_dealloc();
-
- mop = tx_map_ops;
- while ((NR_PENDING_REQS < MAX_PENDING_REQS) && !STAILQ_EMPTY(&tx_sched_list)) {
-
- /* Get a netif from the list with work to do. */
- netif = remove_from_tx_schedule_list();
-
- DDPRINTF("Processing %s (prod=%u, cons=%u)\n",
- IFNAME(netif), netif->tx.sring->req_prod, netif->tx.req_cons);
-
- RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
- if (!work_to_do) {
- netif_put(netif);
- continue;
+ struct xs_transaction xst;
+ const char *our_path;
+ int error;
+
+ our_path = xenbus_get_node(xnb->dev);
+
+ do {
+ error = xs_transaction_start(&xst);
+ if (error != 0) {
+ xenbus_dev_fatal(xnb->dev, error,
+ "Error publishing backend info "
+ "(start transaction)");
+ break;
}
- i = netif->tx.req_cons;
- rmb(); /* Ensure that we see the request before we copy it. */
- memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
+ error = xs_printf(xst, our_path, "feature-sg",
+ "%d", XNB_SG);
+ if (error != 0)
+ break;
- /* If we want credit-based scheduling, coud add it here - WORK */
+ error = xs_printf(xst, our_path, "feature-gso-tcpv4",
+ "%d", XNB_GSO_TCPV4);
+ if (error != 0)
+ break;
- netif->tx.req_cons++;
+ error = xs_printf(xst, our_path, "feature-rx-copy",
+ "%d", XNB_RX_COPY);
+ if (error != 0)
+ break;
- netif_schedule_tx_work(netif);
+ error = xs_printf(xst, our_path, "feature-rx-flip",
+ "%d", XNB_RX_FLIP);
+ if (error != 0)
+ break;
- if (unlikely(txreq.size < ETHER_HDR_LEN) ||
- unlikely(txreq.size > (ETHER_MAX_LEN-ETHER_CRC_LEN))) {
- WPRINTF("Bad packet size: %d\n", txreq.size);
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- continue;
+ error = xs_transaction_end(xst, 0);
+ if (error != 0 && error != EAGAIN) {
+ xenbus_dev_fatal(xnb->dev, error, "ending transaction");
+ break;
}
- /* No crossing a page as the payload mustn't fragment. */
- if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
- WPRINTF("txreq.offset: %x, size: %u, end: %u\n",
- txreq.offset, txreq.size,
- (txreq.offset & PAGE_MASK) + txreq.size);
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- continue;
- }
+ } while (error == EAGAIN);
- pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+ return (error);
+}
- MGETHDR(m, M_DONTWAIT, MT_DATA);
- if (!m) {
- WPRINTF("Failed to allocate mbuf\n");
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- break;
- }
- m->m_pkthdr.rcvif = netif->ifp;
-
- if ((m->m_pkthdr.len = txreq.size) > PKT_PROT_LEN) {
- struct mbuf *n;
- MGET(n, M_DONTWAIT, MT_DATA);
- if (!(m->m_next = n)) {
- m_freem(m);
- WPRINTF("Failed to allocate second mbuf\n");
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- break;
- }
- n->m_len = txreq.size - PKT_PROT_LEN;
- m->m_len = PKT_PROT_LEN;
- } else
- m->m_len = txreq.size;
-
- mop->host_addr = MMAP_VADDR(pending_idx);
- mop->dom = netif->domid;
- mop->ref = txreq.gref;
- mop->flags = GNTMAP_host_map | GNTMAP_readonly;
- mop++;
-
- memcpy(&pending_tx_info[pending_idx].req,
- &txreq, sizeof(txreq));
- pending_tx_info[pending_idx].netif = netif;
- *((uint16_t *)m->m_data) = pending_idx;
-
- if (txq_last)
- txq_last->m_nextpkt = m;
- else
- txq = m;
- txq_last = m;
-
- pending_cons++;
-
- if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
- break;
- }
+/**
+ * Connect to our netfront peer now that it has completed publishing
+ * its configuration into the XenStore.
+ *
+ * \param xnb Per-instance xnb configuration structure.
+ */
+static void
+xnb_connect(struct xnb_softc *xnb)
+{
+ int error;
- if (!txq)
+ if (xenbus_get_state(xnb->dev) == XenbusStateConnected)
return;
- ret = HYPERVISOR_grant_table_op(
- GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
- BUG_ON(ret);
-
- mop = tx_map_ops;
- while ((m = txq) != NULL) {
- caddr_t data;
-
- txq = m->m_nextpkt;
- m->m_nextpkt = NULL;
-
- pending_idx = *((uint16_t *)m->m_data);
- netif = pending_tx_info[pending_idx].netif;
- memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
-
- /* Check the remap error code. */
- if (unlikely(mop->status)) {
- WPRINTF("#### netback grant fails\n");
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- m_freem(m);
- mop++;
- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
- continue;
- }
+ if (xnb_collect_xenstore_info(xnb) != 0)
+ return;
-#if 0
- /* Can't do this in FreeBSD since vtophys() returns the pfn */
- /* of the remote domain who loaned us the machine page - DPT */
- xen_phys_machine[(vtophys(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT)] =
- mop->dev_bus_addr >> PAGE_SHIFT;
-#endif
- grant_tx_handle[pending_idx] = mop->handle;
-
- /* Setup data in mbuf (lengths are already set) */
- data = (caddr_t)(MMAP_VADDR(pending_idx)|txreq.offset);
- bcopy(data, m->m_data, m->m_len);
- if (m->m_next) {
- struct mbuf *n = m->m_next;
- MEXTADD(n, MMAP_VADDR(pending_idx), PAGE_SIZE, netif_page_release,
- (void *)(unsigned int)pending_idx, M_RDONLY, EXT_NET_DRV);
- n->m_data = &data[PKT_PROT_LEN];
- } else {
- /* Schedule a response immediately. */
- netif_idx_release(pending_idx);
- }
+ xnb->flags &= ~XNBF_SHUTDOWN;
- if ((txreq.flags & NETTXF_data_validated)) {
- /* Tell the stack the checksums are okay */
- m->m_pkthdr.csum_flags |=
- (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
- m->m_pkthdr.csum_data = 0xffff;
- }
+ /* Read front end configuration. */
- /* If necessary, inform stack to compute the checksums if it forwards the packet */
- if ((txreq.flags & NETTXF_csum_blank)) {
- struct ether_header *eh = mtod(m, struct ether_header *);
- if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
- struct ip *ip = (struct ip *)&m->m_data[14];
- if (ip->ip_p == IPPROTO_TCP)
- m->m_pkthdr.csum_flags |= CSUM_TCP;
- else if (ip->ip_p == IPPROTO_UDP)
- m->m_pkthdr.csum_flags |= CSUM_UDP;
- }
- }
+ /* Allocate resources whose size depends on front-end configuration. */
+ error = xnb_alloc_communication_mem(xnb);
+ if (error != 0) {
+ xenbus_dev_fatal(xnb->dev, error,
+ "Unable to allocate communication memory");
+ return;
+ }
- netif->ifp->if_ibytes += m->m_pkthdr.len;
- netif->ifp->if_ipackets++;
+ /*
+ * Connect communication channel.
+ */
+ error = xnb_connect_comms(xnb);
+ if (error != 0) {
+ /* Specific errors are reported by xnb_connect_comms(). */
+ return;
+ }
+ xnb->carrier = 1;
+
+ /* Ready for I/O. */
+ xenbus_set_state(xnb->dev, XenbusStateConnected);
+}
+
+/*-------------------------- Device Teardown Support -------------------------*/
+/**
+ * Perform device shutdown functions.
+ *
+ * \param xnb Per-instance xnb configuration structure.
+ *
+ * Mark this instance as shutting down, wait for any active requests
+ * to drain, disconnect from the front-end, and notify any waiters (e.g.
+ * a thread invoking our detach method) that detach can now proceed.
+ */
+static int
+xnb_shutdown(struct xnb_softc *xnb)
+{
+ /*
+ * Due to the need to drop our mutex during some
+ * xenbus operations, it is possible for two threads
+ * to attempt to close out shutdown processing at
+ * the same time. Tell the caller that hits this
+ * race to try back later.
+ */
+ if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0)
+ return (EAGAIN);
- DDPRINTF("RECV %d bytes from %s (cflags=%x)\n",
- m->m_pkthdr.len, IFNAME(netif), m->m_pkthdr.csum_flags);
- DPRINTF_MBUF_LEN(m, 128);
+ xnb->flags |= XNBF_SHUTDOWN;
- (*netif->ifp->if_input)(netif->ifp, m);
+ xnb->flags |= XNBF_IN_SHUTDOWN;
- mop++;
+ mtx_unlock(&xnb->sc_lock);
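+ /*
+ * Drop the softc lock while tearing down the ifnet; ether_ifdetach()
+ * may sleep and must not be called with a mutex held.
+ */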
+ /* Free the network interface */
+ xnb->carrier = 0;
+ if (xnb->xnb_ifp != NULL) {
+ ether_ifdetach(xnb->xnb_ifp);
+ if_free(xnb->xnb_ifp);
+ xnb->xnb_ifp = NULL;
}
+ mtx_lock(&xnb->sc_lock);
+
+ xnb_disconnect(xnb);
+
+ mtx_unlock(&xnb->sc_lock);
+ if (xenbus_get_state(xnb->dev) < XenbusStateClosing)
+ xenbus_set_state(xnb->dev, XenbusStateClosing);
+ mtx_lock(&xnb->sc_lock);
+
+ xnb->flags &= ~XNBF_IN_SHUTDOWN;
+
+ /* Indicate to xnb_detach() that it is safe to proceed. */
+ wakeup(xnb);
+
+ return (0);
}
-/* Handle interrupt from a frontend */
+/**
+ * Report an attach time error to the console and Xen, and cleanup
+ * this instance by forcing immediate detach processing.
+ *
+ * \param xnb Per-instance xnb configuration structure.
+ * \param err Errno describing the error.
+ * \param fmt Printf-style format string and arguments.
+ */
static void
-netback_intr(void *arg)
+xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...)
{
- netif_t *netif = arg;
- DDPRINTF("%s\n", IFNAME(netif));
- add_to_tx_schedule_list_tail(netif);
- maybe_schedule_tx_action();
+ va_list ap;
+ va_list ap_hotplug;
+
+ va_start(ap, fmt);
+ va_copy(ap_hotplug, ap);
+ xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev),
+ "hotplug-error", fmt, ap_hotplug);
+ va_end(ap_hotplug);
+ xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
+ "hotplug-status", "error");
+
+ xenbus_dev_vfatal(xnb->dev, err, fmt, ap);
+ va_end(ap);
+
+ xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
+ "online", "0");
+ xnb_detach(xnb->dev);
}
-/* Removes netif from front of list and does not call netif_put() (caller must) */
-static netif_t *
-remove_from_rx_schedule_list(void)
+/*---------------------------- NewBus Entrypoints ----------------------------*/
+/**
+ * Inspect a XenBus device and claim it if it is of the appropriate type.
+ *
+ * \param dev NewBus device object representing a candidate XenBus device.
+ *
+ * \return 0 for success, errno codes for failure.
+ */
+static int
+xnb_probe(device_t dev)
{
- netif_t *netif;
-
- mtx_lock(&rx_sched_list_lock);
-
- if ((netif = STAILQ_FIRST(&rx_sched_list))) {
- STAILQ_REMOVE(&rx_sched_list, netif, netback_info, next_rx);
- STAILQ_NEXT(netif, next_rx) = NULL;
- netif->on_rx_sched_list = 0;
+ if (!strcmp(xenbus_get_type(dev), "vif")) {
+ DPRINTF("Claiming device %d, %s\n", device_get_unit(dev),
+ devclass_get_name(device_get_devclass(dev)));
+ device_set_desc(dev, "Backend Virtual Network Device");
+ device_quiet(dev);
+ return (0);
}
-
- mtx_unlock(&rx_sched_list_lock);
-
- return netif;
+ return (ENXIO);
}
-/* Adds netif to end of list and calls netif_get() */
+/**
+ * Setup sysctl variables to control various Network Back parameters.
+ *
+ * \param xnb Xen Net Back softc.
+ */
static void
-add_to_rx_schedule_list_tail(netif_t *netif)
+xnb_setup_sysctl(struct xnb_softc *xnb)
{
- if (netif->on_rx_sched_list)
+ struct sysctl_ctx_list *sysctl_ctx = NULL;
+ struct sysctl_oid *sysctl_tree = NULL;
+
+ sysctl_ctx = device_get_sysctl_ctx(xnb->dev);
+ if (sysctl_ctx == NULL)
return;
- mtx_lock(&rx_sched_list_lock);
- if (!netif->on_rx_sched_list && (netif->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- netif_get(netif);
- STAILQ_INSERT_TAIL(&rx_sched_list, netif, next_rx);
- netif->on_rx_sched_list = 1;
- }
- mtx_unlock(&rx_sched_list_lock);
+ sysctl_tree = device_get_sysctl_tree(xnb->dev);
+ if (sysctl_tree == NULL)
+ return;
+
+#ifdef XNB_DEBUG
+ SYSCTL_ADD_PROC(sysctl_ctx,
+ SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO,
+ "unit_test_results",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ xnb,
+ 0,
+ xnb_unit_test_main,
+ "A",
+ "Results of builtin unit tests");
+
+ SYSCTL_ADD_PROC(sysctl_ctx,
+ SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO,
+ "dump_rings",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ xnb,
+ 0,
+ xnb_dump_rings,
+ "A",
+ "Xennet Back Rings");
+#endif /* XNB_DEBUG */
}
-static int
-make_rx_response(netif_t *netif, uint16_t id, int8_t st,
- uint16_t offset, uint16_t size, uint16_t flags)
+/**
+ * Create a network device.
+ *
+ * \param dev NewBus device object representing this Xen Net Back instance.
+ */
+int
+create_netdev(device_t dev)
{
- RING_IDX i = netif->rx.rsp_prod_pvt;
- netif_rx_response_t *resp;
- int notify;
+ struct ifnet *ifp;
+ struct xnb_softc *xnb;
+ int err = 0;
- resp = RING_GET_RESPONSE(&netif->rx, i);
- resp->offset = offset;
- resp->flags = flags;
- resp->id = id;
- resp->status = (int16_t)size;
- if (st < 0)
- resp->status = (int16_t)st;
+ xnb = device_get_softc(dev);
+ mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF);
+ mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF);
+ mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF);
+
+ xnb->dev = dev;
+
+ ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts);
+ ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
+ ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL);
+
+ err = xen_net_read_mac(dev, xnb->mac);
+ if (err == 0) {
+ /* Set up ifnet structure */
+ ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER);
+ ifp->if_softc = xnb;
+ if_initname(ifp, "xnb", device_get_unit(dev));
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = xnb_ioctl;
+ ifp->if_output = ether_output;
+ ifp->if_start = xnb_start;
+#ifdef notyet
+ ifp->if_watchdog = xnb_watchdog;
+#endif
+ ifp->if_init = xnb_ifinit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1;
- DDPRINTF("rx resp(%d): off=%x fl=%x id=%x stat=%d\n",
- i, resp->offset, resp->flags, resp->id, resp->status);
+ ifp->if_hwassist = XNB_CSUM_FEATURES;
+ ifp->if_capabilities = IFCAP_HWCSUM;
+ ifp->if_capenable = IFCAP_HWCSUM;
- netif->rx.rsp_prod_pvt = ++i;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, notify);
+ ether_ifattach(ifp, xnb->mac);
+ xnb->carrier = 0;
+ }
- return notify;
+ return err;
}
+/**
+ * Attach to a XenBus device that has been claimed by our probe routine.
+ *
+ * \param dev NewBus device object representing this Xen Net Back instance.
+ *
+ * \return 0 for success, errno codes for failure.
+ */
static int
-netif_rx(netif_t *netif)
+xnb_attach(device_t dev)
{
- struct ifnet *ifp = netif->ifp;
- struct mbuf *m;
- multicall_entry_t *mcl;
- mmu_update_t *mmu;
- gnttab_transfer_t *gop;
- unsigned long vdata, old_mfn, new_mfn;
- struct mbuf *rxq = NULL, *rxq_last = NULL;
- int ret, notify = 0, pkts_dequeued = 0;
+ struct xnb_softc *xnb;
+ int error;
+ xnb_ring_type_t i;
+
+ error = create_netdev(dev);
+ if (error != 0) {
+ xenbus_dev_fatal(dev, error, "creating netdev");
+ return (error);
+ }
- DDPRINTF("%s\n", IFNAME(netif));
+ DPRINTF("Attaching to %s\n", xenbus_get_node(dev));
- mcl = rx_mcl;
- mmu = rx_mmu;
- gop = grant_rx_op;
+ /*
+ * Basic initialization.
+ * After this block it is safe to call xnb_detach()
+ * to clean up any allocated data for this instance.
+ */
+ xnb = device_get_softc(dev);
+ xnb->otherend_id = xenbus_get_otherend_id(dev);
+ for (i=0; i < XNB_NUM_RING_TYPES; i++) {
+ xnb->ring_configs[i].ring_pages = 1;
+ }
- while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
-
- /* Quit if the target domain has no receive buffers */
- if (netif->rx.req_cons == netif->rx.sring->req_prod)
- break;
+ /*
+ * Setup sysctl variables.
+ */
+ xnb_setup_sysctl(xnb);
+
+ /* Update hot-plug status to satisfy xend. */
+ error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
+ "hotplug-status", "connected");
+ if (error != 0) {
+ xnb_attach_failed(xnb, error, "writing %s/hotplug-status",
+ xenbus_get_node(xnb->dev));
+ return (error);
+ }
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
+ if ((error = xnb_publish_backend_info(xnb)) != 0) {
+ /*
+ * If we can't publish our data, we cannot participate
+ * in this connection, and waiting for a front-end state
+ * change will not help the situation.
+ */
+ xnb_attach_failed(xnb, error,
+ "Publishing backend status for %s",
+ xenbus_get_node(xnb->dev));
+ return error;
+ }
- pkts_dequeued++;
-
- /* Check if we need to copy the data */
- if (((m->m_flags & (M_RDONLY|M_EXT)) != M_EXT) ||
- (*m->m_ext.ref_cnt > 1) || m->m_next != NULL) {
- struct mbuf *n;
-
- DDPRINTF("copying mbuf (fl=%x ext=%x rc=%d n=%x)\n",
- m->m_flags,
- (m->m_flags & M_EXT) ? m->m_ext.ext_type : 0,
- (m->m_flags & M_EXT) ? *m->m_ext.ref_cnt : 0,
- (unsigned int)m->m_next);
-
- /* Make copy */
- MGETHDR(n, M_DONTWAIT, MT_DATA);
- if (!n)
- goto drop;
-
- MCLGET(n, M_DONTWAIT);
- if (!(n->m_flags & M_EXT)) {
- m_freem(n);
- goto drop;
- }
+ /* Tell the front end that we are ready to connect. */
+ xenbus_set_state(dev, XenbusStateInitWait);
+
+ return (0);
+}
- /* Leave space at front and keep current alignment */
- n->m_data += 16 + ((unsigned int)m->m_data & 0x3);
+/**
+ * Detach from a net back device instance.
+ *
+ * \param dev NewBus device object representing this Xen Net Back instance.
+ *
+ * \return 0 for success, errno codes for failure.
+ *
+ * \note A net back device may be detached at any time in its life-cycle,
+ * including part way through the attach process. For this reason,
+ * initialization order and the initialization state checks in this
+ * routine must be carefully coupled so that attach time failures
+ * are gracefully handled.
+ */
+static int
+xnb_detach(device_t dev)
+{
+ struct xnb_softc *xnb;
- if (m->m_pkthdr.len > M_TRAILINGSPACE(n)) {
- WPRINTF("pkt to big %d\n", m->m_pkthdr.len);
- m_freem(n);
- goto drop;
- }
- m_copydata(m, 0, m->m_pkthdr.len, n->m_data);
- n->m_pkthdr.len = n->m_len = m->m_pkthdr.len;
- n->m_pkthdr.csum_flags = (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA);
- m_freem(m);
- m = n;
- }
+ DPRINTF("\n");
- vdata = (unsigned long)m->m_data;
- old_mfn = vtomach(vdata) >> PAGE_SHIFT;
+ xnb = device_get_softc(dev);
+ mtx_lock(&xnb->sc_lock);
+ while (xnb_shutdown(xnb) == EAGAIN) {
+ msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0,
+ "xnb_shutdown", 0);
+ }
+ mtx_unlock(&xnb->sc_lock);
+ DPRINTF("\n");
- if ((new_mfn = alloc_mfn()) == 0)
- goto drop;
+ mtx_destroy(&xnb->tx_lock);
+ mtx_destroy(&xnb->rx_lock);
+ mtx_destroy(&xnb->sc_lock);
+ return (0);
+}
-#ifdef XEN_NETBACK_FIXUP_CSUM
- /* Check if we need to compute a checksum. This happens */
- /* when bridging from one domain to another. */
- if ((m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) ||
- (m->m_pkthdr.csum_flags & CSUM_SCTP))
- fixup_checksum(m);
-#endif
+/**
+ * Prepare this net back device for suspension of this VM.
+ *
+ * \param dev NewBus device object representing this Xen net Back instance.
+ *
+ * \return 0 for success, errno codes for failure.
+ */
+static int
+xnb_suspend(device_t dev)
+{
+ return (0);
+}
- xen_phys_machine[(vtophys(vdata) >> PAGE_SHIFT)] = new_mfn;
-
- mcl->op = __HYPERVISOR_update_va_mapping;
- mcl->args[0] = vdata;
- mcl->args[1] = (new_mfn << PAGE_SHIFT) | PG_V | PG_RW | PG_M | PG_A;
- mcl->args[2] = 0;
- mcl->args[3] = 0;
- mcl++;
-
- gop->mfn = old_mfn;
- gop->domid = netif->domid;
- gop->ref = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons)->gref;
- netif->rx.req_cons++;
- gop++;
-
- mmu->ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- mmu->val = vtophys(vdata) >> PAGE_SHIFT;
- mmu++;
-
- if (rxq_last)
- rxq_last->m_nextpkt = m;
- else
- rxq = m;
- rxq_last = m;
-
- DDPRINTF("XMIT %d bytes to %s\n", m->m_pkthdr.len, IFNAME(netif));
- DPRINTF_MBUF_LEN(m, 128);
-
- /* Filled the batch queue? */
- if ((gop - grant_rx_op) == ARRAY_SIZE(grant_rx_op))
- break;
-
- continue;
- drop:
- DDPRINTF("dropping pkt\n");
- ifp->if_oerrors++;
- m_freem(m);
- }
+/**
+ * Perform any processing required to recover from a suspended state.
+ *
+ * \param dev NewBus device object representing this Xen Net Back instance.
+ *
+ * \return 0 for success, errno codes for failure.
+ */
+static int
+xnb_resume(device_t dev)
+{
+ return (0);
+}
- if (mcl == rx_mcl)
- return pkts_dequeued;
+/**
+ * Handle state changes expressed via the XenStore by our front-end peer.
+ *
+ * \param dev NewBus device object representing this Xen
+ * Net Back instance.
+ * \param frontend_state The new state of the front-end.
+ */
+static void
+xnb_frontend_changed(device_t dev, XenbusState frontend_state)
+{
+ struct xnb_softc *xnb;
- mcl->op = __HYPERVISOR_mmu_update;
- mcl->args[0] = (unsigned long)rx_mmu;
- mcl->args[1] = mmu - rx_mmu;
- mcl->args[2] = 0;
- mcl->args[3] = DOMID_SELF;
- mcl++;
+ xnb = device_get_softc(dev);
- mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
- ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
- BUG_ON(ret != 0);
+ DPRINTF("frontend_state=%s, xnb_state=%s\n",
+ xenbus_strstate(frontend_state),
+ xenbus_strstate(xenbus_get_state(xnb->dev)));
- ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, gop - grant_rx_op);
- BUG_ON(ret != 0);
+ switch (frontend_state) {
+ case XenbusStateInitialising:
+ break;
+ case XenbusStateInitialised:
+ case XenbusStateConnected:
+ xnb_connect(xnb);
+ break;
+ case XenbusStateClosing:
+ case XenbusStateClosed:
+ mtx_lock(&xnb->sc_lock);
+ xnb_shutdown(xnb);
+ mtx_unlock(&xnb->sc_lock);
+ if (frontend_state == XenbusStateClosed)
+ xenbus_set_state(xnb->dev, XenbusStateClosed);
+ break;
+ default:
+ xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend",
+ frontend_state);
+ break;
+ }
+}
+
+
+/*---------------------------- Request Processing ----------------------------*/
+/**
+ * Interrupt handler bound to the shared ring's event channel.
+ * Entry point for the xennet transmit path in netback.
+ * Transfers packets from the Xen ring to the host's generic networking stack.
+ *
+ * \param arg Callback argument registered during event channel
+ * binding - the xnb_softc for this instance.
+ */
+static void
+xnb_intr(void *arg)
+{
+ struct xnb_softc *xnb;
+ struct ifnet *ifp;
+ netif_tx_back_ring_t *txb;
+ RING_IDX req_prod_local;
+
+ xnb = (struct xnb_softc *)arg;
+ ifp = xnb->xnb_ifp;
+ txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;
+
+ mtx_lock(&xnb->tx_lock);
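+ /*
+ * Consume tx requests until the producer index stops moving; the
+ * re-check of req_prod at the bottom of the loop closes the race
+ * with a frontend that queues more requests while we are running.
+ */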
+ do {
+ int notify;
+ req_prod_local = txb->sring->req_prod;
+ xen_rmb();
+
+ for (;;) {
+ struct mbuf *mbufc;
+ int err;
+
+ err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp,
+ xnb->tx_gnttab);
+ if (err || (mbufc == NULL))
+ break;
- mcl = rx_mcl;
- gop = grant_rx_op;
+ /* Send the packet to the generic network stack */
+ (*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc);
+ }
- while ((m = rxq) != NULL) {
- int8_t status;
- uint16_t id, flags = 0;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify);
+ if (notify != 0)
+ notify_remote_via_irq(xnb->irq);
- rxq = m->m_nextpkt;
- m->m_nextpkt = NULL;
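+ /*
+ * Re-arm the event channel: ask the frontend to notify us once it
+ * produces another request past req_cons.
+ */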
+ txb->sring->req_event = txb->req_cons + 1;
+ xen_mb();
+ } while (txb->sring->req_prod != req_prod_local);
+ mtx_unlock(&xnb->tx_lock);
- /* Rederive the machine addresses. */
- new_mfn = mcl->args[1] >> PAGE_SHIFT;
- old_mfn = gop->mfn;
+ xnb_start(ifp);
+}
- ifp->if_obytes += m->m_pkthdr.len;
- ifp->if_opackets++;
- /* The update_va_mapping() must not fail. */
- BUG_ON(mcl->result != 0);
+/**
+ * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring.
+ * Will read exactly 0 or 1 packets from the ring; never a partial packet.
+ * \param[out] pkt The returned packet. If there is an error building
+ * the packet, pkt.list_len will be set to 0.
+ * \param[in] tx_ring Pointer to the Ring that is the input to this function
+ * \param[in] start The ring index of the first potential request
+ * \return The number of requests consumed to build this packet
+ */
+static int
+xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring,
+ RING_IDX start)
+{
+ /*
+ * Outline:
+ * 1) Initialize pkt
+ * 2) Read the first request of the packet
+ * 3) Read the extras
+ * 4) Set cdr
+ * 5) Loop on the remainder of the packet
+ * 6) Finalize pkt (stuff like car_size and list_len)
+ */
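+ /*
+ * Note: the "car"/"cdr" names follow Lisp usage: pkt->car is the
+ * ring index of the first request in the packet, and pkt->cdr is
+ * the index of the second, i.e. the head of the rest of the list.
+ */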
+ int idx = start;
+ int discard = 0; /* whether to discard the packet */
+ int more_data = 0; /* there are more requests past the last one */
+ uint16_t cdr_size = 0; /* accumulated size of requests 2 through n */
+
+ xnb_pkt_initialize(pkt);
+
+ /* Read the first request */
+ if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
+ netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
+ pkt->size = tx->size;
+ pkt->flags = tx->flags & ~NETTXF_more_data;
+ more_data = tx->flags & NETTXF_more_data;
+ pkt->list_len++;
+ pkt->car = idx;
+ idx++;
+ }
- /* Setup flags */
- if ((m->m_pkthdr.csum_flags & CSUM_DELAY_DATA))
- flags |= NETRXF_csum_blank | NETRXF_data_validated;
- else if ((m->m_pkthdr.csum_flags & CSUM_DATA_VALID))
- flags |= NETRXF_data_validated;
+ /* Read the extra info */
+ if ((pkt->flags & NETTXF_extra_info) &&
+ RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
+ netif_extra_info_t *ext =
+ (netif_extra_info_t*) RING_GET_REQUEST(tx_ring, idx);
+ pkt->extra.type = ext->type;
+ switch (pkt->extra.type) {
+ case XEN_NETIF_EXTRA_TYPE_GSO:
+ pkt->extra.u.gso = ext->u.gso;
+ break;
+ default:
+ /*
+ * The reference Linux netfront driver will
+ * never set any other extra.type. So we don't
+ * know what to do with it. Let's print an
+ * error, then consume and discard the packet
+ */
+ printf("xnb(%s:%d): Unknown extra info type %d."
+ " Discarding packet\n",
+ __func__, __LINE__, pkt->extra.type);
+ xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring,
+ start));
+ xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring,
+ idx));
+ discard = 1;
+ break;
+ }
- /* Check the reassignment error code. */
- status = NETIF_RSP_OKAY;
- if (gop->status != 0) {
- DPRINTF("Bad status %d from grant transfer to DOM%u\n",
- gop->status, netif->domid);
+ pkt->extra.flags = ext->flags;
+ if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) {
/*
- * Page no longer belongs to us unless GNTST_bad_page,
- * but that should be a fatal error anyway.
+ * The reference linux netfront driver never sets this
+ * flag (nor does any other known netfront). So we
+ * will discard the packet.
*/
- BUG_ON(gop->status == GNTST_bad_page);
- status = NETIF_RSP_ERROR;
+ printf("xnb(%s:%d): Request sets "
+ "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle "
+ "that\n", __func__, __LINE__);
+ xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
+ xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
+ discard = 1;
}
- id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id;
- notify |= make_rx_response(netif, id, status,
- (unsigned long)m->m_data & PAGE_MASK,
- m->m_pkthdr.len, flags);
-
- m_freem(m);
- mcl++;
- gop++;
+
+ idx++;
}
- if (notify)
- notify_remote_via_irq(netif->irq);
+ /* Set cdr. If there is not more data, cdr is invalid */
+ pkt->cdr = idx;
+
+ /* Loop on remainder of packet */
+ while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
+ netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
+ pkt->list_len++;
+ cdr_size += tx->size;
+ if (tx->flags & ~NETTXF_more_data) {
+ /* There should be no other flags set at this point */
+ printf("xnb(%s:%d): Request sets unknown flags %d "
+ "after the 1st request in the packet.\n",
+ __func__, __LINE__, tx->flags);
+ xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
+ xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
+ }
- return pkts_dequeued;
-}
+ more_data = tx->flags & NETTXF_more_data;
+ idx++;
+ }
-static void
-rx_task_timer(void *arg)
-{
- DDPRINTF("\n");
- taskqueue_enqueue(taskqueue_swi, &net_rx_task);
+ /* Finalize packet */
+ if (more_data != 0) {
+ /* The ring ran out of requests before finishing the packet */
+ xnb_pkt_invalidate(pkt);
+ idx = start; /* tell caller that we consumed no requests */
+ } else {
+ /* Calculate car_size */
+ pkt->car_size = pkt->size - cdr_size;
+ }
+ if (discard != 0) {
+ xnb_pkt_invalidate(pkt);
+ }
+
+ return idx - start;
}
+
+/**
+ * Respond to all the requests that constituted pkt. Builds the responses and
+ * writes them to the ring, but doesn't push them to the shared ring.
+ * \param[in] pkt the packet that needs a response
+ * \param[out] ring Responses go here
+ * \param[in] error true if there was an error handling the packet, such
+ * as in the hypervisor copy op or mbuf allocation
+ */
static void
-net_rx_action(void *context, int pending)
+xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring,
+ int error)
{
- netif_t *netif, *last_zero_work = NULL;
-
- DDPRINTF("\n");
-
- while ((netif = remove_from_rx_schedule_list())) {
- struct ifnet *ifp = netif->ifp;
-
- if (netif == last_zero_work) {
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- add_to_rx_schedule_list_tail(netif);
- netif_put(netif);
- if (!STAILQ_EMPTY(&rx_sched_list))
- callout_reset(&rx_task_callout, 1, rx_task_timer, NULL);
- break;
- }
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- if (netif_rx(netif))
- last_zero_work = NULL;
- else if (!last_zero_work)
- last_zero_work = netif;
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- add_to_rx_schedule_list_tail(netif);
+ /*
+ * Outline:
+ * 1) Respond to the first request
+ * 2) Respond to the extra info request
+ * 3) Loop through every remaining request in the packet, generating
+ * responses that copy those requests' ids and set the status
+ * appropriately.
+ */
+ netif_tx_request_t *tx;
+ netif_tx_response_t *rsp;
+ int i;
+ uint16_t status;
+
+ status = (xnb_pkt_is_valid(pkt) == 0) || error ?
+ NETIF_RSP_ERROR : NETIF_RSP_OKAY;
+ KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car),
+ ("Cannot respond to ring requests out of order"));
+
+ if (pkt->list_len >= 1) {
+ uint16_t id;
+ tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
+ id = tx->id;
+ rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
+ rsp->id = id;
+ rsp->status = status;
+ ring->rsp_prod_pvt++;
+
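+ /*
+ * NETRXF_extra_info and NETTXF_extra_info share the same bit
+ * position, so this test also works on flags that came from a
+ * tx request.
+ */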
+ if (pkt->flags & NETRXF_extra_info) {
+ rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
+ rsp->status = NETIF_RSP_NULL;
+ ring->rsp_prod_pvt++;
}
+ }
- netif_put(netif);
+ for (i=0; i < pkt->list_len - 1; i++) {
+ uint16_t id;
+ tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
+ id = tx->id;
+ rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
+ rsp->id = id;
+ rsp->status = status;
+ ring->rsp_prod_pvt++;
}
}
-static void
-netback_start(struct ifnet *ifp)
+/**
+ * Create an mbuf chain to represent a packet. Initializes all of the headers
+ * in the mbuf chain, but does not copy the data. The returned chain must be
+ * m_freem()'d when no longer needed.
+ * \param[in] pkt A packet to model the mbuf chain after
+ * \return A newly allocated mbuf chain, possibly with clusters attached.
+ * NULL on failure
+ */
+static struct mbuf*
+xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp)
{
- netif_t *netif = (netif_t *)ifp->if_softc;
+ /**
+ * \todo consider using a memory pool for mbufs instead of
+ * reallocating them for every packet
+ */
+ /** \todo handle extra data */
+ struct mbuf *m;
- DDPRINTF("%s\n", IFNAME(netif));
+ m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA);
- add_to_rx_schedule_list_tail(netif);
- taskqueue_enqueue(taskqueue_swi, &net_rx_task);
+ if (m != NULL) {
+ m->m_pkthdr.rcvif = ifp;
+ if (pkt->flags & NETTXF_data_validated) {
+ /*
+ * We lie to the host OS and always tell it that the
+ * checksums are ok, because the packet is unlikely to
+ * get corrupted going across domains.
+ */
+ m->m_pkthdr.csum_flags = (
+ CSUM_IP_CHECKED |
+ CSUM_IP_VALID |
+ CSUM_DATA_VALID |
+ CSUM_PSEUDO_HDR
+ );
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+ }
+ return m;
}
-/* Map a grant ref to a ring */
+/**
+ * Build a gnttab_copy table that can be used to copy data from a pkt
+ * to an mbufc. Does not actually perform the copy. Always uses gref's on
+ * the packet side.
+ * \param[in] pkt pkt's associated requests form the src for
+ * the copy operation
+ * \param[in] mbufc mbufc's storage forms the dest for the copy operation
+ * \param[out] gnttab Storage for the returned grant table
+ * \param[in] txb Pointer to the backend ring structure
+ * \param[in] otherend_id The domain ID of the other end of the copy
+ * \return The number of gnttab entries filled
+ */
static int
-map_ring(grant_ref_t ref, domid_t dom, struct ring_ref *ring)
+xnb_txpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
+ gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb,
+ domid_t otherend_id)
{
- struct gnttab_map_grant_ref op;
-
- ring->va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
- if (ring->va == 0)
- return ENOMEM;
- op.host_addr = ring->va;
- op.flags = GNTMAP_host_map;
- op.ref = ref;
- op.dom = dom;
- HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
- if (op.status) {
- WPRINTF("grant table op err=%d\n", op.status);
- kmem_free(kernel_map, ring->va, PAGE_SIZE);
- ring->va = 0;
- return EACCES;
+ const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
+ int gnt_idx = 0; /* index into grant table */
+ RING_IDX r_idx = pkt->car; /* index into tx ring buffer */
+ int r_ofs = 0; /* offset of next data within tx request's data area */
+ int m_ofs = 0; /* offset of next data within mbuf's data area */
+ /* size in bytes that still needs to be represented in the table */
+ uint16_t size_remaining = pkt->size;
+
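+ /*
+ * Walk the tx requests and the mbuf chain in lockstep, emitting one
+ * copy entry for each contiguous run that fits in both the current
+ * request's data area and the current mbuf. For example, a 3000-byte
+ * packet split across two tx requests but landing in a single mbuf
+ * cluster needs two entries, one per request.
+ */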
+ while (size_remaining > 0) {
+ const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx);
+ const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs;
+ const size_t req_size =
+ r_idx == pkt->car ? pkt->car_size : txq->size;
+ const size_t pkt_space = req_size - r_ofs;
+ /*
+ * space is the largest amount of data that can be copied in the
+ * grant table's next entry
+ */
+ const size_t space = MIN(pkt_space, mbuf_space);
+
+ /* TODO: handle this error condition without panicking */
+ KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));
+
+ gnttab[gnt_idx].source.u.ref = txq->gref;
+ gnttab[gnt_idx].source.domid = otherend_id;
+ gnttab[gnt_idx].source.offset = txq->offset + r_ofs;
+ gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn(
+ mtod(mbuf, vm_offset_t) + m_ofs);
+ gnttab[gnt_idx].dest.offset = virt_to_offset(
+ mtod(mbuf, vm_offset_t) + m_ofs);
+ gnttab[gnt_idx].dest.domid = DOMID_SELF;
+ gnttab[gnt_idx].len = space;
+ gnttab[gnt_idx].flags = GNTCOPY_source_gref;
+
+ gnt_idx++;
+ r_ofs += space;
+ m_ofs += space;
+ size_remaining -= space;
+ if (req_size - r_ofs <= 0) {
+ /* Must move to the next tx request */
+ r_ofs = 0;
+ r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
+ }
+ if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) {
+ /* Must move to the next mbuf */
+ m_ofs = 0;
+ mbuf = mbuf->m_next;
+ }
}
- ring->handle = op.handle;
- ring->bus_addr = op.dev_bus_addr;
-
- return 0;
+ return gnt_idx;
}
-/* Unmap grant ref for a ring */
+/**
+ * Check the status of the grant copy operations, and update the mbuf
+ * chain's non-data fields to reflect the data present.
+ * \param[in,out] mbufc mbuf chain to update. The chain must be valid and of
+ * the correct length, and data should already be present
+ * \param[in] gnttab A grant table for a just completed copy op
+ * \param[in] n_entries The number of valid entries in the grant table
+ */
static void
-unmap_ring(struct ring_ref *ring)
+xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab,
+ int n_entries)
{
- struct gnttab_unmap_grant_ref op;
-
- op.host_addr = ring->va;
- op.dev_bus_addr = ring->bus_addr;
- op.handle = ring->handle;
- HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
- if (op.status)
- WPRINTF("grant table op err=%d\n", op.status);
+ struct mbuf *mbuf = mbufc;
+ int i;
+ size_t total_size = 0;
+
+ for (i = 0; i < n_entries; i++) {
+ KASSERT(gnttab[i].status == GNTST_okay,
+ ("Some gnttab_copy entry had error status %hd\n",
+ gnttab[i].status));
+
+ mbuf->m_len += gnttab[i].len;
+ total_size += gnttab[i].len;
+ if (M_TRAILINGSPACE(mbuf) <= 0) {
+ mbuf = mbuf->m_next;
+ }
+ }
+ mbufc->m_pkthdr.len = total_size;
- kmem_free(kernel_map, ring->va, PAGE_SIZE);
- ring->va = 0;
+ xnb_add_mbuf_cksum(mbufc);
}
+/**
+ * Dequeue at most one packet from the shared ring
+ * \param[in,out] txb Netif tx ring. A packet will be removed from it, and
+ * its private indices will be updated. But the indices
+ * will not be pushed to the shared ring.
+ * \param[in] ifnet Interface to which the packet will be sent
+ * \param[in] otherend Domain ID of the other end of the ring
+ * \param[out] mbufc The assembled mbuf chain, ready to send to the generic
+ * networking stack
+ * \param[in,out] gnttab Pointer to enough memory for a grant table. We make
+ * this a function parameter so that we will take less
+ * stack space.
+ * \return An error code
+ */
static int
-connect_rings(netif_t *netif)
+xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc,
+ struct ifnet *ifnet, gnttab_copy_table gnttab)
{
- struct xenbus_device *xdev = netif->xdev;
- netif_tx_sring_t *txs;
- netif_rx_sring_t *rxs;
- unsigned long tx_ring_ref, rx_ring_ref;
- evtchn_port_t evtchn;
- evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
- int err;
+ struct xnb_pkt pkt;
+ /* number of tx requests consumed to build the last packet */
+ int num_consumed;
+ int nr_ents;
- // Grab FE data and map his memory
- err = xenbus_gather(NULL, xdev->otherend,
- "tx-ring-ref", "%lu", &tx_ring_ref,
- "rx-ring-ref", "%lu", &rx_ring_ref,
- "event-channel", "%u", &evtchn, NULL);
- if (err) {
- xenbus_dev_fatal(xdev, err,
- "reading %s/ring-ref and event-channel",
- xdev->otherend);
- return err;
- }
+ *mbufc = NULL;
+ num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons);
+ if (num_consumed == 0)
+ return 0; /* Nothing to receive */
- err = map_ring(tx_ring_ref, netif->domid, &netif->tx_ring_ref);
- if (err) {
- xenbus_dev_fatal(xdev, err, "mapping tx ring");
- return err;
- }
- txs = (netif_tx_sring_t *)netif->tx_ring_ref.va;
- BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
+ /* update statistics independent of errors */
+ ifnet->if_ipackets++;
- err = map_ring(rx_ring_ref, netif->domid, &netif->rx_ring_ref);
- if (err) {
- unmap_ring(&netif->tx_ring_ref);
- xenbus_dev_fatal(xdev, err, "mapping rx ring");
- return err;
- }
- rxs = (netif_rx_sring_t *)netif->rx_ring_ref.va;
- BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
-
- op.u.bind_interdomain.remote_dom = netif->domid;
- op.u.bind_interdomain.remote_port = evtchn;
- err = HYPERVISOR_event_channel_op(&op);
- if (err) {
- unmap_ring(&netif->tx_ring_ref);
- unmap_ring(&netif->rx_ring_ref);
- xenbus_dev_fatal(xdev, err, "binding event channel");
- return err;
+ /*
+ * If we got here, then 1 or more requests were consumed, but the
+ * packet is not necessarily valid.
+ */
+ if (xnb_pkt_is_valid(&pkt) == 0) {
+ /* got a garbage packet, respond and drop it */
+ xnb_txpkt2rsp(&pkt, txb, 1);
+ txb->req_cons += num_consumed;
+ DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n",
+ num_consumed);
+ ifnet->if_ierrors++;
+ return EINVAL;
}
- netif->evtchn = op.u.bind_interdomain.local_port;
- /* bind evtchn to irq handler */
- netif->irq =
- bind_evtchn_to_irqhandler(netif->evtchn, "netback",
- netback_intr, netif, INTR_TYPE_NET|INTR_MPSAFE, &netif->irq_cookie);
+ *mbufc = xnb_pkt2mbufc(&pkt, ifnet);
+
+ if (*mbufc == NULL) {
+ /*
+ * Couldn't allocate mbufs. Respond and drop the packet. Do
+ * not consume the requests
+ */
+ xnb_txpkt2rsp(&pkt, txb, 1);
+ DPRINTF("xnb_intr: Couldn't allocate mbufs, num_consumed=%d\n",
+ num_consumed);
+ ifnet->if_iqdrops++;
+ return ENOMEM;
+ }
- netif->rings_connected = 1;
+ nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend);
- DPRINTF("%s connected! evtchn=%d irq=%d\n",
- IFNAME(netif), netif->evtchn, netif->irq);
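+ /* A single GNTTABOP_copy hypercall moves all of the packet data. */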
+ if (nr_ents > 0) {
+ int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
+ gnttab, nr_ents);
+ KASSERT(hv_ret == 0,
+ ("HYPERVISOR_grant_table_op returned %d\n", hv_ret));
+ xnb_update_mbufc(*mbufc, gnttab, nr_ents);
+ }
+ xnb_txpkt2rsp(&pkt, txb, 0);
+ txb->req_cons += num_consumed;
return 0;
}
-static void
-disconnect_rings(netif_t *netif)
+/**
+ * Create an xnb_pkt based on the contents of an mbuf chain.
+ * \param[in] mbufc mbuf chain to transform into a packet
+ * \param[out] pkt Storage for the newly generated xnb_pkt
+ * \param[in] start The ring index of the first available slot in the rx
+ * ring
+ * \param[in] space The number of free slots in the rx ring
+ * \retval 0 Success
+ * \retval EINVAL mbufc was corrupt or not convertible into a pkt
+ * \retval EAGAIN There was not enough space in the ring to queue the
+ * packet
+ */
+static int
+xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt,
+ RING_IDX start, int space)
{
- DPRINTF("\n");
- if (netif->rings_connected) {
- unbind_from_irqhandler(netif->irq, netif->irq_cookie);
- netif->irq = 0;
- unmap_ring(&netif->tx_ring_ref);
- unmap_ring(&netif->rx_ring_ref);
- netif->rings_connected = 0;
- }
-}
+ int retval = 0;
-static void
-connect(netif_t *netif)
-{
- if (!netif->xdev ||
- !netif->attached ||
- netif->frontend_state != XenbusStateConnected) {
- return;
- }
+ if ((mbufc == NULL) ||
+ ( (mbufc->m_flags & M_PKTHDR) == 0) ||
+ (mbufc->m_pkthdr.len == 0)) {
+ xnb_pkt_invalidate(pkt);
+ retval = EINVAL;
+ } else {
+ int slots_required;
+
+ xnb_pkt_validate(pkt);
+ pkt->flags = 0;
+ pkt->size = mbufc->m_pkthdr.len;
+ pkt->car = start;
+ pkt->car_size = mbufc->m_len;
+
+ if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) {
+ pkt->flags |= NETRXF_extra_info;
+ pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz;
+ pkt->extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ pkt->extra.u.gso.pad = 0;
+ pkt->extra.u.gso.features = 0;
+ pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO;
+ pkt->extra.flags = 0;
+ pkt->cdr = start + 2;
+ } else {
+ pkt->cdr = start + 1;
+ }
+ if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA)) {
+ pkt->flags |=
+ (NETRXF_csum_blank | NETRXF_data_validated);
+ }
- if (!connect_rings(netif)) {
- xenbus_switch_state(netif->xdev, NULL, XenbusStateConnected);
+ /*
+ * Each ring response can have up to PAGE_SIZE of data.
+ * Assume that we can defragment the mbuf chain efficiently
+ * into responses so that each response but the last uses all
+ * PAGE_SIZE bytes.
+ */
+ pkt->list_len = (pkt->size + PAGE_SIZE - 1) / PAGE_SIZE;
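+ /*
+ * e.g. with 4k pages, a 6000-byte chain yields
+ * (6000 + 4095) / 4096 = 2 responses.
+ */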
- /* Turn on interface */
- netif->ifp->if_drv_flags |= IFF_DRV_RUNNING;
- netif->ifp->if_flags |= IFF_UP;
+ if (pkt->list_len > 1) {
+ pkt->flags |= NETRXF_more_data;
+ }
+
+ slots_required = pkt->list_len +
+ (pkt->flags & NETRXF_extra_info ? 1 : 0);
+ if (slots_required > space) {
+ xnb_pkt_invalidate(pkt);
+ retval = EAGAIN;
+ }
}
+
+ return retval;
}
+/**
+ * Build a gnttab_copy table that can be used to copy data from an mbuf chain
+ * to the frontend's shared buffers. Does not actually perform the copy.
+ * Always uses gref's on the other end's side.
+ * \param[in] pkt pkt's associated responses form the dest for the copy
+ * operation
+ * \param[in] mbufc The source for the copy operation
+ * \param[out] gnttab Storage for the returned grant table
+ * \param[in] rxb Pointer to the backend ring structure
+ * \param[in] otherend_id The domain ID of the other end of the copy
+ * \return The number of gnttab entries filled
+ */
static int
-netback_remove(struct xenbus_device *xdev)
+xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
+ gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb,
+ domid_t otherend_id)
{
- netif_t *netif = xdev->data;
- device_t ndev;
-
- DPRINTF("remove %s\n", xdev->nodename);
- if ((ndev = netif->ndev)) {
- netif->ndev = NULL;
- mtx_lock(&Giant);
- device_detach(ndev);
- mtx_unlock(&Giant);
+ const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
+ int gnt_idx = 0; /* index into grant table */
+ RING_IDX r_idx = pkt->car; /* index into rx ring buffer */
+ int r_ofs = 0; /* offset of next data within rx request's data area */
+ int m_ofs = 0; /* offset of next data within mbuf's data area */
+ /* size in bytes that still needs to be represented in the table */
+ uint16_t size_remaining;
+
+ size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0;
+
+ while (size_remaining > 0) {
+ const netif_rx_request_t *rxq = RING_GET_REQUEST(rxb, r_idx);
+ const size_t mbuf_space = mbuf->m_len - m_ofs;
+ /* Xen shared pages have an implied size of PAGE_SIZE */
+ const size_t req_size = PAGE_SIZE;
+ const size_t pkt_space = req_size - r_ofs;
+ /*
+ * space is the largest amount of data that can be copied in the
+ * grant table's next entry
+ */
+ const size_t space = MIN(pkt_space, mbuf_space);
+
+ /* TODO: handle this error condition without panicking */
+ KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));
+
+ gnttab[gnt_idx].dest.u.ref = rxq->gref;
+ gnttab[gnt_idx].dest.domid = otherend_id;
+ gnttab[gnt_idx].dest.offset = r_ofs;
+ gnttab[gnt_idx].source.u.gmfn = virt_to_mfn(
+ mtod(mbuf, vm_offset_t) + m_ofs);
+ gnttab[gnt_idx].source.offset = virt_to_offset(
+ mtod(mbuf, vm_offset_t) + m_ofs);
+ gnttab[gnt_idx].source.domid = DOMID_SELF;
+ gnttab[gnt_idx].len = space;
+ gnttab[gnt_idx].flags = GNTCOPY_dest_gref;
+
+ gnt_idx++;
+
+ r_ofs += space;
+ m_ofs += space;
+ size_remaining -= space;
+ if (req_size - r_ofs <= 0) {
+ /* Must move to the next rx request */
+ r_ofs = 0;
+ r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
+ }
+ if (mbuf->m_len - m_ofs <= 0) {
+ /* Must move to the next mbuf */
+ m_ofs = 0;
+ mbuf = mbuf->m_next;
+ }
}
- xdev->data = NULL;
- netif->xdev = NULL;
- netif_put(netif);
-
- return 0;
+ return gnt_idx;
}
/**
- * Entry point to this code when a new device is created. Allocate the basic
- * structures and the ring buffers for communication with the frontend.
- * Switch to Connected state.
+ * Generates responses for all the requests that constituted pkt. Builds
+ * responses and writes them to the ring, but doesn't push the shared ring
+ * indices.
+ * \param[in] pkt the packet that needs a response
+ * \param[in] gnttab The grant copy table corresponding to this packet.
+ * Used to determine how many netif_rx_response_t
+ * structures to generate.
+ * \param[in] n_entries Number of relevant entries in the grant table
+ * \param[out] ring Responses go here
+ * \return The number of RX requests that were consumed to generate
+ * the responses
*/
static int
-netback_probe(struct xenbus_device *xdev, const struct xenbus_device_id *id)
+xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab,
+ int n_entries, netif_rx_back_ring_t *ring)
{
- int err;
- long handle;
- char *bridge;
-
- DPRINTF("node=%s\n", xdev->nodename);
-
- /* Grab the handle */
- err = xenbus_scanf(NULL, xdev->nodename, "handle", "%li", &handle);
- if (err != 1) {
- xenbus_dev_fatal(xdev, err, "reading handle");
- return err;
- }
+ /*
+ * This code makes the following assumptions:
+ * * All entries in gnttab set GNTCOPY_dest_gref
+ * * The entries in gnttab are grouped by their grefs: any two
+ * entries with the same gref must be adjacent
+ */
+ int error = 0;
+ int gnt_idx, i;
+ int n_responses = 0;
+ grant_ref_t last_gref = GRANT_REF_INVALID;
+ RING_IDX r_idx;
- /* Check for bridge */
- bridge = xenbus_read(NULL, xdev->nodename, "bridge", NULL);
- if (IS_ERR(bridge))
- bridge = NULL;
+ KASSERT(gnttab != NULL, ("Received a null granttable copy"));
- err = xenbus_switch_state(xdev, NULL, XenbusStateInitWait);
- if (err) {
- xenbus_dev_fatal(xdev, err, "writing switch state");
- return err;
+ /*
+ * In the event of an error, we only need to send one response to the
+ * netfront. In that case, we mustn't write any data to the responses
+ * after the one we send. So we must loop all the way through gnttab
+ * looking for errors before we generate any responses
+ *
+ * Since we're looping through the grant table anyway, we'll count the
+ * number of different gref's in it, which will tell us how many
+ * responses to generate
+ */
+ for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) {
+ int16_t status = gnttab[gnt_idx].status;
+ if (status != GNTST_okay) {
+ DPRINTF(
+ "Got error %d for hypervisor gnttab_copy status\n",
+ status);
+ error = 1;
+ break;
+ }
+ if (gnttab[gnt_idx].dest.u.ref != last_gref) {
+ n_responses++;
+ last_gref = gnttab[gnt_idx].dest.u.ref;
+ }
}
- err = netif_create(handle, xdev, bridge);
- if (err) {
- xenbus_dev_fatal(xdev, err, "creating netif");
- return err;
- }
+ if (error != 0) {
+ uint16_t id;
+ netif_rx_response_t *rsp;
+
+ id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id;
+ rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
+ rsp->id = id;
+ rsp->status = NETIF_RSP_ERROR;
+ n_responses = 1;
+ } else {
+ gnt_idx = 0;
+ const int has_extra = pkt->flags & NETRXF_extra_info;
+ if (has_extra != 0)
+ n_responses++;
- err = vif_add_dev(xdev);
- if (err) {
- netif_put((netif_t *)xdev->data);
- xenbus_dev_fatal(xdev, err, "adding vif device");
- return err;
+ for (i = 0; i < n_responses; i++) {
+ netif_rx_request_t rxq;
+ netif_rx_response_t *rsp;
+
+ r_idx = ring->rsp_prod_pvt + i;
+ /*
+ * We copy the structure of rxq instead of making a
+ * pointer because it shares the same memory as rsp.
+ */
+ rxq = *(RING_GET_REQUEST(ring, r_idx));
+ rsp = RING_GET_RESPONSE(ring, r_idx);
+ if (has_extra && (i == 1)) {
+ netif_extra_info_t *ext =
+ (netif_extra_info_t*)rsp;
+ ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ ext->flags = 0;
+ ext->u.gso.size = pkt->extra.u.gso.size;
+ ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ ext->u.gso.pad = 0;
+ ext->u.gso.features = 0;
+ } else {
+ rsp->id = rxq.id;
+ rsp->status = GNTST_okay;
+ rsp->offset = 0;
+ rsp->flags = 0;
+ if (i < pkt->list_len - 1)
+ rsp->flags |= NETRXF_more_data;
+ if ((i == 0) && has_extra)
+ rsp->flags |= NETRXF_extra_info;
+ if ((i == 0) &&
+ (pkt->flags & NETRXF_data_validated)) {
+ rsp->flags |= NETRXF_data_validated;
+ rsp->flags |= NETRXF_csum_blank;
+ }
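+ /*
+ * A non-negative rx response status is the number of
+ * bytes copied, so sum the lengths of every copy
+ * entry that targeted this request's gref.
+ */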
+ rsp->status = 0;
+ for (; gnttab[gnt_idx].dest.u.ref == rxq.gref;
+ gnt_idx++) {
+ rsp->status += gnttab[gnt_idx].len;
+ }
+ }
+ }
}
- return 0;
+ ring->req_cons += n_responses;
+ ring->rsp_prod_pvt += n_responses;
+ return n_responses;
}
/**
- * We are reconnecting to the backend, due to a suspend/resume, or a backend
- * driver restart. We tear down our netif structure and recreate it, but
- * leave the device-layer structures intact so that this is transparent to the
- * rest of the kernel.
- */
-static int netback_resume(struct xenbus_device *xdev)
-{
- DPRINTF("node=%s\n", xdev->nodename);
- return 0;
-}
-
-
-/**
- * Callback received when the frontend's state changes.
+ * Add IP, TCP, and/or UDP checksums to every mbuf in a chain. The first mbuf
+ * in the chain must start with a struct ether_header.
+ *
+ * XXX This function will perform incorrectly on UDP packets that are split up
+ * into multiple ethernet frames.
*/
-static void frontend_changed(struct xenbus_device *xdev,
- XenbusState frontend_state)
+static void
+xnb_add_mbuf_cksum(struct mbuf *mbufc)
{
- netif_t *netif = xdev->data;
+ struct ether_header *eh;
+ struct ip *iph;
+ uint16_t ether_type;
+
+ eh = mtod(mbufc, struct ether_header*);
+ ether_type = ntohs(eh->ether_type);
+ if (ether_type != ETHERTYPE_IP) {
+ /* Nothing to calculate */
+ return;
+ }
- DPRINTF("state=%d\n", frontend_state);
-
- netif->frontend_state = frontend_state;
+ iph = (struct ip*)(eh + 1);
+ if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
+ iph->ip_sum = 0;
+ iph->ip_sum = in_cksum_hdr(iph);
+ }
- switch (frontend_state) {
- case XenbusStateInitialising:
- case XenbusStateInitialised:
- break;
- case XenbusStateConnected:
- connect(netif);
- break;
- case XenbusStateClosing:
- xenbus_switch_state(xdev, NULL, XenbusStateClosing);
+ switch (iph->ip_p) {
+ case IPPROTO_TCP:
+ if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
+ size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip);
+ struct tcphdr *th = (struct tcphdr*)(iph + 1);
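+ /*
+ * in_pseudo() folds the IPv4 pseudo-header (addresses,
+ * protocol, and TCP length) into a partial sum;
+ * in_cksum_skip() then checksums the TCP header and
+ * payload, skipping the Ethernet and IP headers.
+ */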
+ th->th_sum = in_pseudo(iph->ip_src.s_addr,
+ iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen));
+ th->th_sum = in_cksum_skip(mbufc,
+ sizeof(struct ether_header) + ntohs(iph->ip_len),
+ sizeof(struct ether_header) + (iph->ip_hl << 2));
+ }
break;
- case XenbusStateClosed:
- xenbus_remove_device(xdev);
+ case IPPROTO_UDP:
+ if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
+ size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip);
+ struct udphdr *uh = (struct udphdr*)(iph + 1);
+ uh->uh_sum = in_pseudo(iph->ip_src.s_addr,
+ iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen));
+ uh->uh_sum = in_cksum_skip(mbufc,
+ sizeof(struct ether_header) + ntohs(iph->ip_len),
+ sizeof(struct ether_header) + (iph->ip_hl << 2));
+ }
break;
- case XenbusStateUnknown:
- case XenbusStateInitWait:
- xenbus_dev_fatal(xdev, EINVAL, "saw state %d at frontend",
- frontend_state);
+ default:
break;
}
}
-/* ** Driver registration ** */
-
-static struct xenbus_device_id netback_ids[] = {
- { "vif" },
- { "" }
-};
-
-static struct xenbus_driver netback = {
- .name = "netback",
- .ids = netback_ids,
- .probe = netback_probe,
- .remove = netback_remove,
- .resume= netback_resume,
- .otherend_changed = frontend_changed,
-};
-
static void
-netback_init(void *unused)
+xnb_stop(struct xnb_softc *xnb)
{
- callout_init(&rx_task_callout, CALLOUT_MPSAFE);
-
- mmap_vstart = alloc_empty_page_range(MAX_PENDING_REQS);
- BUG_ON(!mmap_vstart);
-
- pending_cons = 0;
- for (pending_prod = 0; pending_prod < MAX_PENDING_REQS; pending_prod++)
- pending_ring[pending_prod] = pending_prod;
-
- TASK_INIT(&net_tx_task, 0, net_tx_action, NULL);
- TASK_INIT(&net_rx_task, 0, net_rx_action, NULL);
- mtx_init(&tx_sched_list_lock, "nb_tx_sched_lock", "netback tx sched lock", MTX_DEF);
- mtx_init(&rx_sched_list_lock, "nb_rx_sched_lock", "netback rx sched lock", MTX_DEF);
-
- DPRINTF("registering %s\n", netback.name);
+ struct ifnet *ifp;
- xenbus_register_backend(&netback);
+ mtx_assert(&xnb->sc_lock, MA_OWNED);
+ ifp = xnb->xnb_ifp;
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ if_link_state_change(ifp, LINK_STATE_DOWN);
}
-SYSINIT(xnbedev, SI_SUB_PSEUDO, SI_ORDER_ANY, netback_init, NULL)
-
static int
-vif_add_dev(struct xenbus_device *xdev)
+xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
- netif_t *netif = xdev->data;
- device_t nexus, ndev;
- devclass_t dc;
- int err = 0;
-
- mtx_lock(&Giant);
-
- /* We will add a vif device as a child of nexus0 (for now) */
- if (!(dc = devclass_find("nexus")) ||
- !(nexus = devclass_get_device(dc, 0))) {
- WPRINTF("could not find nexus0!\n");
- err = ENOENT;
- goto done;
- }
-
+ struct xnb_softc *xnb = ifp->if_softc;
+#ifdef INET
+ struct ifreq *ifr = (struct ifreq*) data;
+ struct ifaddr *ifa = (struct ifaddr*)data;
+#endif
+ int error = 0;
- /* Create a newbus device representing the vif */
- ndev = BUS_ADD_CHILD(nexus, 0, "vif", netif->ifp->if_dunit);
- if (!ndev) {
- WPRINTF("could not create newbus device %s!\n", IFNAME(netif));
- err = EFAULT;
- goto done;
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ mtx_lock(&xnb->sc_lock);
+ if (ifp->if_flags & IFF_UP) {
+ xnb_ifinit_locked(xnb);
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ xnb_stop(xnb);
+ }
+ }
+ /*
+ * Note: netfront sets a variable named xn_if_flags
+ * here, but that variable is never read
+ */
+ mtx_unlock(&xnb->sc_lock);
+ break;
+ case SIOCSIFADDR:
+ case SIOCGIFADDR:
+#ifdef INET
+ mtx_lock(&xnb->sc_lock);
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
+ IFF_DRV_OACTIVE);
+ if_link_state_change(ifp,
+ LINK_STATE_DOWN);
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if_link_state_change(ifp,
+ LINK_STATE_UP);
+ }
+ arp_ifinit(ifp, ifa);
+ mtx_unlock(&xnb->sc_lock);
+ } else {
+ mtx_unlock(&xnb->sc_lock);
+#endif
+ error = ether_ioctl(ifp, cmd, data);
+#ifdef INET
+ }
+#endif
+ break;
+ case SIOCSIFCAP:
+ mtx_lock(&xnb->sc_lock);
+ if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
+ ifp->if_capenable |= IFCAP_TXCSUM;
+ ifp->if_hwassist |= XNB_CSUM_FEATURES;
+ } else {
+ ifp->if_capenable &= ~(IFCAP_TXCSUM);
+ ifp->if_hwassist &= ~(XNB_CSUM_FEATURES);
+ }
+ if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) {
+ ifp->if_capenable |= IFCAP_RXCSUM;
+ } else {
+ ifp->if_capenable &= ~(IFCAP_RXCSUM);
+ }
+ /*
+ * TODO enable TSO4 and LRO once we no longer need
+ * to calculate checksums in software
+ */
+#if 0
+		if (ifr->ifr_reqcap & IFCAP_TSO4) {
+			if (!(IFCAP_TXCSUM & ifp->if_capenable)) {
+ printf("xnb: Xen netif requires that "
+ "TXCSUM be enabled in order "
+ "to use TSO4\n");
+ error = EINVAL;
+ } else {
+ ifp->if_capenable |= IFCAP_TSO4;
+ ifp->if_hwassist |= CSUM_TSO;
+ }
+ } else {
+ ifp->if_capenable &= ~(IFCAP_TSO4);
+ ifp->if_hwassist &= ~(CSUM_TSO);
+ }
+		if (ifr->ifr_reqcap & IFCAP_LRO) {
+ ifp->if_capenable |= IFCAP_LRO;
+ } else {
+ ifp->if_capenable &= ~(IFCAP_LRO);
+ }
+#endif
+ mtx_unlock(&xnb->sc_lock);
+ break;
+ case SIOCSIFMTU:
+ ifp->if_mtu = ifr->ifr_mtu;
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ xnb_ifinit(xnb);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd);
+ break;
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
}
-
- netif_get(netif);
- device_set_ivars(ndev, netif);
- netif->ndev = ndev;
-
- device_probe_and_attach(ndev);
+ return (error);
+}
- done:
+static void
+xnb_start_locked(struct ifnet *ifp)
+{
+ netif_rx_back_ring_t *rxb;
+ struct xnb_softc *xnb;
+ struct mbuf *mbufc;
+ RING_IDX req_prod_local;
- mtx_unlock(&Giant);
+ xnb = ifp->if_softc;
+ rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;
- return err;
-}
+ if (!xnb->carrier)
+ return;
-enum {
- VIF_SYSCTL_DOMID,
- VIF_SYSCTL_HANDLE,
- VIF_SYSCTL_TXRING,
- VIF_SYSCTL_RXRING,
-};
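+	/*
+	 * Classic Xen shared-ring consumer loop: drain all posted
+	 * requests, re-arm req_event, then re-check req_prod so a
+	 * request that arrived during the drain is not missed.
+	 */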
+ do {
+ int out_of_space = 0;
+ int notify;
+ req_prod_local = rxb->sring->req_prod;
+ xen_rmb();
+ for (;;) {
+ int error;
-static char *
-vif_sysctl_ring_info(netif_t *netif, int cmd)
-{
- char *buf = malloc(256, M_DEVBUF, M_WAITOK);
- if (buf) {
- if (!netif->rings_connected)
- sprintf(buf, "rings not connected\n");
- else if (cmd == VIF_SYSCTL_TXRING) {
- netif_tx_back_ring_t *tx = &netif->tx;
- sprintf(buf, "nr_ents=%x req_cons=%x"
- " req_prod=%x req_event=%x"
- " rsp_prod=%x rsp_event=%x",
- tx->nr_ents, tx->req_cons,
- tx->sring->req_prod, tx->sring->req_event,
- tx->sring->rsp_prod, tx->sring->rsp_event);
- } else {
- netif_rx_back_ring_t *rx = &netif->rx;
- sprintf(buf, "nr_ents=%x req_cons=%x"
- " req_prod=%x req_event=%x"
- " rsp_prod=%x rsp_event=%x",
- rx->nr_ents, rx->req_cons,
- rx->sring->req_prod, rx->sring->req_event,
- rx->sring->rsp_prod, rx->sring->rsp_event);
+ IF_DEQUEUE(&ifp->if_snd, mbufc);
+ if (mbufc == NULL)
+ break;
+ error = xnb_send(rxb, xnb->otherend_id, mbufc,
+ xnb->rx_gnttab);
+ switch (error) {
+ case EAGAIN:
+ /*
+ * Insufficient space in the ring.
+ * Requeue pkt and send when space is
+ * available.
+ */
+ IF_PREPEND(&ifp->if_snd, mbufc);
+ /*
+ * Perhaps the frontend missed an IRQ
+ * and went to sleep. Notify it to wake
+ * it up.
+ */
+ out_of_space = 1;
+ break;
+
+ case EINVAL:
+				/* OS gave a corrupt packet. Drop it. */
+ ifp->if_oerrors++;
+ /* FALLTHROUGH */
+ default:
+ /* Send succeeded, or packet had error.
+ * Free the packet */
+ ifp->if_opackets++;
+ if (mbufc)
+ m_freem(mbufc);
+ break;
+ }
+ if (out_of_space != 0)
+ break;
}
- }
- return buf;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify);
+ if ((notify != 0) || (out_of_space != 0))
+ notify_remote_via_irq(xnb->irq);
+ rxb->sring->req_event = req_prod_local + 1;
+ xen_mb();
+	} while (rxb->sring->req_prod != req_prod_local);
}
+/**
+ * Sends one packet to the ring. Blocks until the packet is on the ring.
+ * \param[in,out] ring	The packet will be pushed onto this ring, but the
+ * 			otherend will not be notified.
+ * \param[in] otherend	The domain ID of the other end of the connection
+ * \param[in] mbufc	Contains one packet to send. Caller must free
+ * \param[in,out] gnttab Pointer to enough memory for a grant table. We make
+ * 			this a function parameter so that we will take less
+ * 			stack space.
+ * \retval EAGAIN The ring did not have enough space for the packet.
+ * 		  The ring has not been modified
+ * \retval EINVAL mbufc was corrupt or not convertible into a pkt
+ */
static int
-vif_sysctl_handler(SYSCTL_HANDLER_ARGS)
+xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc,
+ gnttab_copy_table gnttab)
{
- device_t dev = (device_t)arg1;
- netif_t *netif = (netif_t *)device_get_ivars(dev);
- const char *value;
- char *buf = NULL;
- int err;
-
- switch (arg2) {
- case VIF_SYSCTL_DOMID:
- return sysctl_handle_int(oidp, NULL, netif->domid, req);
- case VIF_SYSCTL_HANDLE:
- return sysctl_handle_int(oidp, NULL, netif->handle, req);
- case VIF_SYSCTL_TXRING:
- case VIF_SYSCTL_RXRING:
- value = buf = vif_sysctl_ring_info(netif, arg2);
- break;
- default:
- return (EINVAL);
+ struct xnb_pkt pkt;
+ int error, n_entries, n_reqs;
+ RING_IDX space;
+
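+	/* Unconsumed requests == ring slots we may fill with responses */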
+ space = ring->sring->req_prod - ring->req_cons;
+ error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space);
+ if (error != 0)
+ return error;
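+	/*
+	 * Build grant-copy entries that move the mbuf data into the
+	 * frontend's posted buffers, then execute the copy in a single
+	 * hypercall.
+	 */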
+ n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend);
+ if (n_entries != 0) {
+ int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
+ gnttab, n_entries);
+ KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n",
+ hv_ret));
}
- err = SYSCTL_OUT(req, value, strlen(value));
- if (buf != NULL)
- free(buf, M_DEVBUF);
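+	/* Publish responses for every consumed request slot */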
+ n_reqs = xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring);
- return err;
+ return 0;
}
-/* Newbus vif device driver probe */
-static int
-vif_probe(device_t dev)
+static void
+xnb_start(struct ifnet *ifp)
{
- DDPRINTF("vif%d\n", device_get_unit(dev));
- return 0;
+ struct xnb_softc *xnb;
+
+ xnb = ifp->if_softc;
+ mtx_lock(&xnb->rx_lock);
+ xnb_start_locked(ifp);
+ mtx_unlock(&xnb->rx_lock);
}
-/* Newbus vif device driver attach */
-static int
-vif_attach(device_t dev)
+/* equivalent of network_open() in Linux */
+static void
+xnb_ifinit_locked(struct xnb_softc *xnb)
{
- netif_t *netif = (netif_t *)device_get_ivars(dev);
- uint8_t mac[ETHER_ADDR_LEN];
-
- DDPRINTF("%s\n", IFNAME(netif));
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "domid", CTLTYPE_INT|CTLFLAG_RD,
- dev, VIF_SYSCTL_DOMID, vif_sysctl_handler, "I",
- "domid of frontend");
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "handle", CTLTYPE_INT|CTLFLAG_RD,
- dev, VIF_SYSCTL_HANDLE, vif_sysctl_handler, "I",
- "handle of frontend");
-#ifdef XEN_NETBACK_DEBUG
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "txring", CTLTYPE_STRING | CTLFLAG_RD,
- dev, VIF_SYSCTL_TXRING, vif_sysctl_handler, "A",
- "tx ring info");
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "rxring", CTLTYPE_STRING | CTLFLAG_RD,
- dev, VIF_SYSCTL_RXRING, vif_sysctl_handler, "A",
- "rx ring info");
-#endif
+ struct ifnet *ifp;
- memset(mac, 0xff, sizeof(mac));
- mac[0] &= ~0x01;
-
- ether_ifattach(netif->ifp, mac);
- netif->attached = 1;
+ ifp = xnb->xnb_ifp;
- connect(netif);
+ mtx_assert(&xnb->sc_lock, MA_OWNED);
- if (netif->bridge) {
- DPRINTF("Adding %s to bridge %s\n", IFNAME(netif), netif->bridge);
- int err = add_to_bridge(netif->ifp, netif->bridge);
- if (err) {
- WPRINTF("Error adding %s to %s; err=%d\n",
- IFNAME(netif), netif->bridge, err);
- }
- }
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ return;
+
+ xnb_stop(xnb);
- return bus_generic_attach(dev);
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if_link_state_change(ifp, LINK_STATE_UP);
}
-/* Newbus vif device driver detach */
-static int
-vif_detach(device_t dev)
-{
- netif_t *netif = (netif_t *)device_get_ivars(dev);
- struct ifnet *ifp = netif->ifp;
- DDPRINTF("%s\n", IFNAME(netif));
+static void
+xnb_ifinit(void *xsc)
+{
+ struct xnb_softc *xnb = xsc;
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ mtx_lock(&xnb->sc_lock);
+ xnb_ifinit_locked(xnb);
+ mtx_unlock(&xnb->sc_lock);
+}
- ether_ifdetach(ifp);
- bus_generic_detach(dev);
+/**
+ * Read the 'mac' node at the given device's node in the store, and parse that
+ * as colon-separated octets, placing the result in the given mac array. mac
+ * must be a preallocated array of length ETHER_ADDR_LEN (as declared in
+ * net/ethernet.h).
+ * Return 0 on success, or errno on error.
+ */
+static int
+xen_net_read_mac(device_t dev, uint8_t mac[])
+{
+ char *s, *e, *macstr;
+ const char *path;
+ int error = 0;
+ int i;
+
+ path = xenbus_get_node(dev);
+ error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
+ if (error != 0) {
+ xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
+ } else {
+ s = macstr;
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ mac[i] = strtoul(s, &e, 16);
+ if (s == e || (e[0] != ':' && e[0] != 0)) {
+ error = ENOENT;
+ break;
+ }
+ s = &e[1];
+ }
+ free(macstr, M_XENBUS);
+ }
+ return error;
+}
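+/*
+ * Example (hypothetical store contents): a "mac" node containing
+ * "00:16:3e:01:02:03" fills mac[] with {0x00, 0x16, 0x3e, 0x01, 0x02, 0x03}.
+ */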
- netif->attached = 0;
- netif_put(netif);
+/**
+ * Callback used by the generic networking code when the user requests a
+ * media change. Since we don't have physical media to reconfigure, there
+ * is nothing to do
+ */
+static int
+xnb_ifmedia_upd(struct ifnet *ifp)
+{
+ return (0);
+}
- return 0;
+/**
+ * Callback used by the generic networking code to ask us what our carrier
+ * state is. Since we don't have a physical carrier, this is very simple
+ */
+static void
+xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
+ ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}
-static device_method_t vif_methods[] = {
+
+/*---------------------------- NewBus Registration ---------------------------*/
+static device_method_t xnb_methods[] = {
/* Device interface */
- DEVMETHOD(device_probe, vif_probe),
- DEVMETHOD(device_attach, vif_attach),
- DEVMETHOD(device_detach, vif_detach),
+ DEVMETHOD(device_probe, xnb_probe),
+ DEVMETHOD(device_attach, xnb_attach),
+ DEVMETHOD(device_detach, xnb_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, bus_generic_suspend),
- DEVMETHOD(device_resume, bus_generic_resume),
- {0, 0}
-};
+ DEVMETHOD(device_suspend, xnb_suspend),
+ DEVMETHOD(device_resume, xnb_resume),
-static devclass_t vif_devclass;
+ /* Xenbus interface */
+ DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed),
-static driver_t vif_driver = {
- "vif",
- vif_methods,
- 0,
+ { 0, 0 }
};
-DRIVER_MODULE(vif, nexus, vif_driver, vif_devclass, 0, 0);
+static driver_t xnb_driver = {
+ "xnb",
+ xnb_methods,
+ sizeof(struct xnb_softc),
+};
+devclass_t xnb_devclass;
+DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: t
- * End:
- */
+
+/*-------------------------- Unit Tests -------------------------------------*/
+#ifdef XNB_DEBUG
+#include "netback_unit_tests.c"
+#endif
diff --git a/sys/dev/xen/netback/netback_unit_tests.c b/sys/dev/xen/netback/netback_unit_tests.c
new file mode 100644
index 0000000..a3b0bc8
--- /dev/null
+++ b/sys/dev/xen/netback/netback_unit_tests.c
@@ -0,0 +1,2530 @@
+/*-
+ * Copyright (c) 2009-2011 Spectra Logic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * Authors: Justin T. Gibbs (Spectra Logic Corporation)
+ * Alan Somers (Spectra Logic Corporation)
+ * John Suykerbuyk (Spectra Logic Corporation)
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/**
+ * \file netback_unit_tests.c
+ *
+ * \brief Unit tests for the Xen netback driver.
+ *
+ * Due to the driver's use of static functions, these tests cannot be compiled
+ * standalone; they must be #include'd from the driver's .c file.
+ */
+
+
+/** Helper macro used to snprintf to a buffer and update the buffer pointer */
+#define SNCATF(buffer, buflen, ...) do { \
+ size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__); \
+ buffer += new_chars; \
+ /* be careful; snprintf's return value can be > buflen */ \
+ buflen -= MIN(buflen, new_chars); \
+} while (0)
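+/*
+ * Example (hypothetical values): given char buf[80]; char *p = buf;
+ * size_t len = sizeof(buf); then SNCATF(p, len, "%d tests\n", 5)
+ * appends to the buffer, advances p past the new text, and shrinks
+ * len, clamping it at zero on truncation.
+ */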
+
+/* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+
+/**
+ * Writes an error message to buffer if cond is false, and returns true
+ * iff the assertion failed. Note the implied parameters buffer and
+ * buflen
+ */
+#define XNB_ASSERT(cond) ({ \
+ int passed = (cond); \
+ char *_buffer = (buffer); \
+ size_t _buflen = (buflen); \
+ if (! passed) { \
+ strlcat(_buffer, __func__, _buflen); \
+ strlcat(_buffer, ":" TOSTRING(__LINE__) \
+ " Assertion Error: " #cond "\n", _buflen); \
+ } \
+ ! passed; })
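+/*
+ * Example: within a testcase_t body (where buffer and buflen are in
+ * scope), XNB_ASSERT(pkt.size == 69) appends a message such as
+ * "xnb_ring2pkt_1req:NNN Assertion Error: pkt.size == 69" on failure
+ * and evaluates to nonzero iff the assertion failed.
+ */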
+
+
+/**
+ * The signature used by all testcases. If the test writes anything
+ * to buffer, then it will be considered a failure
+ * \param buffer Return storage for error messages
+ * \param buflen The space available in the buffer
+ */
+typedef void testcase_t(char *buffer, size_t buflen);
+
+/**
+ * Signature used by setup functions
+ * \return nonzero on error
+ */
+typedef int setup_t(void);
+
+typedef void teardown_t(void);
+
+/** A simple test fixture comprising setup, teardown, and test */
+struct test_fixture {
+ /** Will be run before the test to allocate and initialize variables */
+ setup_t *setup;
+
+ /** Will be run if setup succeeds */
+ testcase_t *test;
+
+	/** Cleans up test data whether or not the setup succeeded */
+ teardown_t *teardown;
+};
+
+typedef struct test_fixture test_fixture_t;
+
+static void xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
+ uint16_t ip_id, uint16_t ip_p,
+ uint16_t ip_off, uint16_t ip_sum);
+static void xnb_fill_tcp(struct mbuf *m);
+static int xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
+static int xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
+ char *buffer, size_t buflen);
+
+static int __unused
+null_setup(void) { return 0; }
+
+static void __unused
+null_teardown(void) { }
+
+static setup_t setup_pvt_data;
+static teardown_t teardown_pvt_data;
+static testcase_t xnb_ring2pkt_emptyring;
+static testcase_t xnb_ring2pkt_1req;
+static testcase_t xnb_ring2pkt_2req;
+static testcase_t xnb_ring2pkt_3req;
+static testcase_t xnb_ring2pkt_extra;
+static testcase_t xnb_ring2pkt_partial;
+static testcase_t xnb_ring2pkt_wraps;
+static testcase_t xnb_txpkt2rsp_emptypkt;
+static testcase_t xnb_txpkt2rsp_1req;
+static testcase_t xnb_txpkt2rsp_extra;
+static testcase_t xnb_txpkt2rsp_long;
+static testcase_t xnb_txpkt2rsp_invalid;
+static testcase_t xnb_txpkt2rsp_error;
+static testcase_t xnb_txpkt2rsp_wraps;
+static testcase_t xnb_pkt2mbufc_empty;
+static testcase_t xnb_pkt2mbufc_short;
+static testcase_t xnb_pkt2mbufc_csum;
+static testcase_t xnb_pkt2mbufc_1cluster;
+static testcase_t xnb_pkt2mbufc_largecluster;
+static testcase_t xnb_pkt2mbufc_2cluster;
+static testcase_t xnb_txpkt2gnttab_empty;
+static testcase_t xnb_txpkt2gnttab_short;
+static testcase_t xnb_txpkt2gnttab_2req;
+static testcase_t xnb_txpkt2gnttab_2cluster;
+static testcase_t xnb_update_mbufc_short;
+static testcase_t xnb_update_mbufc_2req;
+static testcase_t xnb_update_mbufc_2cluster;
+static testcase_t xnb_mbufc2pkt_empty;
+static testcase_t xnb_mbufc2pkt_short;
+static testcase_t xnb_mbufc2pkt_1cluster;
+static testcase_t xnb_mbufc2pkt_2short;
+static testcase_t xnb_mbufc2pkt_long;
+static testcase_t xnb_mbufc2pkt_extra;
+static testcase_t xnb_mbufc2pkt_nospace;
+static testcase_t xnb_rxpkt2gnttab_empty;
+static testcase_t xnb_rxpkt2gnttab_short;
+static testcase_t xnb_rxpkt2gnttab_2req;
+static testcase_t xnb_rxpkt2rsp_empty;
+static testcase_t xnb_rxpkt2rsp_short;
+static testcase_t xnb_rxpkt2rsp_extra;
+static testcase_t xnb_rxpkt2rsp_2short;
+static testcase_t xnb_rxpkt2rsp_2slots;
+static testcase_t xnb_rxpkt2rsp_copyerror;
+/* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
+static testcase_t xnb_add_mbuf_cksum_arp;
+static testcase_t xnb_add_mbuf_cksum_tcp;
+static testcase_t xnb_add_mbuf_cksum_udp;
+static testcase_t xnb_add_mbuf_cksum_icmp;
+static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
+static testcase_t xnb_sscanf_llu;
+static testcase_t xnb_sscanf_lld;
+static testcase_t xnb_sscanf_hhu;
+static testcase_t xnb_sscanf_hhd;
+static testcase_t xnb_sscanf_hhn;
+
+/** Private data used by unit tests */
+static struct {
+ gnttab_copy_table gnttab;
+ netif_rx_back_ring_t rxb;
+ netif_rx_front_ring_t rxf;
+ netif_tx_back_ring_t txb;
+ netif_tx_front_ring_t txf;
+ struct ifnet* ifp;
+ netif_rx_sring_t* rxs;
+ netif_tx_sring_t* txs;
+} xnb_unit_pvt;
+
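+/** Free an mbuf chain, if there is one, and null out the caller's pointer. */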
+static inline void safe_m_freem(struct mbuf **ppMbuf) {
+ if (*ppMbuf != NULL) {
+ m_freem(*ppMbuf);
+ *ppMbuf = NULL;
+ }
+}
+
+/**
+ * The unit test runner. It will run every supplied test and return an
+ * output message as a string
+ * \param tests An array of tests. Every test will be attempted.
+ * \param ntests The length of tests
+ * \param buffer Return storage for the result string
+ * \param buflen The length of buffer
+ * \return The number of tests that failed
+ */
+static int
+xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
+ size_t buflen)
+{
+ int i;
+ int n_passes;
+ int n_failures = 0;
+
+ for (i = 0; i < ntests; i++) {
+ int error = tests[i].setup();
+ if (error != 0) {
+ SNCATF(buffer, buflen,
+ "Setup failed for test idx %d\n", i);
+ n_failures++;
+ } else {
+ size_t new_chars;
+
+ tests[i].test(buffer, buflen);
+ new_chars = strnlen(buffer, buflen);
+ buffer += new_chars;
+ buflen -= new_chars;
+
+ if (new_chars > 0) {
+ n_failures++;
+ }
+ }
+ tests[i].teardown();
+ }
+
+ n_passes = ntests - n_failures;
+ if (n_passes > 0) {
+ SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
+ }
+ if (n_failures > 0) {
+ SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
+ }
+
+ return n_failures;
+}
+
+/** Number of unit tests. Must match the length of the tests array below */
+#define TOTAL_TESTS (53)
+/**
+ * Max memory available for returning results. 400 chars/test should give
+ * enough space for a five line error message for every test
+ */
+#define TOTAL_BUFLEN (400 * TOTAL_TESTS + 2)
+
+/**
+ * Called from userspace by a sysctl. Runs all internal unit tests, and
+ * returns the results to userspace as a string
+ * \param oidp unused
+ * \param arg1 pointer to an xnb_softc for a specific xnb device
+ * \param arg2 unused
+ * \param req sysctl access structure
+ * \return a string via the special SYSCTL_OUT macro.
+ */
+
+static int
+xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
+ test_fixture_t const tests[TOTAL_TESTS] = {
+ {setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
+ {setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
+ {setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
+ {setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
+ {setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
+ {setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
+ {setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
+ {setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
+ {setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
+ {setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
+ {setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
+ {setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
+ {setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
+ {setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
+ {setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
+ {setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
+ {setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
+ {setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
+ {setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
+ {setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
+ {setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
+ {setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
+ {setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
+ {setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
+ {setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
+ {setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
+ {setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
+ {setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
+ {setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
+ {setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
+ {setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
+ {setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
+ {setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
+ {null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
+ {null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
+ {null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
+ {null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
+ {null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
+ {null_setup, xnb_sscanf_hhd, null_teardown},
+ {null_setup, xnb_sscanf_hhu, null_teardown},
+ {null_setup, xnb_sscanf_lld, null_teardown},
+ {null_setup, xnb_sscanf_llu, null_teardown},
+ {null_setup, xnb_sscanf_hhn, null_teardown},
+ };
+ /**
+ * results is static so that the data will persist after this function
+ * returns. The sysctl code expects us to return a constant string.
+ * \todo: the static variable is not thread safe. Put a mutex around
+ * it.
+ */
+ static char results[TOTAL_BUFLEN];
+
+ /* empty the result strings */
+ results[0] = 0;
+ xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);
+
+ return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
+}
+
+static int
+setup_pvt_data(void)
+{
+ int error = 0;
+
+ bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));
+
+ xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
+ if (xnb_unit_pvt.txs != NULL) {
+ SHARED_RING_INIT(xnb_unit_pvt.txs);
+ BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
+ FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
+ } else {
+ error = 1;
+ }
+
+ xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
+ if (xnb_unit_pvt.ifp == NULL) {
+ error = 1;
+ }
+
+ xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
+ if (xnb_unit_pvt.rxs != NULL) {
+ SHARED_RING_INIT(xnb_unit_pvt.rxs);
+ BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
+ FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
+ } else {
+ error = 1;
+ }
+
+ return error;
+}
+
+static void
+teardown_pvt_data(void)
+{
+ if (xnb_unit_pvt.txs != NULL) {
+ free(xnb_unit_pvt.txs, M_XENNETBACK);
+ }
+ if (xnb_unit_pvt.rxs != NULL) {
+ free(xnb_unit_pvt.rxs, M_XENNETBACK);
+ }
+ if (xnb_unit_pvt.ifp != NULL) {
+ if_free(xnb_unit_pvt.ifp);
+ }
+}
+
+/**
+ * Verify that xnb_ring2pkt will not consume any requests from an empty ring
+ */
+static void
+xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int num_consumed;
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ XNB_ASSERT(num_consumed == 0);
+}
+
+/**
+ * Verify that xnb_ring2pkt can convert a single request packet correctly
+ */
+static void
+xnb_ring2pkt_1req(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int num_consumed;
+ struct netif_tx_request *req;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+
+ req->flags = 0;
+ req->size = 69; /* arbitrary number for test */
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ XNB_ASSERT(num_consumed == 1);
+ XNB_ASSERT(pkt.size == 69);
+ XNB_ASSERT(pkt.car_size == 69);
+ XNB_ASSERT(pkt.flags == 0);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.list_len == 1);
+ XNB_ASSERT(pkt.car == 0);
+}
+
+/**
+ * Verify that xnb_ring2pkt can convert a two request packet correctly.
+ * This tests handling of the MORE_DATA flag and cdr
+ */
+static void
+xnb_ring2pkt_2req(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int num_consumed;
+ struct netif_tx_request *req;
+ RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 100;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = 40;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ XNB_ASSERT(num_consumed == 2);
+ XNB_ASSERT(pkt.size == 100);
+ XNB_ASSERT(pkt.car_size == 60);
+ XNB_ASSERT(pkt.flags == 0);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.list_len == 2);
+ XNB_ASSERT(pkt.car == start_idx);
+ XNB_ASSERT(pkt.cdr == start_idx + 1);
+}
+
+/**
+ * Verify that xnb_ring2pkt can convert a three request packet correctly
+ */
+static void
+xnb_ring2pkt_3req(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int num_consumed;
+ struct netif_tx_request *req;
+ RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 200;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 40;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = 50;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ XNB_ASSERT(num_consumed == 3);
+ XNB_ASSERT(pkt.size == 200);
+ XNB_ASSERT(pkt.car_size == 110);
+ XNB_ASSERT(pkt.flags == 0);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.list_len == 3);
+ XNB_ASSERT(pkt.car == start_idx);
+ XNB_ASSERT(pkt.cdr == start_idx + 1);
+ XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
+}
+
+/**
+ * Verify that xnb_ring2pkt can read a packet with extra info
+ */
+static void
+xnb_ring2pkt_extra(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int num_consumed;
+ struct netif_tx_request *req;
+ struct netif_extra_info *ext;
+ RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_extra_info | NETTXF_more_data;
+ req->size = 150;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ ext->flags = 0;
+ ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ ext->u.gso.size = 250;
+ ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ ext->u.gso.features = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = 50;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ XNB_ASSERT(num_consumed == 3);
+ XNB_ASSERT(pkt.extra.flags == 0);
+ XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
+ XNB_ASSERT(pkt.extra.u.gso.size == 250);
+	XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
+ XNB_ASSERT(pkt.size == 150);
+ XNB_ASSERT(pkt.car_size == 100);
+ XNB_ASSERT(pkt.flags == NETTXF_extra_info);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.list_len == 2);
+ XNB_ASSERT(pkt.car == start_idx);
+ XNB_ASSERT(pkt.cdr == start_idx + 2);
+ XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
+}
+
+/**
+ * Verify that xnb_ring2pkt will consume no requests if the entire packet is
+ * not yet in the ring
+ */
+static void
+xnb_ring2pkt_partial(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int num_consumed;
+ struct netif_tx_request *req;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 150;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ XNB_ASSERT(num_consumed == 0);
+ XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
+}
+
+/**
+ * Verify that xnb_ring2pkt can read a packet whose requests wrap around
+ * the end of the ring
+ */
+static void
+xnb_ring2pkt_wraps(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int num_consumed;
+ struct netif_tx_request *req;
+ unsigned int rsize;
+
+ /*
+ * Manually tweak the ring indices to create a ring with no responses
+ * and the next request slot at position 2 from the end
+ */
+ rsize = RING_SIZE(&xnb_unit_pvt.txf);
+ xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
+ xnb_unit_pvt.txf.rsp_cons = rsize - 2;
+ xnb_unit_pvt.txs->req_prod = rsize - 2;
+ xnb_unit_pvt.txs->req_event = rsize - 1;
+ xnb_unit_pvt.txs->rsp_prod = rsize - 2;
+ xnb_unit_pvt.txs->rsp_event = rsize - 1;
+ xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
+ xnb_unit_pvt.txb.req_cons = rsize - 2;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 550;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 100;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = 50;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ XNB_ASSERT(num_consumed == 3);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.list_len == 3);
+ XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
+}
+
+
+/**
+ * xnb_txpkt2rsp should do nothing for an empty packet
+ */
+static void
+xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
+{
+ int num_consumed;
+ struct xnb_pkt pkt;
+ netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
+ netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
+ pkt.list_len = 0;
+
+	/* must call xnb_ring2pkt just to initialize pkt */
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
+ XNB_ASSERT(
+ memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
+ XNB_ASSERT(
+ memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
+}
+
+/**
+ * xnb_txpkt2rsp responding to one request
+ */
+static void
+xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
+{
+ uint16_t num_consumed;
+ struct xnb_pkt pkt;
+ struct netif_tx_request *req;
+ struct netif_tx_response *rsp;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->size = 1000;
+ req->flags = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ xnb_unit_pvt.txb.req_cons += num_consumed;
+
+ xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
+
+ XNB_ASSERT(
+ xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
+ XNB_ASSERT(rsp->id == req->id);
+ XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
+}
+
+/**
+ * xnb_txpkt2rsp responding to 1 data request and 1 extra info
+ */
+static void
+xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
+{
+ uint16_t num_consumed;
+ struct xnb_pkt pkt;
+ struct netif_tx_request *req;
+ netif_extra_info_t *ext;
+ struct netif_tx_response *rsp;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->size = 1000;
+ req->flags = NETTXF_extra_info;
+ req->id = 69;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ ext->flags = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ xnb_unit_pvt.txb.req_cons += num_consumed;
+
+ xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
+
+ XNB_ASSERT(
+ xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
+
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
+ XNB_ASSERT(rsp->id == req->id);
+ XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
+
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
+ xnb_unit_pvt.txf.rsp_cons + 1);
+ XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
+}
+
+/**
+ * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
+ */
+static void
+xnb_txpkt2rsp_long(char *buffer, size_t buflen)
+{
+ uint16_t num_consumed;
+ struct xnb_pkt pkt;
+ struct netif_tx_request *req;
+ netif_extra_info_t *ext;
+ struct netif_tx_response *rsp;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->size = 1000;
+ req->flags = NETTXF_extra_info | NETTXF_more_data;
+ req->id = 254;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ ext->flags = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->size = 300;
+ req->flags = NETTXF_more_data;
+ req->id = 1034;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->size = 400;
+ req->flags = 0;
+ req->id = 34;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ xnb_unit_pvt.txb.req_cons += num_consumed;
+
+ xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
+
+ XNB_ASSERT(
+ xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
+
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
+ XNB_ASSERT(rsp->id ==
+ RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
+ XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
+
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
+ xnb_unit_pvt.txf.rsp_cons + 1);
+ XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
+
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
+ xnb_unit_pvt.txf.rsp_cons + 2);
+ XNB_ASSERT(rsp->id ==
+ RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
+ XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
+
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
+ xnb_unit_pvt.txf.rsp_cons + 3);
+ XNB_ASSERT(rsp->id ==
+ RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
+ XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
+}
+
+/**
+ * xnb_txpkt2rsp responding to an invalid packet.
+ * Note: this test will result in an error message being printed to the console
+ * such as:
+ * xnb(xnb_ring2pkt:1306): Unknown extra info type 255. Discarding packet
+ */
+static void
+xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
+{
+ uint16_t num_consumed;
+ struct xnb_pkt pkt;
+ struct netif_tx_request *req;
+ netif_extra_info_t *ext;
+ struct netif_tx_response *rsp;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->size = 1000;
+ req->flags = NETTXF_extra_info;
+ req->id = 69;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ ext->type = 0xFF; /* Invalid extra type */
+ ext->flags = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ xnb_unit_pvt.txb.req_cons += num_consumed;
+ XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
+
+ xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
+
+ XNB_ASSERT(
+ xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
+
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
+ XNB_ASSERT(rsp->id == req->id);
+ XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
+
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
+ xnb_unit_pvt.txf.rsp_cons + 1);
+ XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
+}
+
+/**
+ * xnb_txpkt2rsp responding to one request which caused an error
+ */
+static void
+xnb_txpkt2rsp_error(char *buffer, size_t buflen)
+{
+ uint16_t num_consumed;
+ struct xnb_pkt pkt;
+ struct netif_tx_request *req;
+ struct netif_tx_response *rsp;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->size = 1000;
+ req->flags = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ xnb_unit_pvt.txb.req_cons += num_consumed;
+
+ xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
+
+ XNB_ASSERT(
+ xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
+ XNB_ASSERT(rsp->id == req->id);
+ XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
+}
+
+/**
+ * xnb_txpkt2rsp's responses wrap around the end of the ring
+ */
+static void
+xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int num_consumed;
+ struct netif_tx_request *req;
+ struct netif_tx_response *rsp;
+ unsigned int rsize;
+
+ /*
+ * Manually tweak the ring indices to create a ring with no responses
+ * and the next request slot at position 2 from the end
+ */
+ rsize = RING_SIZE(&xnb_unit_pvt.txf);
+ xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
+ xnb_unit_pvt.txf.rsp_cons = rsize - 2;
+ xnb_unit_pvt.txs->req_prod = rsize - 2;
+ xnb_unit_pvt.txs->req_event = rsize - 1;
+ xnb_unit_pvt.txs->rsp_prod = rsize - 2;
+ xnb_unit_pvt.txs->rsp_event = rsize - 1;
+ xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
+ xnb_unit_pvt.txb.req_cons = rsize - 2;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 550;
+ req->id = 1;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 100;
+ req->id = 2;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = 50;
+ req->id = 3;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+
+ xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
+
+ XNB_ASSERT(
+ xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
+ xnb_unit_pvt.txf.rsp_cons + 2);
+ XNB_ASSERT(rsp->id == req->id);
+ XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
+}
+
+
+/**
+ * Helper function used to setup pkt2mbufc tests
+ * \param[out] pkt the returned packet object
+ * \param size size in bytes of the single request to push to the ring
+ * \param flags optional flags to put in the netif request
+ * \return number of requests consumed from the ring
+ */
+static int
+xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
+{
+ struct netif_tx_request *req;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = flags;
+ req->size = size;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+}
+
+/**
+ * xnb_pkt2mbufc on an empty packet
+ */
+static void
+xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
+{
+ int num_consumed;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+ pkt.list_len = 0;
+
+	/* must call xnb_ring2pkt just to initialize pkt */
+ num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
+ xnb_unit_pvt.txb.req_cons);
+ pkt.size = 0;
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
+ */
+static void
+xnb_pkt2mbufc_short(char *buffer, size_t buflen)
+{
+ const size_t size = MINCLSIZE - 1;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+
+ xnb_get1pkt(&pkt, size, 0);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_pkt2mbufc on short packet whose checksum was validated by the netfront
+ */
+static void
+xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
+{
+ const size_t size = MINCLSIZE - 1;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+
+ xnb_get1pkt(&pkt, size, NETTXF_data_validated);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
+ XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
+ XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
+ XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
+ XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_pkt2mbufc on packet that can fit in one cluster
+ */
+static void
+xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
+{
+ const size_t size = MINCLSIZE;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+
+ xnb_get1pkt(&pkt, size, 0);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
+ */
+static void
+xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
+{
+ const size_t size = MCLBYTES + 1;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+
+ xnb_get1pkt(&pkt, size, 0);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_pkt2mbufc on packet that cannot fit in two clusters
+ */
+static void
+xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
+{
+ const size_t size = 2 * MCLBYTES + 1;
+ size_t space = 0;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+ struct mbuf *m;
+
+ xnb_get1pkt(&pkt, size, 0);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+
+ for (m = pMbuf; m != NULL; m = m->m_next) {
+ space += M_TRAILINGSPACE(m);
+ }
+ XNB_ASSERT(space >= size);
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_txpkt2gnttab on an empty packet. Should return empty gnttab
+ */
+static void
+xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
+{
+ int n_entries;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+ pkt.list_len = 0;
+
+	/* must call xnb_ring2pkt just to initialize pkt */
+ xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
+ pkt.size = 0;
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
+ XNB_ASSERT(n_entries == 0);
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_txpkt2gnttab on a short packet, that can fit in one mbuf internal buffer
+ * and has one request
+ */
+static void
+xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
+{
+ const size_t size = MINCLSIZE - 1;
+ int n_entries;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+
+ struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = size;
+ req->gref = 7;
+ req->offset = 17;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
+ XNB_ASSERT(n_entries == 1);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
+ /* flags should indicate gref's for source */
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
+ mtod(pMbuf, vm_offset_t)));
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
+ virt_to_mfn(mtod(pMbuf, vm_offset_t)));
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
+ * mbuf cluster
+ */
+static void
+xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
+{
+ int n_entries;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+
+ struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 1900;
+ req->gref = 7;
+ req->offset = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = 500;
+ req->gref = 8;
+ req->offset = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
+
+ XNB_ASSERT(n_entries == 2);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
+ mtod(pMbuf, vm_offset_t)));
+
+ XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
+ mtod(pMbuf, vm_offset_t) + 1400));
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
+ */
+static void
+xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
+{
+ int n_entries;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+ const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
+
+ struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = data_this_transaction;
+ req->gref = 8;
+ req->offset = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+ xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
+
+ if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
+ /* there should be three mbufs and three gnttab entries */
+ XNB_ASSERT(n_entries == 3);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
+ XNB_ASSERT(
+ xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
+ mtod(pMbuf, vm_offset_t)));
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
+
+ XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
+ XNB_ASSERT(
+ xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
+ mtod(pMbuf->m_next, vm_offset_t)));
+ XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);
+
+ XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
+ XNB_ASSERT(
+ xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
+ mtod(pMbuf->m_next, vm_offset_t)));
+ XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
+ MCLBYTES);
+ } else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
+ /* there should be two mbufs and two gnttab entries */
+ XNB_ASSERT(n_entries == 2);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
+ XNB_ASSERT(
+ xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
+ mtod(pMbuf, vm_offset_t)));
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
+
+ XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
+ XNB_ASSERT(
+ xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
+ mtod(pMbuf->m_next, vm_offset_t)));
+ XNB_ASSERT(
+ xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);
+
+ } else {
+ /* should never get here */
+ XNB_ASSERT(0);
+ }
+ if (pMbuf != NULL)
+ m_freem(pMbuf);
+}
+
+
+/**
+ * xnb_update_mbufc on a short packet that only has one gnttab entry
+ */
+static void
+xnb_update_mbufc_short(char *buffer, size_t buflen)
+{
+ const size_t size = MINCLSIZE - 1;
+ int n_entries;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+
+ struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = size;
+ req->gref = 7;
+ req->offset = 17;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
+
+ /* Update grant table's status fields as the hypervisor call would */
+ xnb_unit_pvt.gnttab[0].status = GNTST_okay;
+
+ xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
+ XNB_ASSERT(pMbuf->m_len == size);
+ XNB_ASSERT(pMbuf->m_pkthdr.len == size);
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_update_mbufc on a packet with two requests, that can fit into a single
+ * mbuf cluster
+ */
+static void
+xnb_update_mbufc_2req(char *buffer, size_t buflen)
+{
+ int n_entries;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+
+ struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = NETTXF_more_data;
+ req->size = 1900;
+ req->gref = 7;
+ req->offset = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = 500;
+ req->gref = 8;
+ req->offset = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+
+ xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
+
+ /* Update grant table's status fields as the hypervisor call would */
+ xnb_unit_pvt.gnttab[0].status = GNTST_okay;
+ xnb_unit_pvt.gnttab[1].status = GNTST_okay;
+
+ xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
+ XNB_ASSERT(n_entries == 2);
+ XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
+ XNB_ASSERT(pMbuf->m_len == 1900);
+
+ safe_m_freem(&pMbuf);
+}
+
+/**
+ * xnb_update_mbufc on a single request that spans two mbuf clusters
+ */
+static void
+xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
+{
+ int i;
+ int n_entries;
+ struct xnb_pkt pkt;
+ struct mbuf *pMbuf;
+ const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
+
+ struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
+ xnb_unit_pvt.txf.req_prod_pvt);
+ req->flags = 0;
+ req->size = data_this_transaction;
+ req->gref = 8;
+ req->offset = 0;
+ xnb_unit_pvt.txf.req_prod_pvt++;
+
+ RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
+ xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
+
+ pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
+ n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
+
+ /* Update grant table's status fields */
+ for (i = 0; i < n_entries; i++) {
+		xnb_unit_pvt.gnttab[i].status = GNTST_okay;
+ }
+ xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
+
+ if (n_entries == 3) {
+ /* there should be three mbufs and three gnttab entries */
+ XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
+ XNB_ASSERT(pMbuf->m_len == MCLBYTES);
+ XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
+ XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
+ } else if (n_entries == 2) {
+ /* there should be two mbufs and two gnttab entries */
+ XNB_ASSERT(n_entries == 2);
+ XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
+ XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
+ XNB_ASSERT(pMbuf->m_next->m_len == 1);
+ } else {
+ /* should never get here */
+ XNB_ASSERT(0);
+ }
+ safe_m_freem(&pMbuf);
+}
+
+/** xnb_mbufc2pkt on an empty mbufc */
+static void
+xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
+ struct xnb_pkt pkt;
+ int free_slots = 64;
+ struct mbuf *mbuf;
+
+ mbuf = m_get(M_WAITOK, MT_DATA);
+ /*
+ * note: it is illegal to set M_PKTHDR on a mbuf with no data. Doing so
+ * will cause m_freem to segfault
+ */
+ XNB_ASSERT(mbuf->m_len == 0);
+
+ xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
+ XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
+
+ safe_m_freem(&mbuf);
+}
+
+/** xnb_mbufc2pkt on a short mbufc */
+static void
+xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
+ struct xnb_pkt pkt;
+ size_t size = 128;
+ int free_slots = 64;
+ RING_IDX start = 9;
+ struct mbuf *mbuf;
+
+ mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
+ mbuf->m_flags |= M_PKTHDR;
+ mbuf->m_pkthdr.len = size;
+ mbuf->m_len = size;
+
+ xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.size == size);
+ XNB_ASSERT(pkt.car_size == size);
+ XNB_ASSERT(! (pkt.flags &
+ (NETRXF_more_data | NETRXF_extra_info)));
+ XNB_ASSERT(pkt.list_len == 1);
+ XNB_ASSERT(pkt.car == start);
+
+ safe_m_freem(&mbuf);
+}
+
+/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
+static void
+xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
+ struct xnb_pkt pkt;
+ size_t size = MCLBYTES;
+ int free_slots = 32;
+ RING_IDX start = 12;
+ struct mbuf *mbuf;
+
+ mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
+ mbuf->m_flags |= M_PKTHDR;
+ mbuf->m_pkthdr.len = size;
+ mbuf->m_len = size;
+
+ xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.size == size);
+ XNB_ASSERT(pkt.car_size == size);
+ XNB_ASSERT(! (pkt.flags &
+ (NETRXF_more_data | NETRXF_extra_info)));
+ XNB_ASSERT(pkt.list_len == 1);
+ XNB_ASSERT(pkt.car == start);
+
+ safe_m_freem(&mbuf);
+}
+
+/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
+static void
+xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
+ struct xnb_pkt pkt;
+ size_t size1 = MHLEN - 5;
+ size_t size2 = MHLEN - 15;
+ int free_slots = 32;
+ RING_IDX start = 14;
+ struct mbuf *mbufc, *mbufc2;
+
+ mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
+	if (mbufc == NULL) {
+		XNB_ASSERT(mbufc != NULL);
+		return;
+	}
+	mbufc->m_flags |= M_PKTHDR;
+
+ mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
+ if (mbufc2 == NULL) {
+ XNB_ASSERT(mbufc2 != NULL);
+ safe_m_freem(&mbufc);
+ return;
+ }
+ mbufc2->m_pkthdr.len = size1 + size2;
+ mbufc2->m_len = size1;
+
+ xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.size == size1 + size2);
+ XNB_ASSERT(pkt.car == start);
+ /*
+ * The second m_getm may allocate a new mbuf and append
+ * it to the chain, or it may simply extend the first mbuf.
+ */
+ if (mbufc2->m_next != NULL) {
+ XNB_ASSERT(pkt.car_size == size1);
+ XNB_ASSERT(pkt.list_len == 1);
+ XNB_ASSERT(pkt.cdr == start + 1);
+ }
+
+ safe_m_freem(&mbufc2);
+}
+
+/** xnb_mbufc2pkt on an mbuf chain with >1 mbuf cluster */
+static void
+xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
+ struct xnb_pkt pkt;
+ size_t size = 14 * MCLBYTES / 3;
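+	/* 14 * MCLBYTES / 3 is ~4.7 clusters, forcing a multi-mbuf chain */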
+ size_t size_remaining;
+ int free_slots = 15;
+ RING_IDX start = 3;
+ struct mbuf *mbufc, *m;
+
+	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
+	if (mbufc == NULL) {
+		XNB_ASSERT(mbufc != NULL);
+		return;
+	}
+	mbufc->m_flags |= M_PKTHDR;
+
+ mbufc->m_pkthdr.len = size;
+ size_remaining = size;
+ for (m = mbufc; m != NULL; m = m->m_next) {
+		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
+ size_remaining -= m->m_len;
+ }
+
+ xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.size == size);
+ XNB_ASSERT(pkt.car == start);
+	XNB_ASSERT(pkt.car_size == mbufc->m_len);
+ /*
+ * There should be >1 response in the packet, and there is no
+ * extra info.
+ */
+ XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
+ XNB_ASSERT(pkt.cdr == pkt.car + 1);
+
+ safe_m_freem(&mbufc);
+}
+
+/** xnb_mbufc2pkt on an mbuf chain with >1 mbuf cluster and extra info */
+static void
+xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
+ struct xnb_pkt pkt;
+ size_t size = 14 * MCLBYTES / 3;
+ size_t size_remaining;
+ int free_slots = 15;
+ RING_IDX start = 3;
+ struct mbuf *mbufc, *m;
+
+ mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
+ if (mbufc == NULL) {
+ XNB_ASSERT(mbufc != NULL);
+ return;
+ }
+
+ mbufc->m_flags |= M_PKTHDR;
+ mbufc->m_pkthdr.len = size;
+ mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
+ mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
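+	/* TCP_MSS less 40 bytes for the 20-byte IP and 20-byte TCP headers */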
+ size_remaining = size;
+ for (m = mbufc; m != NULL; m = m->m_next) {
+		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
+ size_remaining -= m->m_len;
+ }
+
+ xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
+ XNB_ASSERT(xnb_pkt_is_valid(&pkt));
+ XNB_ASSERT(pkt.size == size);
+ XNB_ASSERT(pkt.car == start);
+	XNB_ASSERT(pkt.car_size == mbufc->m_len);
+ /* There should be >1 response in the packet, there is extra info */
+ XNB_ASSERT(pkt.flags & NETRXF_extra_info);
+ XNB_ASSERT(pkt.flags & NETRXF_data_validated);
+ XNB_ASSERT(pkt.cdr == pkt.car + 2);
+	XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
+ XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
+ XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));
+
+ safe_m_freem(&mbufc);
+}
+
+/** xnb_mbufc2pkt with insufficient space in the ring */
+static void
+xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
+ struct xnb_pkt pkt;
+ size_t size = 14 * MCLBYTES / 3;
+ size_t size_remaining;
+ int free_slots = 2;
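+	/* ~4.7 clusters of data but only 2 free ring slots: must fail */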
+ RING_IDX start = 3;
+ struct mbuf *mbufc, *m;
+ int error;
+
+	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
+	if (mbufc == NULL) {
+		XNB_ASSERT(mbufc != NULL);
+		return;
+	}
+	mbufc->m_flags |= M_PKTHDR;
+
+ mbufc->m_pkthdr.len = size;
+ size_remaining = size;
+ for (m = mbufc; m != NULL; m = m->m_next) {
+		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
+ size_remaining -= m->m_len;
+ }
+
+ error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
+ XNB_ASSERT(error == EAGAIN);
+ XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
+
+ safe_m_freem(&mbufc);
+}
+
+/**
+ * xnb_rxpkt2gnttab on an empty packet. Should return empty gnttab
+ */
+static void
+xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int nr_entries;
+ int free_slots = 60;
+ struct mbuf *mbuf;
+
+ mbuf = m_get(M_WAITOK, MT_DATA);
+
+ xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
+ nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
+
+ XNB_ASSERT(nr_entries == 0);
+
+ safe_m_freem(&mbuf);
+}
+
+/** xnb_rxpkt2gnttab on a short packet without extra data */
+static void
+xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
+ struct xnb_pkt pkt;
+ int nr_entries;
+ size_t size = 128;
+ int free_slots = 60;
+ RING_IDX start = 9;
+ struct netif_rx_request *req;
+ struct mbuf *mbuf;
+
+ mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
+ mbuf->m_flags |= M_PKTHDR;
+ mbuf->m_pkthdr.len = size;
+ mbuf->m_len = size;
+
+ xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
+	    xnb_unit_pvt.rxf.req_prod_pvt);
+ req->gref = 7;
+
+ nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
+
+ XNB_ASSERT(nr_entries == 1);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
+ /* flags should indicate gref's for dest */
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
+ mtod(mbuf, vm_offset_t)));
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
+ virt_to_mfn(mtod(mbuf, vm_offset_t)));
+ XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
+
+ safe_m_freem(&mbuf);
+}
+
+/**
+ * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
+ */
+static void
+xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int nr_entries;
+ int i, num_mbufs;
+ size_t total_granted_size = 0;
+ size_t size = MJUMPAGESIZE + 1;
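+	/* one byte more than a jumbo page, so the copy must span two pages */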
+ int free_slots = 60;
+ RING_IDX start = 11;
+ struct netif_rx_request *req;
+ struct mbuf *mbuf, *m;
+
+ mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
+ mbuf->m_flags |= M_PKTHDR;
+ mbuf->m_pkthdr.len = size;
+ mbuf->m_len = size;
+
+ xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
+
+	for (i = 0, m = mbuf; m != NULL; i++, m = m->m_next) {
+		req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
+		    xnb_unit_pvt.rxf.req_prod_pvt);
+ req->gref = i;
+ req->id = 5;
+ }
+ num_mbufs = i;
+
+ nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
+
+ XNB_ASSERT(nr_entries >= num_mbufs);
+ for (i = 0; i < nr_entries; i++) {
+ int end_offset = xnb_unit_pvt.gnttab[i].len +
+ xnb_unit_pvt.gnttab[i].dest.offset;
+ XNB_ASSERT(end_offset <= PAGE_SIZE);
+ total_granted_size += xnb_unit_pvt.gnttab[i].len;
+ }
+	XNB_ASSERT(total_granted_size == size);
+
+	safe_m_freem(&mbuf);
+}
+
+/**
+ * xnb_rxpkt2rsp on an empty packet. Shouldn't make any response
+ */
+static void
+xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int nr_entries;
+ int nr_reqs;
+ int free_slots = 60;
+ netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
+ netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
+ struct mbuf *mbuf;
+
+ mbuf = m_get(M_WAITOK, MT_DATA);
+
+ xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
+ nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
+
+ nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
+ &xnb_unit_pvt.rxb);
+ XNB_ASSERT(nr_reqs == 0);
+ XNB_ASSERT(
+ memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
+ XNB_ASSERT(
+ memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
+
+ safe_m_freem(&mbuf);
+}
+
+/**
+ * xnb_rxpkt2rsp on a short packet with no extras
+ */
+static void
+xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int nr_entries, nr_reqs;
+ size_t size = 128;
+ int free_slots = 60;
+ RING_IDX start = 5;
+ struct netif_rx_request *req;
+ struct netif_rx_response *rsp;
+ struct mbuf *mbuf;
+
+ mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
+ mbuf->m_flags |= M_PKTHDR;
+ mbuf->m_pkthdr.len = size;
+ mbuf->m_len = size;
+
+ xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
+ req->gref = 7;
+ xnb_unit_pvt.rxb.req_cons = start;
+ xnb_unit_pvt.rxb.rsp_prod_pvt = start;
+ xnb_unit_pvt.rxs->req_prod = start + 1;
+ xnb_unit_pvt.rxs->rsp_prod = start;
+
+ nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
+
+ nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
+ &xnb_unit_pvt.rxb);
+
+ XNB_ASSERT(nr_reqs == 1);
+ XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
+ XNB_ASSERT(rsp->id == req->id);
+ XNB_ASSERT(rsp->offset == 0);
+ XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
+ XNB_ASSERT(rsp->status == size);
+
+ safe_m_freem(&mbuf);
+}
+
+/**
+ * xnb_rxpkt2rsp with extra data
+ */
+static void
+xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int nr_entries, nr_reqs;
+ size_t size = 14;
+ int free_slots = 15;
+ RING_IDX start = 3;
+ uint16_t id = 49;
+ uint16_t gref = 65;
+ uint16_t mss = TCP_MSS - 40;
+ struct mbuf *mbufc;
+ struct netif_rx_request *req;
+ struct netif_rx_response *rsp;
+ struct netif_extra_info *ext;
+
+ mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
+ if (mbufc == NULL) {
+ XNB_ASSERT(mbufc != NULL);
+ return;
+ }
+
+ mbufc->m_flags |= M_PKTHDR;
+ mbufc->m_pkthdr.len = size;
+ mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
+ mbufc->m_pkthdr.tso_segsz = mss;
+ mbufc->m_len = size;
+
+ xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
+ req->id = id;
+ req->gref = gref;
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
+ req->id = id + 1;
+ req->gref = gref + 1;
+ xnb_unit_pvt.rxb.req_cons = start;
+ xnb_unit_pvt.rxb.rsp_prod_pvt = start;
+ xnb_unit_pvt.rxs->req_prod = start + 2;
+ xnb_unit_pvt.rxs->rsp_prod = start;
+
+ nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
+
+ nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
+ &xnb_unit_pvt.rxb);
+
+ XNB_ASSERT(nr_reqs == 2);
+ XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
+ XNB_ASSERT(rsp->id == id);
+ XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
+ XNB_ASSERT((rsp->flags & NETRXF_extra_info));
+ XNB_ASSERT((rsp->flags & NETRXF_data_validated));
+ XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
+ XNB_ASSERT(rsp->status == size);
+
+ ext = (struct netif_extra_info*)
+ RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
+ XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
+ XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
+ XNB_ASSERT(ext->u.gso.size == mss);
+ XNB_ASSERT(ext->u.gso.type == XEN_NETIF_EXTRA_TYPE_GSO);
+
+ safe_m_freem(&mbufc);
+}
+
+/**
+ * xnb_rxpkt2rsp on a packet with more than a page's worth of data. It should
+ * generate two response slots.
+ */
+static void
+xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int nr_entries, nr_reqs;
+ size_t size = PAGE_SIZE + 100;
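+	/* just over one page of data, so two response slots are required */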
+ int free_slots = 3;
+ uint16_t id1 = 17;
+ uint16_t id2 = 37;
+ uint16_t gref1 = 24;
+ uint16_t gref2 = 34;
+ RING_IDX start = 15;
+ struct netif_rx_request *req;
+ struct netif_rx_response *rsp;
+ struct mbuf *mbuf;
+
+ mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
+ mbuf->m_flags |= M_PKTHDR;
+ mbuf->m_pkthdr.len = size;
+ if (mbuf->m_next != NULL) {
+ size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
+ mbuf->m_len = first_len;
+ mbuf->m_next->m_len = size - first_len;
+
+ } else {
+ mbuf->m_len = size;
+ }
+
+ xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
+ req->gref = gref1;
+ req->id = id1;
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
+ req->gref = gref2;
+ req->id = id2;
+ xnb_unit_pvt.rxb.req_cons = start;
+ xnb_unit_pvt.rxb.rsp_prod_pvt = start;
+ xnb_unit_pvt.rxs->req_prod = start + 2;
+ xnb_unit_pvt.rxs->rsp_prod = start;
+
+ nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
+
+ nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
+ &xnb_unit_pvt.rxb);
+
+ XNB_ASSERT(nr_reqs == 2);
+ XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
+ XNB_ASSERT(rsp->id == id1);
+ XNB_ASSERT(rsp->offset == 0);
+ XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
+ XNB_ASSERT(rsp->flags & NETRXF_more_data);
+ XNB_ASSERT(rsp->status == PAGE_SIZE);
+
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
+ XNB_ASSERT(rsp->id == id2);
+ XNB_ASSERT(rsp->offset == 0);
+ XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
+ XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
+ XNB_ASSERT(rsp->status == size - PAGE_SIZE);
+
+ safe_m_freem(&mbuf);
+}
+
+/** xnb_rxpkt2rsp on a grant table with two sub-page entries */
+static void
+xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
+ struct xnb_pkt pkt;
+ int nr_reqs, nr_entries;
+ size_t size1 = MHLEN - 5;
+ size_t size2 = MHLEN - 15;
+ int free_slots = 32;
+ RING_IDX start = 14;
+ uint16_t id = 47;
+ uint16_t gref = 54;
+ struct netif_rx_request *req;
+ struct netif_rx_response *rsp;
+ struct mbuf *mbufc;
+
+	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
+	if (mbufc == NULL) {
+		XNB_ASSERT(mbufc != NULL);
+		return;
+	}
+	mbufc->m_flags |= M_PKTHDR;
+
+ m_getm(mbufc, size2, M_WAITOK, MT_DATA);
+ XNB_ASSERT(mbufc->m_next != NULL);
+ mbufc->m_pkthdr.len = size1 + size2;
+ mbufc->m_len = size1;
+ mbufc->m_next->m_len = size2;
+
+ xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
+
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
+ req->gref = gref;
+ req->id = id;
+ xnb_unit_pvt.rxb.req_cons = start;
+ xnb_unit_pvt.rxb.rsp_prod_pvt = start;
+ xnb_unit_pvt.rxs->req_prod = start + 1;
+ xnb_unit_pvt.rxs->rsp_prod = start;
+
+ nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
+
+ nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
+ &xnb_unit_pvt.rxb);
+
+ XNB_ASSERT(nr_entries == 2);
+ XNB_ASSERT(nr_reqs == 1);
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
+ XNB_ASSERT(rsp->id == id);
+ XNB_ASSERT(rsp->status == size1 + size2);
+ XNB_ASSERT(rsp->offset == 0);
+ XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
+
+ safe_m_freem(&mbufc);
+}
+
+/**
+ * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
+ * Note: this test will result in an error message being printed to the console
+ * such as:
+ * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
+ */
+static void
+xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
+{
+ struct xnb_pkt pkt;
+ int nr_entries, nr_reqs;
+ int id = 7;
+ int gref = 42;
+ uint16_t canary = 6859;
+ size_t size = 7 * MCLBYTES;
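+	/* several clusters' worth of data, so the copy needs multiple gnttab entries */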
+ int free_slots = 9;
+ RING_IDX start = 2;
+ struct netif_rx_request *req;
+ struct netif_rx_response *rsp;
+ struct mbuf *mbuf;
+
+ mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
+ mbuf->m_flags |= M_PKTHDR;
+ mbuf->m_pkthdr.len = size;
+ mbuf->m_len = size;
+
+ xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
+ req->gref = gref;
+ req->id = id;
+ xnb_unit_pvt.rxb.req_cons = start;
+ xnb_unit_pvt.rxb.rsp_prod_pvt = start;
+ xnb_unit_pvt.rxs->req_prod = start + 1;
+ xnb_unit_pvt.rxs->rsp_prod = start;
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
+ req->gref = canary;
+ req->id = canary;
+
+ nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
+ &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
+	/* Inject the error */
+ xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
+
+ nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
+ &xnb_unit_pvt.rxb);
+
+ XNB_ASSERT(nr_reqs == 1);
+ XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
+ rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
+ XNB_ASSERT(rsp->id == id);
+ XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
+ req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
+ XNB_ASSERT(req->gref == canary);
+ XNB_ASSERT(req->id == canary);
+
+ safe_m_freem(&mbuf);
+}
+
+/**
+ * xnb_add_mbuf_cksum on an ARP request packet
+ */
+static void
+xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
+{
+ const size_t pkt_len = sizeof(struct ether_header) +
+ sizeof(struct ether_arp);
+ struct mbuf *mbufc;
+ struct ether_header *eh;
+ struct ether_arp *ep;
+ unsigned char pkt_orig[pkt_len];
+
+ mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
+ /* Fill in an example arp request */
+ eh = mtod(mbufc, struct ether_header*);
+ eh->ether_dhost[0] = 0xff;
+ eh->ether_dhost[1] = 0xff;
+ eh->ether_dhost[2] = 0xff;
+ eh->ether_dhost[3] = 0xff;
+ eh->ether_dhost[4] = 0xff;
+ eh->ether_dhost[5] = 0xff;
+ eh->ether_shost[0] = 0x00;
+ eh->ether_shost[1] = 0x15;
+ eh->ether_shost[2] = 0x17;
+ eh->ether_shost[3] = 0xe9;
+ eh->ether_shost[4] = 0x30;
+ eh->ether_shost[5] = 0x68;
+ eh->ether_type = htons(ETHERTYPE_ARP);
+ ep = (struct ether_arp*)(eh + 1);
+ ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
+ ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
+ ep->ea_hdr.ar_hln = 6;
+ ep->ea_hdr.ar_pln = 4;
+ ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
+ ep->arp_sha[0] = 0x00;
+ ep->arp_sha[1] = 0x15;
+ ep->arp_sha[2] = 0x17;
+ ep->arp_sha[3] = 0xe9;
+ ep->arp_sha[4] = 0x30;
+ ep->arp_sha[5] = 0x68;
+ ep->arp_spa[0] = 0xc0;
+ ep->arp_spa[1] = 0xa8;
+ ep->arp_spa[2] = 0x0a;
+ ep->arp_spa[3] = 0x04;
+ bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
+ ep->arp_tpa[0] = 0xc0;
+ ep->arp_tpa[1] = 0xa8;
+ ep->arp_tpa[2] = 0x0a;
+ ep->arp_tpa[3] = 0x06;
+
+ /* fill in the length field */
+ mbufc->m_len = pkt_len;
+ mbufc->m_pkthdr.len = pkt_len;
+ /* indicate that the netfront uses hw-assisted checksums */
+ mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+
+ /* Make a backup copy of the packet */
+ bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);
+
+ /* Function under test */
+ xnb_add_mbuf_cksum(mbufc);
+
+ /* Verify that the packet's data did not change */
+ XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
+ m_freem(mbufc);
+}
+
+/**
+ * Helper function that populates the ethernet header and IP header used by
+ * some of the xnb_add_mbuf_cksum unit tests. m must already be allocated
+ * and must be large enough
+ */
+static void
+xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
+ uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
+{
+ struct ether_header *eh;
+ struct ip *iph;
+
+ eh = mtod(m, struct ether_header*);
+ eh->ether_dhost[0] = 0x00;
+ eh->ether_dhost[1] = 0x16;
+ eh->ether_dhost[2] = 0x3e;
+ eh->ether_dhost[3] = 0x23;
+ eh->ether_dhost[4] = 0x50;
+ eh->ether_dhost[5] = 0x0b;
+ eh->ether_shost[0] = 0x00;
+ eh->ether_shost[1] = 0x16;
+ eh->ether_shost[2] = 0x30;
+ eh->ether_shost[3] = 0x00;
+ eh->ether_shost[4] = 0x00;
+ eh->ether_shost[5] = 0x00;
+ eh->ether_type = htons(ETHERTYPE_IP);
+ iph = (struct ip*)(eh + 1);
+ iph->ip_hl = 0x5; /* 5 dwords == 20 bytes */
+ iph->ip_v = 4; /* IP v4 */
+ iph->ip_tos = 0;
+ iph->ip_len = htons(ip_len);
+ iph->ip_id = htons(ip_id);
+ iph->ip_off = htons(ip_off);
+ iph->ip_ttl = 64;
+ iph->ip_p = ip_p;
+ iph->ip_sum = htons(ip_sum);
+ iph->ip_src.s_addr = htonl(0xc0a80a04);
+ iph->ip_dst.s_addr = htonl(0xc0a80a05);
+}
+
+/**
+ * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
+ * ICMP packet
+ */
+static void
+xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
+{
+ const size_t icmp_len = 64; /* set by ping(1) */
+ const size_t pkt_len = sizeof(struct ether_header) +
+ sizeof(struct ip) + icmp_len;
+ struct mbuf *mbufc;
+ struct ether_header *eh;
+ struct ip *iph;
+ struct icmp *icmph;
+ unsigned char pkt_orig[icmp_len];
+ uint32_t *tv_field;
+ uint8_t *data_payload;
+ int i;
+ const uint16_t ICMP_CSUM = 0xaed7;
+ const uint16_t IP_CSUM = 0xe533;
+
+ mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
+ /* Fill in an example ICMP ping request */
+ eh = mtod(mbufc, struct ether_header*);
+ xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
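+	/* ip_len 84 == 20-byte IP header + 64-byte ICMP message */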
+ iph = (struct ip*)(eh + 1);
+ icmph = (struct icmp*)(iph + 1);
+ icmph->icmp_type = ICMP_ECHO;
+ icmph->icmp_code = 0;
+ icmph->icmp_cksum = htons(ICMP_CSUM);
+ icmph->icmp_id = htons(31492);
+ icmph->icmp_seq = htons(0);
+ /*
+ * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
+ * For this test, we will set the bytes individually for portability.
+ */
+ tv_field = (uint32_t*)(&(icmph->icmp_hun));
+ tv_field[0] = 0x4f02cfac;
+ tv_field[1] = 0x0007c46a;
+ /*
+	 * Remainder of packet is an incrementing 8-bit integer, starting with 8.
+ */
+ data_payload = (uint8_t*)(&tv_field[2]);
+ for (i = 8; i < 37; i++) {
+ *data_payload++ = i;
+ }
+
+ /* fill in the length field */
+ mbufc->m_len = pkt_len;
+ mbufc->m_pkthdr.len = pkt_len;
+ /* indicate that the netfront uses hw-assisted checksums */
+ mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+
+	/* Back up the ICMP message so we can verify it is not modified */
+	bcopy(icmph, pkt_orig, icmp_len);
+ /* Function under test */
+ xnb_add_mbuf_cksum(mbufc);
+
+ /* Check the IP checksum */
+ XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
+
+ /* Check that the ICMP packet did not change */
+	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);
+ m_freem(mbufc);
+}
+
+/**
+ * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
+ * UDP packet
+ */
+static void
+xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
+{
+ const size_t udp_len = 16;
+ const size_t pkt_len = sizeof(struct ether_header) +
+ sizeof(struct ip) + udp_len;
+ struct mbuf *mbufc;
+ struct ether_header *eh;
+ struct ip *iph;
+ struct udphdr *udp;
+ uint8_t *data_payload;
+ const uint16_t IP_CSUM = 0xe56b;
+ const uint16_t UDP_CSUM = 0xdde2;
+
+ mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
+	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
+ eh = mtod(mbufc, struct ether_header*);
+ xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
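+	/* ip_len 36 == 20-byte IP header + 8-byte UDP header + 8-byte payload */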
+ iph = (struct ip*)(eh + 1);
+ udp = (struct udphdr*)(iph + 1);
+ udp->uh_sport = htons(0x51ae);
+ udp->uh_dport = htons(0x08ae);
+ udp->uh_ulen = htons(udp_len);
+ udp->uh_sum = htons(0xbaad); /* xnb_add_mbuf_cksum will fill this in */
+ data_payload = (uint8_t*)(udp + 1);
+ data_payload[0] = 'F';
+ data_payload[1] = 'r';
+ data_payload[2] = 'e';
+ data_payload[3] = 'e';
+ data_payload[4] = 'B';
+ data_payload[5] = 'S';
+ data_payload[6] = 'D';
+ data_payload[7] = '\n';
+
+ /* fill in the length field */
+ mbufc->m_len = pkt_len;
+ mbufc->m_pkthdr.len = pkt_len;
+ /* indicate that the netfront uses hw-assisted checksums */
+ mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+
+ /* Function under test */
+ xnb_add_mbuf_cksum(mbufc);
+
+ /* Check the checksums */
+ XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
+ XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));
+
+ m_freem(mbufc);
+}
+
+/**
+ * Helper function that populates a TCP packet used by all of the
+ * xnb_add_mbuf_cksum tcp unit tests. m must already be allocated and must be
+ * large enough
+ */
+static void
+xnb_fill_tcp(struct mbuf *m)
+{
+ struct ether_header *eh;
+ struct ip *iph;
+ struct tcphdr *tcp;
+ uint32_t *options;
+ uint8_t *data_payload;
+
+ /* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
+ eh = mtod(m, struct ether_header*);
+ xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
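+	/* ip_len 60 == 20 (IP) + 32 (TCP header, th_off 8) + 8 (payload) */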
+ iph = (struct ip*)(eh + 1);
+ tcp = (struct tcphdr*)(iph + 1);
+ tcp->th_sport = htons(0x9cd9);
+ tcp->th_dport = htons(2222);
+ tcp->th_seq = htonl(0x00f72b10);
+ tcp->th_ack = htonl(0x7f37ba6c);
+ tcp->th_x2 = 0;
+ tcp->th_off = 8;
+ tcp->th_flags = 0x18;
+ tcp->th_win = htons(0x410);
+ /* th_sum is incorrect; will be inserted by function under test */
+ tcp->th_sum = htons(0xbaad);
+ tcp->th_urp = htons(0);
+ /*
+ * The following 12 bytes of options encode:
+ * [nop, nop, TS val 33247 ecr 3457687679]
+ */
+ options = (uint32_t*)(tcp + 1);
+ options[0] = htonl(0x0101080a);
+ options[1] = htonl(0x000081df);
+ options[2] = htonl(0xce18207f);
+ data_payload = (uint8_t*)(&options[3]);
+ data_payload[0] = 'F';
+ data_payload[1] = 'r';
+ data_payload[2] = 'e';
+ data_payload[3] = 'e';
+ data_payload[4] = 'B';
+ data_payload[5] = 'S';
+ data_payload[6] = 'D';
+ data_payload[7] = '\n';
+}
+
+/**
+ * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
+ * packet
+ */
+static void
+xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
+{
+ const size_t payload_len = 8;
+ const size_t tcp_options_len = 12;
+ const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
+ sizeof(struct tcphdr) + tcp_options_len + payload_len;
+ struct mbuf *mbufc;
+ struct ether_header *eh;
+ struct ip *iph;
+ struct tcphdr *tcp;
+ const uint16_t IP_CSUM = 0xa55a;
+ const uint16_t TCP_CSUM = 0x2f64;
+
+ mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
+ /* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
+ xnb_fill_tcp(mbufc);
+ eh = mtod(mbufc, struct ether_header*);
+ iph = (struct ip*)(eh + 1);
+ tcp = (struct tcphdr*)(iph + 1);
+
+ /* fill in the length field */
+ mbufc->m_len = pkt_len;
+ mbufc->m_pkthdr.len = pkt_len;
+ /* indicate that the netfront uses hw-assisted checksums */
+ mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+
+ /* Function under test */
+ xnb_add_mbuf_cksum(mbufc);
+
+ /* Check the checksums */
+ XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
+ XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
+
+ m_freem(mbufc);
+}
+
+/**
+ * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
+ */
+static void
+xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
+{
+ const size_t payload_len = 8;
+ const size_t tcp_options_len = 12;
+ const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
+ sizeof(struct tcphdr) + tcp_options_len + payload_len;
+ struct mbuf *mbufc;
+ struct ether_header *eh;
+ struct ip *iph;
+ struct tcphdr *tcp;
+	/*
+	 * Use deliberately bad checksums, and verify that they don't get
+	 * corrected by xnb_add_mbuf_cksum.
+	 */
+ const uint16_t IP_CSUM = 0xdead;
+ const uint16_t TCP_CSUM = 0xbeef;
+
+ mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
+ /* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
+ xnb_fill_tcp(mbufc);
+ eh = mtod(mbufc, struct ether_header*);
+ iph = (struct ip*)(eh + 1);
+ iph->ip_sum = htons(IP_CSUM);
+ tcp = (struct tcphdr*)(iph + 1);
+ tcp->th_sum = htons(TCP_CSUM);
+
+ /* fill in the length field */
+ mbufc->m_len = pkt_len;
+ mbufc->m_pkthdr.len = pkt_len;
+ /* indicate that the netfront does not use hw-assisted checksums */
+ mbufc->m_pkthdr.csum_flags = 0;
+
+ /* Function under test */
+ xnb_add_mbuf_cksum(mbufc);
+
+ /* Check that the checksums didn't change */
+ XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
+ XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
+
+ m_freem(mbufc);
+}
+
+/**
+ * sscanf on unsigned chars
+ */
+static void
+xnb_sscanf_hhu(char *buffer, size_t buflen)
+{
+ const char mystr[] = "137";
+ uint8_t dest[12];
+ int i;
+
+ for (i = 0; i < 12; i++)
+ dest[i] = 'X';
+
+ sscanf(mystr, "%hhu", &dest[4]);
+ for (i = 0; i < 12; i++)
+ XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
+}
+
+/**
+ * sscanf on signed chars
+ */
+static void
+xnb_sscanf_hhd(char *buffer, size_t buflen)
+{
+ const char mystr[] = "-27";
+ int8_t dest[12];
+ int i;
+
+ for (i = 0; i < 12; i++)
+ dest[i] = 'X';
+
+ sscanf(mystr, "%hhd", &dest[4]);
+ for (i = 0; i < 12; i++)
+ XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
+}
+
+/**
+ * sscanf on signed long longs
+ */
+static void
+xnb_sscanf_lld(char *buffer, size_t buflen)
+{
+ const char mystr[] = "-123456789012345"; /* about -2**47 */
+ long long dest[3];
+ int i;
+
+ for (i = 0; i < 3; i++)
+ dest[i] = (long long)0xdeadbeefdeadbeef;
+
+ sscanf(mystr, "%lld", &dest[1]);
+ for (i = 0; i < 3; i++)
+ XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
+ -123456789012345));
+}
+
+/**
+ * sscanf on unsigned long longs
+ */
+static void
+xnb_sscanf_llu(char *buffer, size_t buflen)
+{
+ const char mystr[] = "12802747070103273189";
+ unsigned long long dest[3];
+ int i;
+
+ for (i = 0; i < 3; i++)
+ dest[i] = (long long)0xdeadbeefdeadbeef;
+
+ sscanf(mystr, "%llu", &dest[1]);
+ for (i = 0; i < 3; i++)
+ XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
+ 12802747070103273189ull));
+}
+
+/**
+ * sscanf with %hhn (byte count stored into an unsigned char)
+ */
+static void
+xnb_sscanf_hhn(char *buffer, size_t buflen)
+{
+ const char mystr[] =
+ "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
+ "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
+ "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
+ unsigned char dest[12];
+ int i;
+
+ for (i = 0; i < 12; i++)
+ dest[i] = (unsigned char)'X';
+
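+	/*
+	 * The literal prefix of the format string matches 160 characters of
+	 * mystr, so %hhn stores 160 in the unsigned char it points to.
+	 */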
+ sscanf(mystr,
+ "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
+ "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
+ "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]);
+ for (i = 0; i < 12; i++)
+ XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
+}
diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c
index d64257c..82a3692 100644
--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -480,7 +480,7 @@ ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
/* No caching/ no readaheads. Just read data into the user buffer */
return ncl_readrpc(vp, uio, cred);
- biosize = vp->v_mount->mnt_stat.f_iosize;
+ biosize = vp->v_bufobj.bo_bsize;
seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
error = nfs_bioread_check_cons(vp, td, cred);
@@ -960,7 +960,7 @@ flush_and_restart:
if (vn_rlimit_fsize(vp, uio, td))
return (EFBIG);
- biosize = vp->v_mount->mnt_stat.f_iosize;
+ biosize = vp->v_bufobj.bo_bsize;
/*
* Find all of this file's B_NEEDCOMMIT buffers. If our writes
* would exceed the local maximum per-file write commit size when
@@ -1264,12 +1264,8 @@ nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
bp = getblk(vp, bn, size, 0, 0, 0);
}
- if (vp->v_type == VREG) {
- int biosize;
-
- biosize = mp->mnt_stat.f_iosize;
- bp->b_blkno = bn * (biosize / DEV_BSIZE);
- }
+ if (vp->v_type == VREG)
+ bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
return (bp);
}
@@ -1785,7 +1781,7 @@ ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad
{
struct nfsnode *np = VTONFS(vp);
u_quad_t tsize;
- int biosize = vp->v_mount->mnt_stat.f_iosize;
+ int biosize = vp->v_bufobj.bo_bsize;
int error = 0;
mtx_lock(&np->n_mtx);
diff --git a/sys/fs/nfsclient/nfs_clnode.c b/sys/fs/nfsclient/nfs_clnode.c
index 5e7185d..21de25e 100644
--- a/sys/fs/nfsclient/nfs_clnode.c
+++ b/sys/fs/nfsclient/nfs_clnode.c
@@ -136,6 +136,7 @@ ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
return (error);
}
vp = nvp;
+ KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
vp->v_bufobj.bo_ops = &buf_ops_newnfs;
vp->v_data = np;
np->n_vnode = vp;
diff --git a/sys/fs/nfsclient/nfs_clport.c b/sys/fs/nfsclient/nfs_clport.c
index bf48beb..53bf8b3 100644
--- a/sys/fs/nfsclient/nfs_clport.c
+++ b/sys/fs/nfsclient/nfs_clport.c
@@ -212,6 +212,7 @@ nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
return (error);
}
vp = nvp;
+ KASSERT(vp->v_bufobj.bo_bsize != 0, ("nfscl_nget: bo_bsize == 0"));
vp->v_bufobj.bo_ops = &buf_ops_newnfs;
vp->v_data = np;
np->n_vnode = vp;
diff --git a/sys/fs/nfsclient/nfs_clvfsops.c b/sys/fs/nfsclient/nfs_clvfsops.c
index e6d146f..e882d00 100644
--- a/sys/fs/nfsclient/nfs_clvfsops.c
+++ b/sys/fs/nfsclient/nfs_clvfsops.c
@@ -999,23 +999,6 @@ nfs_mount(struct mount *mp)
error = EIO;
goto out;
}
-
- /*
- * Cannot switch to UDP if current rsize/wsize/readdirsize is
- * too large, since there may be an I/O RPC in progress that
- * will get retried after the switch to the UDP socket. These
- * retries will fail over and over and over again.
- */
- if (args.sotype == SOCK_DGRAM &&
- (nmp->nm_rsize > NFS_MAXDGRAMDATA ||
- nmp->nm_wsize > NFS_MAXDGRAMDATA ||
- nmp->nm_readdirsize > NFS_MAXDGRAMDATA)) {
- vfs_mount_error(mp,
- "old rsize/wsize/readdirsize greater than UDP max");
- error = EINVAL;
- goto out;
- }
-
/*
* When doing an update, we can't change version,
* security, switch lockd strategies or change cookie
diff --git a/sys/geom/geom_bsd.c b/sys/geom/geom_bsd.c
index 60fc50c..5742509f 100644
--- a/sys/geom/geom_bsd.c
+++ b/sys/geom/geom_bsd.c
@@ -57,6 +57,7 @@ __FBSDID("$FreeBSD$");
#include <sys/errno.h>
#include <sys/disklabel.h>
#include <sys/gpt.h>
+#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/uuid.h>
#include <geom/geom.h>
diff --git a/sys/geom/geom_mbr.c b/sys/geom/geom_mbr.c
index 6a736a7..42a9c8e 100644
--- a/sys/geom/geom_mbr.c
+++ b/sys/geom/geom_mbr.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/md5.h>
+#include <sys/proc.h>
#include <sys/diskmbr.h>
#include <sys/sbuf.h>
diff --git a/sys/geom/geom_pc98.c b/sys/geom/geom_pc98.c
index 71c194d..8e14f09 100644
--- a/sys/geom/geom_pc98.c
+++ b/sys/geom/geom_pc98.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/diskpc98.h>
diff --git a/sys/geom/mountver/g_mountver.c b/sys/geom/mountver/g_mountver.c
index 30af511..37cb924 100644
--- a/sys/geom/mountver/g_mountver.c
+++ b/sys/geom/mountver/g_mountver.c
@@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/disk.h>
+#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
diff --git a/sys/i386/i386/initcpu.c b/sys/i386/i386/initcpu.c
index c2daf54..ec77adb 100644
--- a/sys/i386/i386/initcpu.c
+++ b/sys/i386/i386/initcpu.c
@@ -650,6 +650,8 @@ enable_sse(void)
#endif
}
+extern int elf32_nxstack;
+
void
initializecpu(void)
{
@@ -739,6 +741,7 @@ initializecpu(void)
msr = rdmsr(MSR_EFER) | EFER_NXE;
wrmsr(MSR_EFER, msr);
pg_nx = PG_NX;
+ elf32_nxstack = 1;
}
#endif
break;
diff --git a/sys/i386/include/signal.h b/sys/i386/include/signal.h
index c636c2c..892115b 100644
--- a/sys/i386/include/signal.h
+++ b/sys/i386/include/signal.h
@@ -115,13 +115,16 @@ struct sigcontext {
*/
int sc_fpformat;
int sc_ownedfp;
- int sc_spare1[1];
+ int sc_flags;
int sc_fpstate[128] __aligned(16);
int sc_fsbase;
int sc_gsbase;
- int sc_spare2[6];
+ int sc_xfpustate;
+ int sc_xfpustate_len;
+
+ int sc_spare2[4];
};
#define sc_sp sc_esp
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 4da2ad78..bde590c 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -118,7 +118,12 @@ static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
&elf_legacy_coredump, 0, "");
-static int __elfN(nxstack) = 0;
+int __elfN(nxstack) =
+#if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */
+ 1;
+#else
+ 0;
+#endif
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 55c77c4..9da342d 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -145,7 +145,6 @@ SYSCTL_INT(_kern_shutdown, OID_AUTO, show_busybufs, CTLFLAG_RW,
*/
const char *panicstr;
-int stop_scheduler; /* system stopped CPUs for panic */
int dumping; /* system is dumping */
int rebooting; /* system is rebooting */
static struct dumperinfo dumper; /* our selected dumper */
@@ -597,7 +596,7 @@ panic(const char *fmt, ...)
* stop_scheduler_on_panic is true, then stop_scheduler will
* always be set. Even if panic has been entered from kdb.
*/
- stop_scheduler = 1;
+ td->td_stopsched = 1;
}
#endif
diff --git a/sys/kern/subr_scanf.c b/sys/kern/subr_scanf.c
index 0814953..824e392 100644
--- a/sys/kern/subr_scanf.c
+++ b/sys/kern/subr_scanf.c
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#define POINTER 0x10 /* weird %p pointer (`fake hex') */
#define NOSKIP 0x20 /* do not skip blanks */
#define QUAD 0x400
+#define SHORTSHORT 0x4000 /** hh: char */
/*
* The following are used in numeric conversions only:
@@ -160,13 +161,23 @@ literal:
flags |= SUPPRESS;
goto again;
case 'l':
- flags |= LONG;
+ if (flags & LONG){
+ flags &= ~LONG;
+ flags |= QUAD;
+ } else {
+ flags |= LONG;
+ }
goto again;
case 'q':
flags |= QUAD;
goto again;
case 'h':
- flags |= SHORT;
+ if (flags & SHORT){
+ flags &= ~SHORT;
+ flags |= SHORTSHORT;
+ } else {
+ flags |= SHORT;
+ }
goto again;
case '0': case '1': case '2': case '3': case '4':
@@ -235,7 +246,9 @@ literal:
nconversions++;
if (flags & SUPPRESS) /* ??? */
continue;
- if (flags & SHORT)
+ if (flags & SHORTSHORT)
+ *va_arg(ap, char *) = nread;
+ else if (flags & SHORT)
*va_arg(ap, short *) = nread;
else if (flags & LONG)
*va_arg(ap, long *) = nread;
@@ -510,6 +523,8 @@ literal:
if (flags & POINTER)
*va_arg(ap, void **) =
(void *)(uintptr_t)res;
+ else if (flags & SHORTSHORT)
+ *va_arg(ap, char *) = res;
else if (flags & SHORT)
*va_arg(ap, short *) = res;
else if (flags & LONG)
diff --git a/sys/kern/subr_syscall.c b/sys/kern/subr_syscall.c
index bba4479..75328f6 100644
--- a/sys/kern/subr_syscall.c
+++ b/sys/kern/subr_syscall.c
@@ -212,7 +212,8 @@ syscallret(struct thread *td, int error, struct syscall_args *sa __unused)
* executes. If debugger requested tracing of syscall
* returns, do it now too.
*/
- if (traced && ((td->td_dbgflags & TDB_EXEC) != 0 ||
+ if (traced &&
+ ((td->td_dbgflags & (TDB_FORK | TDB_EXEC)) != 0 ||
(p->p_stops & S_PT_SCX) != 0))
ptracestop(td, SIGTRAP);
td->td_dbgflags &= ~(TDB_SCX | TDB_EXEC | TDB_FORK);
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 871132c..620246b 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1410,6 +1410,11 @@ m_print(const struct mbuf *m, int maxlen)
int pdata;
const struct mbuf *m2;
+ if (m == NULL) {
+ printf("mbuf: %p\n", m);
+ return;
+ }
+
if (m->m_flags & M_PKTHDR)
len = m->m_pkthdr.len;
else
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index 7af9f55..2d9994f 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -1552,6 +1552,12 @@ aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj,
return (error);
}
+ /* XXX: aio_nbytes is later casted to signed types. */
+ if (aiocbe->uaiocb.aio_nbytes > INT_MAX) {
+ uma_zfree(aiocb_zone, aiocbe);
+ return (EINVAL);
+ }
+
if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
@@ -2529,10 +2535,13 @@ filt_aioattach(struct knote *kn)
static void
filt_aiodetach(struct knote *kn)
{
- struct aiocblist *aiocbe = kn->kn_ptr.p_aio;
+ struct knlist *knl;
- if (!knlist_empty(&aiocbe->klist))
- knlist_remove(&aiocbe->klist, kn, 0);
+ knl = &kn->kn_ptr.p_aio->klist;
+ knl->kl_lock(knl->kl_lockarg);
+ if (!knlist_empty(knl))
+ knlist_remove(knl, kn, 1);
+ knl->kl_unlock(knl->kl_lockarg);
}
/* kqueue filter function */
@@ -2574,10 +2583,13 @@ filt_lioattach(struct knote *kn)
static void
filt_liodetach(struct knote *kn)
{
- struct aioliojob * lj = kn->kn_ptr.p_lio;
+ struct knlist *knl;
- if (!knlist_empty(&lj->klist))
- knlist_remove(&lj->klist, kn, 0);
+ knl = &kn->kn_ptr.p_lio->klist;
+ knl->kl_lock(knl->kl_lockarg);
+ if (!knlist_empty(knl))
+ knlist_remove(knl, kn, 1);
+ knl->kl_unlock(knl->kl_lockarg);
}
/* kqueue filter function */
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index e33592a..f94bc12 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -519,6 +519,7 @@ vn_read(fp, uio, active_cred, flags, td)
int error, ioflag;
struct mtx *mtxp;
int advice, vfslocked;
+ off_t offset;
KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
uio->uio_td, td));
@@ -558,19 +559,14 @@ vn_read(fp, uio, active_cred, flags, td)
switch (advice) {
case POSIX_FADV_NORMAL:
case POSIX_FADV_SEQUENTIAL:
+ case POSIX_FADV_NOREUSE:
ioflag |= sequential_heuristic(uio, fp);
break;
case POSIX_FADV_RANDOM:
/* Disable read-ahead for random I/O. */
break;
- case POSIX_FADV_NOREUSE:
- /*
- * Request the underlying FS to discard the buffers
- * and pages after the I/O is complete.
- */
- ioflag |= IO_DIRECT;
- break;
}
+ offset = uio->uio_offset;
#ifdef MAC
error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
@@ -587,6 +583,10 @@ vn_read(fp, uio, active_cred, flags, td)
}
fp->f_nextoff = uio->uio_offset;
VOP_UNLOCK(vp, 0);
+ if (error == 0 && advice == POSIX_FADV_NOREUSE &&
+ offset != uio->uio_offset)
+ error = VOP_ADVISE(vp, offset, uio->uio_offset - 1,
+ POSIX_FADV_DONTNEED);
VFS_UNLOCK_GIANT(vfslocked);
return (error);
}
diff --git a/sys/modules/ixgbe/Makefile b/sys/modules/ixgbe/Makefile
index 50efe65..c18821f 100644
--- a/sys/modules/ixgbe/Makefile
+++ b/sys/modules/ixgbe/Makefile
@@ -5,7 +5,7 @@ SRCS = device_if.h bus_if.h pci_if.h
SRCS += ixgbe.c ixv.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
-SRCS += ixgbe_82599.c ixgbe_82598.c
+SRCS += ixgbe_82599.c ixgbe_82598.c ixgbe_x540.c
CFLAGS+= -I${.CURDIR}/../../dev/ixgbe -DSMP -DIXGBE_FDIR
.include <bsd.kmod.mk>
diff --git a/sys/modules/mps/Makefile b/sys/modules/mps/Makefile
index 49e65da..040b04e 100644
--- a/sys/modules/mps/Makefile
+++ b/sys/modules/mps/Makefile
@@ -4,10 +4,11 @@
KMOD= mps
SRCS= mps_pci.c mps.c mps_sas.c mps_table.c mps_user.c
-SRCS+= opt_mps.h opt_cam.h opt_compat.h
+SRCS+= mps_config.c mps_mapping.c mps_sas_lsi.c
+SRCS+= opt_cam.h opt_compat.h
SRCS+= device_if.h bus_if.h pci_if.h
#CFLAGS += -DMPS_DEBUG
-DEBUG += -g
+DEBUG_FLAGS += -g
.include <bsd.kmod.mk>
diff --git a/sys/net/flowtable.c b/sys/net/flowtable.c
index 7814e3a..fac0f59 100644
--- a/sys/net/flowtable.c
+++ b/sys/net/flowtable.c
@@ -1186,12 +1186,14 @@ keycheck:
rt = __DEVOLATILE(struct rtentry *, fle->f_rt);
lle = __DEVOLATILE(struct llentry *, fle->f_lle);
if ((rt != NULL)
+ && lle != NULL
&& fle->f_fhash == hash
&& flowtable_key_equal(fle, key)
&& (proto == fle->f_proto)
&& (fibnum == fle->f_fibnum)
&& (rt->rt_flags & RTF_UP)
- && (rt->rt_ifp != NULL)) {
+ && (rt->rt_ifp != NULL)
+ && (lle->la_flags & LLE_VALID)) {
fs->ft_hits++;
fle->f_uptime = time_uptime;
fle->f_flags |= flags;
diff --git a/sys/net/if_llatbl.c b/sys/net/if_llatbl.c
index 559a174..8092f0f 100644
--- a/sys/net/if_llatbl.c
+++ b/sys/net/if_llatbl.c
@@ -122,6 +122,7 @@ llentry_free(struct llentry *lle)
("%s: la_numheld %d > 0, pkts_droped %zd", __func__,
lle->la_numheld, pkts_dropped));
+ lle->la_flags &= ~LLE_VALID;
LLE_FREE_LOCKED(lle);
return (pkts_dropped);
diff --git a/sys/net80211/ieee80211_hwmp.c b/sys/net80211/ieee80211_hwmp.c
index 86d8e36..319c862 100644
--- a/sys/net80211/ieee80211_hwmp.c
+++ b/sys/net80211/ieee80211_hwmp.c
@@ -1044,12 +1044,21 @@ hwmp_recv_prep(struct ieee80211vap *vap, struct ieee80211_node *ni,
* Sequence number validation.
*/
hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route);
- if (HWMP_SEQ_LEQ(prep->prep_targetseq, hr->hr_seq)) {
- IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
- "discard PREP from %6D, old seq no %u <= %u",
- prep->prep_targetaddr, ":",
- prep->prep_targetseq, hr->hr_seq);
- return;
+ if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID)) {
+ if (HWMP_SEQ_LT(prep->prep_targetseq, hr->hr_seq)) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "discard PREP from %6D, old seq no %u < %u",
+ prep->prep_targetaddr, ":",
+ prep->prep_targetseq, hr->hr_seq);
+ return;
+ } else if (HWMP_SEQ_LEQ(prep->prep_targetseq, hr->hr_seq) &&
+ prep->prep_metric > rt->rt_metric) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "discard PREP from %6D, new metric %u > %u",
+ prep->prep_targetaddr, ":",
+ prep->prep_metric, rt->rt_metric);
+ return;
+ }
}
hr->hr_seq = prep->prep_targetseq;
diff --git a/sys/netinet/ipfw/dn_sched_qfq.c b/sys/netinet/ipfw/dn_sched_qfq.c
index c37b65e..be7fba3 100644
--- a/sys/netinet/ipfw/dn_sched_qfq.c
+++ b/sys/netinet/ipfw/dn_sched_qfq.c
@@ -608,7 +608,7 @@ static inline void
qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
{
unsigned long mask;
- uint32_t limit, roundedF;
+ uint64_t limit, roundedF;
int slot_shift = cl->grp->slot_shift;
roundedF = qfq_round_down(cl->F, slot_shift);
diff --git a/sys/netinet6/in6.h b/sys/netinet6/in6.h
index 2f986d4..f0cf4f4 100644
--- a/sys/netinet6/in6.h
+++ b/sys/netinet6/in6.h
@@ -235,37 +235,37 @@ extern const struct in6_addr in6addr_linklocal_allv2routers;
* Unspecified
*/
#define IN6_IS_ADDR_UNSPECIFIED(a) \
- ((*(const u_int32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[8]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[12]) == 0))
+ ((a)->__u6_addr.__u6_addr32[0] == 0 && \
+ (a)->__u6_addr.__u6_addr32[1] == 0 && \
+ (a)->__u6_addr.__u6_addr32[2] == 0 && \
+ (a)->__u6_addr.__u6_addr32[3] == 0)
/*
* Loopback
*/
#define IN6_IS_ADDR_LOOPBACK(a) \
- ((*(const u_int32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[8]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[12]) == ntohl(1)))
+ ((a)->__u6_addr.__u6_addr32[0] == 0 && \
+ (a)->__u6_addr.__u6_addr32[1] == 0 && \
+ (a)->__u6_addr.__u6_addr32[2] == 0 && \
+ (a)->__u6_addr.__u6_addr32[3] == ntohl(1))
/*
* IPv4 compatible
*/
#define IN6_IS_ADDR_V4COMPAT(a) \
- ((*(const u_int32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[8]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[12]) != 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[12]) != ntohl(1)))
+ ((a)->__u6_addr.__u6_addr32[0] == 0 && \
+ (a)->__u6_addr.__u6_addr32[1] == 0 && \
+ (a)->__u6_addr.__u6_addr32[2] == 0 && \
+ (a)->__u6_addr.__u6_addr32[3] != 0 && \
+ (a)->__u6_addr.__u6_addr32[3] != ntohl(1))
/*
* Mapped
*/
#define IN6_IS_ADDR_V4MAPPED(a) \
- ((*(const u_int32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \
- (*(const u_int32_t *)(const void *)(&(a)->s6_addr[8]) == ntohl(0x0000ffff)))
+ ((a)->__u6_addr.__u6_addr32[0] == 0 && \
+ (a)->__u6_addr.__u6_addr32[1] == 0 && \
+ (a)->__u6_addr.__u6_addr32[2] == ntohl(0x0000ffff))
/*
* KAME Scope Values
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 97e878a..d564c08 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -474,7 +474,7 @@ nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
/* No caching/ no readaheads. Just read data into the user buffer */
return nfs_readrpc(vp, uio, cred);
- biosize = vp->v_mount->mnt_stat.f_iosize;
+ biosize = vp->v_bufobj.bo_bsize;
seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
error = nfs_bioread_check_cons(vp, td, cred);
@@ -951,7 +951,7 @@ flush_and_restart:
if (vn_rlimit_fsize(vp, uio, td))
return (EFBIG);
- biosize = vp->v_mount->mnt_stat.f_iosize;
+ biosize = vp->v_bufobj.bo_bsize;
/*
* Find all of this file's B_NEEDCOMMIT buffers. If our writes
* would exceed the local maximum per-file write commit size when
@@ -1255,12 +1255,8 @@ nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
bp = getblk(vp, bn, size, 0, 0, 0);
}
- if (vp->v_type == VREG) {
- int biosize;
-
- biosize = mp->mnt_stat.f_iosize;
- bp->b_blkno = bn * (biosize / DEV_BSIZE);
- }
+ if (vp->v_type == VREG)
+ bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
return (bp);
}
@@ -1767,7 +1763,7 @@ nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad
{
struct nfsnode *np = VTONFS(vp);
u_quad_t tsize;
- int biosize = vp->v_mount->mnt_stat.f_iosize;
+ int biosize = vp->v_bufobj.bo_bsize;
int error = 0;
mtx_lock(&np->n_mtx);
diff --git a/sys/nfsclient/nfs_vfsops.c b/sys/nfsclient/nfs_vfsops.c
index 5c257fc..82891f9 100644
--- a/sys/nfsclient/nfs_vfsops.c
+++ b/sys/nfsclient/nfs_vfsops.c
@@ -1116,23 +1116,6 @@ nfs_mount(struct mount *mp)
error = EIO;
goto out;
}
-
- /*
- * Cannot switch to UDP if current rsize/wsize/readdirsize is
- * too large, since there may be an I/O RPC in progress that
- * will get retried after the switch to the UDP socket. These
- * retries will fail over and over and over again.
- */
- if (args.sotype == SOCK_DGRAM &&
- (nmp->nm_rsize > NFS_MAXDGRAMDATA ||
- nmp->nm_wsize > NFS_MAXDGRAMDATA ||
- nmp->nm_readdirsize > NFS_MAXDGRAMDATA)) {
- vfs_mount_error(mp,
- "old rsize/wsize/readdirsize greater than UDP max");
- error = EINVAL;
- goto out;
- }
-
/*
* When doing an update, we can't change from or to
* v3, switch lockd strategies or change cookie translation
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index d58e8de..84680b6 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -114,6 +114,7 @@ __FBSDID("$FreeBSD$");
* correct.
*/
+#include "opt_compat.h"
#include "opt_kstack_pages.h"
#include <sys/param.h>
@@ -1445,6 +1446,8 @@ moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
return (void *)va;
}
+extern int elf32_nxstack;
+
void
moea64_init(mmu_t mmu)
{
@@ -1464,6 +1467,10 @@ moea64_init(mmu_t mmu)
uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
}
+#ifdef COMPAT_FREEBSD32
+ elf32_nxstack = 1;
+#endif
+
moea64_initialized = TRUE;
}
diff --git a/sys/powerpc/booke/machdep.c b/sys/powerpc/booke/machdep.c
index c2b5e6f..82a37e1 100644
--- a/sys/powerpc/booke/machdep.c
+++ b/sys/powerpc/booke/machdep.c
@@ -192,6 +192,8 @@ void print_kernel_section_addr(void);
void print_kenv(void);
u_int booke_init(uint32_t, uint32_t);
+extern int elf32_nxstack;
+
static void
cpu_e500_startup(void *dummy)
{
@@ -227,6 +229,9 @@ cpu_e500_startup(void *dummy)
/* Set up buffers, so they can be used to read disk labels. */
bufinit();
vm_pager_bufferinit();
+
+ /* Cpu supports execution permissions on the pages. */
+ elf32_nxstack = 1;
}
static char *
diff --git a/sys/sparc64/include/clock.h b/sys/sparc64/include/clock.h
index fd57731..34b59d8 100644
--- a/sys/sparc64/include/clock.h
+++ b/sys/sparc64/include/clock.h
@@ -1,27 +1,5 @@
/*-
- * Copyright (c) 2001 Jake Burkholder.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * This file is in the public domain.
*
* $FreeBSD$
*/
@@ -29,10 +7,4 @@
#ifndef _MACHINE_CLOCK_H_
#define _MACHINE_CLOCK_H_
-extern void (*delay_func)(int usec);
-extern u_long clock_boot;
-
-void delay_boot(int usec);
-void delay_tick(int usec);
-
#endif /* !_MACHINE_CLOCK_H_ */
diff --git a/sys/sparc64/include/cpu.h b/sys/sparc64/include/cpu.h
index 1634477..401f39b 100644
--- a/sys/sparc64/include/cpu.h
+++ b/sys/sparc64/include/cpu.h
@@ -53,8 +53,8 @@ extern char btext[];
extern char etext[];
void cheetah_init(u_int cpu_impl);
-void cpu_halt(void);
-void cpu_reset(void);
+void cpu_halt(void) __dead2;
+void cpu_reset(void) __dead2;
void fork_trampoline(void);
void swi_vm(void *v);
void zeus_init(u_int cpu_impl);
diff --git a/sys/sparc64/include/ofw_machdep.h b/sys/sparc64/include/ofw_machdep.h
index 658d9c7..bc69b3b 100644
--- a/sys/sparc64/include/ofw_machdep.h
+++ b/sys/sparc64/include/ofw_machdep.h
@@ -37,8 +37,9 @@ typedef uint64_t cell_t;
int OF_decode_addr(phandle_t, int, int *, bus_addr_t *);
void OF_getetheraddr(device_t, u_char *);
u_int OF_getscsinitid(device_t);
-void cpu_shutdown(void *);
+void OF_panic(const char *fmt, ...) __dead2 __printflike(1, 2);
+void cpu_shutdown(void *) __dead2;
int ofw_entry(void *);
-void ofw_exit(void *);
+void ofw_exit(void *) __dead2;
#endif /* _MACHINE_OFW_MACHDEP_H_ */
diff --git a/sys/sparc64/include/vmparam.h b/sys/sparc64/include/vmparam.h
index a0d5ac4..15741f9 100644
--- a/sys/sparc64/include/vmparam.h
+++ b/sys/sparc64/include/vmparam.h
@@ -218,7 +218,7 @@
* is the total KVA space allocated for kmem_map.
*/
#ifndef VM_KMEM_SIZE_SCALE
-#define VM_KMEM_SIZE_SCALE (tsb_kernel_ldd_phys == 0 ? 3 : 1)
+#define VM_KMEM_SIZE_SCALE (tsb_kernel_ldd_phys == 0 ? 3 : 2)
#endif
/*
diff --git a/sys/sparc64/pci/schizo.c b/sys/sparc64/pci/schizo.c
index 627018b..852c880 100644
--- a/sys/sparc64/pci/schizo.c
+++ b/sys/sparc64/pci/schizo.c
@@ -178,6 +178,8 @@ struct schizo_icarg {
bus_addr_t sica_clr;
};
+#define SCHIZO_CDMA_TIMEOUT 1 /* 1 second per try */
+#define SCHIZO_CDMA_TRIES 15
#define SCHIZO_PERF_CNT_QLTY 100
#define SCHIZO_SPC_BARRIER(spc, sc, offs, len, flags) \
@@ -706,13 +708,15 @@ schizo_attach(device_t dev)
i = INTINO(bus_get_resource_start(dev, SYS_RES_IRQ,
4));
if (i == STX_CDMA_A_INO || i == STX_CDMA_B_INO) {
- (void)schizo_get_intrmap(sc, i, NULL,
- &sc->sc_cdma_clr);
+ sc->sc_cdma_vec = INTMAP_VEC(sc->sc_ign, i);
+ (void)schizo_get_intrmap(sc, i,
+ &sc->sc_cdma_map, &sc->sc_cdma_clr);
schizo_set_intr(sc, 4, i, schizo_cdma);
} else {
i = STX_CDMA_A_INO + sc->sc_half;
+ sc->sc_cdma_vec = INTMAP_VEC(sc->sc_ign, i);
if (bus_set_resource(dev, SYS_RES_IRQ, 5,
- INTMAP_VEC(sc->sc_ign, i), 1) != 0)
+ sc->sc_cdma_vec, 1) != 0)
panic("%s: failed to add CDMA "
"interrupt", __func__);
j = schizo_intr_register(sc, i);
@@ -720,8 +724,8 @@ schizo_attach(device_t dev)
panic("%s: could not register "
"interrupt controller for CDMA "
"(%d)", __func__, j);
- (void)schizo_get_intrmap(sc, i, NULL,
- &sc->sc_cdma_clr);
+ (void)schizo_get_intrmap(sc, i,
+ &sc->sc_cdma_map, &sc->sc_cdma_clr);
schizo_set_intr(sc, 5, i, schizo_cdma);
}
} else {
@@ -988,7 +992,8 @@ schizo_cdma(void *arg)
{
struct schizo_softc *sc = arg;
- atomic_store_rel_32(&sc->sc_cdma_state, SCHIZO_CDMA_STATE_RECEIVED);
+ atomic_cmpset_32(&sc->sc_cdma_state, SCHIZO_CDMA_STATE_PENDING,
+ SCHIZO_CDMA_STATE_RECEIVED);
return (FILTER_HANDLED);
}
@@ -1153,7 +1158,10 @@ schizo_dmamap_sync(bus_dma_tag_t dt, bus_dmamap_t map, bus_dmasync_op_t op)
struct timeval cur, end;
struct schizo_iommu_state *sis = dt->dt_cookie;
struct schizo_softc *sc = sis->sis_sc;
- int res;
+ int i, res;
+#ifdef INVARIANTS
+ register_t pil;
+#endif
if ((map->dm_flags & DMF_STREAMED) != 0) {
iommu_dma_methods.dm_dmamap_sync(dt, map, op);
@@ -1170,20 +1178,36 @@ schizo_dmamap_sync(bus_dma_tag_t dt, bus_dmamap_t map, bus_dmasync_op_t op)
* but given that these disable interrupts we have to emulate
* one.
*/
+ critical_enter();
+ KASSERT((rdpr(pstate) & PSTATE_IE) != 0,
+ ("%s: interrupts disabled", __func__));
+ KASSERT((pil = rdpr(pil)) <= PIL_BRIDGE,
+ ("%s: PIL too low (%ld)", __func__, pil));
for (; atomic_cmpset_acq_32(&sc->sc_cdma_state,
SCHIZO_CDMA_STATE_IDLE, SCHIZO_CDMA_STATE_PENDING) == 0;)
;
- SCHIZO_PCI_WRITE_8(sc, sc->sc_cdma_clr, INTCLR_RECEIVED);
- microuptime(&cur);
- end.tv_sec = 15;
- end.tv_usec = 0;
- timevaladd(&end, &cur);
- for (; (res = atomic_cmpset_rel_32(&sc->sc_cdma_state,
- SCHIZO_CDMA_STATE_RECEIVED, SCHIZO_CDMA_STATE_IDLE)) ==
- 0 && timevalcmp(&cur, &end, <=);)
+ SCHIZO_PCI_WRITE_8(sc, sc->sc_cdma_map,
+ INTMAP_ENABLE(sc->sc_cdma_vec, PCPU_GET(mid)));
+ for (i = 0; i < SCHIZO_CDMA_TRIES; i++) {
+ if (i > 0)
+ printf("%s: try %d\n", __func__, i);
+ SCHIZO_PCI_WRITE_8(sc, sc->sc_cdma_clr,
+ INTCLR_RECEIVED);
microuptime(&cur);
+ end.tv_sec = SCHIZO_CDMA_TIMEOUT;
+ end.tv_usec = 0;
+ timevaladd(&end, &cur);
+ for (; (res = atomic_cmpset_rel_32(&sc->sc_cdma_state,
+ SCHIZO_CDMA_STATE_RECEIVED,
+ SCHIZO_CDMA_STATE_IDLE)) == 0 &&
+ timevalcmp(&cur, &end, <=);)
+ microuptime(&cur);
+ if (res != 0)
+ break;
+ }
if (res == 0)
panic("%s: DMA does not sync", __func__);
+ critical_exit();
}
if ((op & BUS_DMASYNC_PREWRITE) != 0)
@@ -1352,7 +1376,7 @@ schizo_alloc_resource(device_t bus, device_t child, int type, int *rid,
panic("%s: XXX: interrupt range", __func__);
start = end = INTMAP_VEC(sc->sc_ign, end);
return (bus_generic_alloc_resource(bus, child, type, rid,
- start, end, count, flags));
+ start, end, count, flags));
case SYS_RES_MEMORY:
rm = &sc->sc_pci_mem_rman;
break;
diff --git a/sys/sparc64/pci/schizovar.h b/sys/sparc64/pci/schizovar.h
index 3fe7cdc..ab339c8 100644
--- a/sys/sparc64/pci/schizovar.h
+++ b/sys/sparc64/pci/schizovar.h
@@ -59,7 +59,9 @@ struct schizo_softc {
#define SCHIZO_FLAGS_BSWAR (1 << 0)
#define SCHIZO_FLAGS_XMODE (1 << 1)
+ bus_addr_t sc_cdma_map;
bus_addr_t sc_cdma_clr;
+ uint32_t sc_cdma_vec;
uint32_t sc_cdma_state;
#define SCHIZO_CDMA_STATE_IDLE (1 << 0)
#define SCHIZO_CDMA_STATE_PENDING (1 << 1)
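
The new PENDING state closes a race against stray CDMA interrupts: the waiter claims the engine with IDLE -> PENDING, the interrupt filter will only advance PENDING -> RECEIVED, and the waiter retires RECEIVED -> IDLE. Condensed from the hunks above, the three transitions are:

    /* Waiter (schizo_dmamap_sync): claim the engine. */
    while (atomic_cmpset_acq_32(&sc->sc_cdma_state,
        SCHIZO_CDMA_STATE_IDLE, SCHIZO_CDMA_STATE_PENDING) == 0)
            ;

    /* Filter (schizo_cdma): acknowledge only an expected interrupt. */
    atomic_cmpset_32(&sc->sc_cdma_state, SCHIZO_CDMA_STATE_PENDING,
        SCHIZO_CDMA_STATE_RECEIVED);

    /* Waiter: consume the acknowledgement. */
    res = atomic_cmpset_rel_32(&sc->sc_cdma_state,
        SCHIZO_CDMA_STATE_RECEIVED, SCHIZO_CDMA_STATE_IDLE);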
diff --git a/sys/sparc64/sparc64/cache.c b/sys/sparc64/sparc64/cache.c
index d29a294..0dc3aa1 100644
--- a/sys/sparc64/sparc64/cache.c
+++ b/sys/sparc64/sparc64/cache.c
@@ -142,24 +142,24 @@ cache_init(struct pcpu *pcpu)
"l2-cache-line-size", pcpu->pc_cache.ec_linesize) == -1 ||
OF_GET(pcpu->pc_node, !use_new_prop ? "ecache-associativity" :
"l2-cache-associativity", pcpu->pc_cache.ec_assoc) == -1)
- panic("cache_init: could not retrieve cache parameters");
+ OF_panic("%s: could not retrieve cache parameters", __func__);
set = pcpu->pc_cache.ic_size / pcpu->pc_cache.ic_assoc;
if ((set & ~(1UL << (ffs(set) - 1))) != 0)
- panic("cache_init: I$ set size not a power of 2");
+ OF_panic("%s: I$ set size not a power of 2", __func__);
if ((pcpu->pc_cache.dc_size &
~(1UL << (ffs(pcpu->pc_cache.dc_size) - 1))) != 0)
- panic("cache_init: D$ size not a power of 2");
+ OF_panic("%s: D$ size not a power of 2", __func__);
/*
* For CPUs which don't support unaliasing in hardware ensure that
* the data cache doesn't have too many virtual colors.
*/
if (dcache_color_ignore == 0 && ((pcpu->pc_cache.dc_size /
pcpu->pc_cache.dc_assoc) / PAGE_SIZE) != DCACHE_COLORS)
- panic("cache_init: too many D$ colors");
+ OF_panic("%s: too many D$ colors", __func__);
set = pcpu->pc_cache.ec_size / pcpu->pc_cache.ec_assoc;
if ((set & ~(1UL << (ffs(set) - 1))) != 0)
- panic("cache_init: E$ set size not a power of 2");
+ OF_panic("%s: E$ set size not a power of 2", __func__);
if (pcpu->pc_impl >= CPU_IMPL_ULTRASPARCIII) {
cache_enable = cheetah_cache_enable;
@@ -184,5 +184,5 @@ cache_init(struct pcpu *pcpu)
tlb_flush_nonlocked = spitfire_tlb_flush_nonlocked;
tlb_flush_user = spitfire_tlb_flush_user;
} else
- panic("cache_init: unknown CPU");
+ OF_panic("%s: unknown CPU", __func__);
}
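
The set-size checks in cache_init() rely on the identity that a power of two has exactly one bit set, so clearing the lowest set bit found by ffs() must leave zero. A hypothetical helper mirroring that test:

    /* Non-zero if x is a power of two; mirrors the cache_init() checks. */
    static __inline int
    is_pow2(u_long x)
    {

            return (x != 0 && (x & ~(1UL << (ffs(x) - 1))) == 0);
    }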
diff --git a/sys/sparc64/sparc64/clock.c b/sys/sparc64/sparc64/clock.c
index e618f5d..f62f022 100644
--- a/sys/sparc64/sparc64/clock.c
+++ b/sys/sparc64/sparc64/clock.c
@@ -33,36 +33,12 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/sched.h>
-#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
-void (*delay_func)(int usec);
-u_long clock_boot;
-
void
DELAY(int usec)
{
-
- (*delay_func)(usec);
-}
-
-void
-delay_boot(int usec)
-{
- u_long end;
-
- if (usec < 0)
- return;
-
- end = rd(tick) + (u_long)usec * clock_boot / 1000000;
- while (rd(tick) < end)
- cpu_spinwait();
-}
-
-void
-delay_tick(int usec)
-{
u_long end;
if (usec < 0)
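
With the boot-time indirection removed, DELAY() always spins on the TICK register; the surviving body (truncated by the hunk above) converts microseconds to ticks using the CPU clock rate. A sketch of that conversion, assuming the per-CPU clock field holds ticks per second:

    void
    DELAY(int usec)
    {
            u_long end;

            if (usec < 0)
                    return;
            /* usec -> ticks at PCPU_GET(clock) ticks per second. */
            end = rd(tick) + (u_long)usec * PCPU_GET(clock) / 1000000;
            while (rd(tick) < end)
                    cpu_spinwait();
    }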
diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c
index 2720448..7c7c234 100644
--- a/sys/sparc64/sparc64/machdep.c
+++ b/sys/sparc64/sparc64/machdep.c
@@ -88,7 +88,6 @@ __FBSDID("$FreeBSD$");
#include <machine/bus.h>
#include <machine/cache.h>
-#include <machine/clock.h>
#include <machine/cmt.h>
#include <machine/cpu.h>
#include <machine/fireplane.h>
@@ -376,7 +375,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
/*
* Parse metadata if present and fetch parameters. Must be before the
- * console is inited so cninit gets the right value of boothowto.
+ * console is inited so cninit() gets the right value of boothowto.
*/
if (mdp != NULL) {
preload_metadata = mdp;
@@ -421,37 +420,19 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
root = OF_peer(0);
pc->pc_node = find_bsp(root, pc->pc_mid, cpu_impl);
if (pc->pc_node == 0)
- OF_exit();
+ OF_panic("%s: cannot find boot CPU node", __func__);
if (OF_getprop(pc->pc_node, "clock-frequency", &pc->pc_clock,
sizeof(pc->pc_clock)) <= 0)
- OF_exit();
-
- /*
- * Provide a DELAY() that works before PCPU_REG is set. We can't
- * set PCPU_REG without also taking over the trap table or the
- * firmware will overwrite it. Unfortunately, it's way to early
- * to also take over the trap table at this point.
- */
- clock_boot = pc->pc_clock;
- delay_func = delay_boot;
-
- /*
- * Initialize the console before printing anything.
- * NB: the low-level console drivers require a working DELAY() at
- * this point.
- */
- cninit();
+ OF_panic("%s: cannot determine boot CPU clock", __func__);
/*
* Panic if there is no metadata. Most likely the kernel was booted
* directly, instead of through loader(8).
*/
if (mdp == NULL || kmdp == NULL || end == 0 ||
- kernel_tlb_slots == 0 || kernel_tlbs == NULL) {
- printf("sparc64_init: missing loader metadata.\n"
- "This probably means you are not using loader(8).\n");
- panic("sparc64_init");
- }
+ kernel_tlb_slots == 0 || kernel_tlbs == NULL)
+ OF_panic("%s: missing loader metadata.\nThis probably means "
+ "you are not using loader(8).", __func__);
/*
* Work around the broken loader behavior of not demapping no
@@ -461,7 +442,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
if (bootverbose)
- printf("demapping unused kernel TLB slot "
+ OF_printf("demapping unused kernel TLB slot "
"(va %#lx - %#lx)\n", va, va + PAGE_SIZE_4M - 1);
stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
ASI_DMMU_DEMAP, 0);
@@ -479,13 +460,15 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
*/
if (OF_getprop(pc->pc_node, "#dtlb-entries", &dtlb_slots,
sizeof(dtlb_slots)) == -1)
- panic("sparc64_init: cannot determine number of dTLB slots");
+ OF_panic("%s: cannot determine number of dTLB slots",
+ __func__);
if (OF_getprop(pc->pc_node, "#itlb-entries", &itlb_slots,
sizeof(itlb_slots)) == -1)
- panic("sparc64_init: cannot determine number of iTLB slots");
+ OF_panic("%s: cannot determine number of iTLB slots",
+ __func__);
/*
- * Initialize and enable the caches. Note that his may include
+ * Initialize and enable the caches. Note that this may include
* applying workarounds.
*/
cache_init(pc);
@@ -573,9 +556,13 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
sun4u_set_traptable(tl0_base);
/*
- * It's now safe to use the real DELAY().
+ * Initialize the console.
+ * NB: the low-level console drivers require a working DELAY() and
+ * some compiler optimizations may cause the curthread accesses of
+ * mutex(9) to be factored out even if the latter aren't actually
+ * called, both requiring PCPU_REG to be set.
*/
- delay_func = delay_tick;
+ cninit();
/*
* Initialize the dynamic per-CPU area for the BSP and the message
diff --git a/sys/sparc64/sparc64/ofw_machdep.c b/sys/sparc64/sparc64/ofw_machdep.c
index f1ce64b..7fcf5c8 100644
--- a/sys/sparc64/sparc64/ofw_machdep.c
+++ b/sys/sparc64/sparc64/ofw_machdep.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <machine/bus.h>
#include <machine/idprom.h>
#include <machine/ofw_machdep.h>
+#include <machine/stdarg.h>
void
OF_getetheraddr(device_t dev, u_char *addr)
@@ -81,6 +82,19 @@ OF_getscsinitid(device_t dev)
return (7);
}
+void
+OF_panic(const char *fmt, ...)
+{
+ char buf[256];
+ va_list ap;
+
+ va_start(ap, fmt);
+ (void)vsnprintf(buf, sizeof(buf), fmt, ap);
+ OF_printf("OF_panic: %s\n", buf);
+ va_end(ap);
+ OF_exit();
+}
+
static __inline uint32_t
phys_hi_mask_space(const char *bus, uint32_t phys_hi)
{
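
OF_panic() formats into a fixed buffer with vsnprintf(), prints through the firmware, and exits to it, so it depends on neither a console driver nor a valid curthread; that is what makes it safe at the pre-cninit() call sites converted throughout this change. A hypothetical early-boot call site:

    if (OF_getprop(node, "reg", &reg, sizeof(reg)) == -1)
            OF_panic("%s: cannot read reg property", __func__);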
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 496aff5..d7ba44b 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -333,16 +333,16 @@ pmap_bootstrap(u_int cpu_impl)
* pmap_bootstrap_alloc is called.
*/
if ((pmem = OF_finddevice("/memory")) == -1)
- panic("pmap_bootstrap: finddevice /memory");
+ OF_panic("%s: finddevice /memory", __func__);
if ((sz = OF_getproplen(pmem, "available")) == -1)
- panic("pmap_bootstrap: getproplen /memory/available");
+ OF_panic("%s: getproplen /memory/available", __func__);
if (sizeof(phys_avail) < sz)
- panic("pmap_bootstrap: phys_avail too small");
+ OF_panic("%s: phys_avail too small", __func__);
if (sizeof(mra) < sz)
- panic("pmap_bootstrap: mra too small");
+ OF_panic("%s: mra too small", __func__);
bzero(mra, sz);
if (OF_getprop(pmem, "available", mra, sz) == -1)
- panic("pmap_bootstrap: getprop /memory/available");
+ OF_panic("%s: getprop /memory/available", __func__);
sz /= sizeof(*mra);
CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
qsort(mra, sz, sizeof (*mra), mr_cmp);
@@ -414,7 +414,7 @@ pmap_bootstrap(u_int cpu_impl)
*/
pa = pmap_bootstrap_alloc(tsb_kernel_size, colors);
if (pa & PAGE_MASK_4M)
- panic("pmap_bootstrap: TSB unaligned\n");
+ OF_panic("%s: TSB unaligned", __func__);
tsb_kernel_phys = pa;
if (tsb_kernel_ldd_phys == 0) {
tsb_kernel =
@@ -461,7 +461,7 @@ pmap_bootstrap(u_int cpu_impl)
#define PATCH_ASI(addr, asi) do { \
if (addr[0] != WR_R_I(IF_F3_RD(addr[0]), 0x0, \
IF_F3_RS1(addr[0]))) \
- panic("%s: patched instructions have changed", \
+ OF_panic("%s: patched instructions have changed", \
__func__); \
addr[0] |= EIF_IMM((asi), 13); \
flush(addr); \
@@ -470,7 +470,7 @@ pmap_bootstrap(u_int cpu_impl)
#define PATCH_LDD(addr, asi) do { \
if (addr[0] != LDDA_R_I_R(IF_F3_RD(addr[0]), 0x0, \
IF_F3_RS1(addr[0]), IF_F3_RS2(addr[0]))) \
- panic("%s: patched instructions have changed", \
+ OF_panic("%s: patched instructions have changed", \
__func__); \
addr[0] |= EIF_F3_IMM_ASI(asi); \
flush(addr); \
@@ -481,7 +481,7 @@ pmap_bootstrap(u_int cpu_impl)
addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, \
IF_F3_RS1(addr[1])) || \
addr[3] != SETHI(IF_F2_RD(addr[3]), 0x0)) \
- panic("%s: patched instructions have changed", \
+ OF_panic("%s: patched instructions have changed", \
__func__); \
addr[0] |= EIF_IMM((val) >> 42, 22); \
addr[1] |= EIF_IMM((val) >> 32, 10); \
@@ -495,7 +495,7 @@ pmap_bootstrap(u_int cpu_impl)
if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \
addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, \
IF_F3_RS1(addr[1]))) \
- panic("%s: patched instructions have changed", \
+ OF_panic("%s: patched instructions have changed", \
__func__); \
addr[0] |= EIF_IMM((val) >> 10, 22); \
addr[1] |= EIF_IMM((val), 10); \
@@ -604,14 +604,15 @@ pmap_bootstrap(u_int cpu_impl)
* Add the PROM mappings to the kernel TSB.
*/
if ((vmem = OF_finddevice("/virtual-memory")) == -1)
- panic("pmap_bootstrap: finddevice /virtual-memory");
+ OF_panic("%s: finddevice /virtual-memory", __func__);
if ((sz = OF_getproplen(vmem, "translations")) == -1)
- panic("pmap_bootstrap: getproplen translations");
+ OF_panic("%s: getproplen translations", __func__);
if (sizeof(translations) < sz)
- panic("pmap_bootstrap: translations too small");
+ OF_panic("%s: translations too small", __func__);
bzero(translations, sz);
if (OF_getprop(vmem, "translations", translations, sz) == -1)
- panic("pmap_bootstrap: getprop /virtual-memory/translations");
+ OF_panic("%s: getprop /virtual-memory/translations",
+ __func__);
sz /= sizeof(*translations);
translations_size = sz;
CTR0(KTR_PMAP, "pmap_bootstrap: translations");
@@ -649,11 +650,11 @@ pmap_bootstrap(u_int cpu_impl)
* calls in that situation.
*/
if ((sz = OF_getproplen(pmem, "reg")) == -1)
- panic("pmap_bootstrap: getproplen /memory/reg");
+ OF_panic("%s: getproplen /memory/reg", __func__);
if (sizeof(sparc64_memreg) < sz)
- panic("pmap_bootstrap: sparc64_memreg too small");
+ OF_panic("%s: sparc64_memreg too small", __func__);
if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
- panic("pmap_bootstrap: getprop /memory/reg");
+ OF_panic("%s: getprop /memory/reg", __func__);
sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
/*
@@ -726,7 +727,7 @@ pmap_bootstrap_alloc(vm_size_t size, uint32_t colors)
phys_avail[i] += size;
return (pa);
}
- panic("pmap_bootstrap_alloc");
+ OF_panic("%s: no suitable region found", __func__);
}
/*
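
Each PATCH_* macro follows the same guard-then-patch shape: verify that the compiled instruction template still matches the encoding the patch expects, OR the immediate into it, and flush the instruction cache. Condensed, with a hypothetical EXPECTED() predicate standing in for the instruction-format comparisons:

    #define PATCH_IMM13(addr, val) do {                              \
            if (!EXPECTED(addr))    /* template changed: fail loudly */ \
                    OF_panic("%s: patched instructions have changed", \
                        __func__);                                   \
            (addr)[0] |= EIF_IMM((val), 13);  /* splice immediate */ \
            flush(addr);                      /* keep I$ coherent */ \
    } while (0)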
diff --git a/sys/sparc64/sparc64/support.S b/sys/sparc64/sparc64/support.S
index bb247d3..02bcfdd 100644
--- a/sys/sparc64/sparc64/support.S
+++ b/sys/sparc64/sparc64/support.S
@@ -580,8 +580,8 @@ fpu_fault_begin:
* void spitfire_block_copy(void *src, void *dst, size_t len)
*/
ENTRY(spitfire_block_copy)
- rdpr %pil, %o3
- wrpr %g0, PIL_TICK, %pil
+ rdpr %pstate, %o3
+ wrpr %g0, PSTATE_NORMAL, %pstate
wr %g0, ASI_BLK_S, %asi
wr %g0, FPRS_FEF, %fprs
@@ -603,7 +603,7 @@ ENTRY(spitfire_block_copy)
or %o4, PCB_FEF, %o4
stx %o4, [PCB_REG + PCB_FLAGS]
-1: wrpr %o3, 0, %pil
+1: wrpr %o3, 0, %pstate
ldda [%o0] %asi, %f0
add %o0, 64, %o0
@@ -653,8 +653,8 @@ END(spitfire_block_copy)
ENTRY(zeus_block_copy)
prefetch [%o0 + (0 * 64)], 0
- rdpr %pil, %o3
- wrpr %g0, PIL_TICK, %pil
+ rdpr %pstate, %o3
+ wrpr %g0, PSTATE_NORMAL, %pstate
wr %g0, ASI_BLK_S, %asi
wr %g0, FPRS_FEF, %fprs
@@ -676,7 +676,7 @@ ENTRY(zeus_block_copy)
or %o4, PCB_FEF, %o4
stx %o4, [PCB_REG + PCB_FLAGS]
-1: wrpr %o3, 0, %pil
+1: wrpr %o3, 0, %pstate
ldd [%o0 + (0 * 8)], %f0
prefetch [%o0 + (1 * 64)], 0
@@ -764,8 +764,8 @@ END(zeus_block_copy)
*/
ALTENTRY(zeus_block_zero)
ENTRY(spitfire_block_zero)
- rdpr %pil, %o3
- wrpr %g0, PIL_TICK, %pil
+ rdpr %pstate, %o3
+ wrpr %g0, PSTATE_NORMAL, %pstate
wr %g0, ASI_BLK_S, %asi
wr %g0, FPRS_FEF, %fprs
@@ -787,7 +787,7 @@ ENTRY(spitfire_block_zero)
or %o4, PCB_FEF, %o4
stx %o4, [PCB_REG + PCB_FLAGS]
-1: wrpr %o3, 0, %pil
+1: wrpr %o3, 0, %pstate
fzero %f0
fzero %f2
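
Raising the PIL to PIL_TICK still let higher-priority interrupts fire while the FPU state was live; saving the PSTATE and writing PSTATE_NORMAL clears PSTATE.IE, shutting out all maskable interrupts for the critical section. The equivalent bracket at the C level (a sketch only; the real routines stay in assembler for the block loads and stores):

    register_t s;

    s = intr_disable();     /* save PSTATE, clear PSTATE.IE */
    /* ... use %fprs and the FP register file safely ... */
    intr_restore(s);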
diff --git a/sys/sys/elf_common.h b/sys/sys/elf_common.h
index 489851a..12d79ca 100644
--- a/sys/sys/elf_common.h
+++ b/sys/sys/elf_common.h
@@ -328,6 +328,7 @@ typedef struct {
#define PT_SUNW_UNWIND 0x6464e550 /* amd64 UNWIND program header */
#define PT_GNU_EH_FRAME 0x6474e550
#define PT_GNU_STACK 0x6474e551
+#define PT_GNU_RELRO 0x6474e552
#define PT_LOSUNW 0x6ffffffa
#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */
#define PT_SUNWSTACK 0x6ffffffb /* describes the stack segment */
diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h
index 76e94be..a6450b3 100644
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -50,6 +50,7 @@
#define M_ZERO 0x0100 /* bzero the allocation */
#define M_NOVM 0x0200 /* don't ask VM for pages */
#define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */
+#define M_NODUMP 0x0800 /* don't dump pages in this allocation */
#define M_MAGIC 877983977 /* time when first defined :-) */
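
M_NODUMP lets callers flag allocations whose contents have no post-mortem value so their pages stay out of minidumps; the flag takes effect when the request is backed directly by VM pages (see the vm_contig.c and vm_kern.c hunks below). A hypothetical use, with BUF_SIZE and the malloc type chosen for illustration:

    buf = malloc(BUF_SIZE, M_DEVBUF, M_WAITOK | M_ZERO | M_NODUMP);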
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 921ba46..35bca55 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -58,7 +58,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1000005 /* Master, propagated to newvers */
+#define __FreeBSD_version 1000006 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index ba00a88..76f3355 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -235,6 +235,7 @@ struct thread {
short td_locks; /* (k) Count of non-spin locks. */
short td_rw_rlocks; /* (k) Count of rwlock read locks. */
short td_lk_slocks; /* (k) Count of lockmgr shared locks. */
+ short td_stopsched; /* (k) Scheduler stopped. */
struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */
const char *td_lockname; /* (t) Name of lock blocked on. */
LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index e5d60d4..47b974a 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -47,7 +47,6 @@
extern int cold; /* nonzero if we are doing a cold boot */
extern int rebooting; /* kern_reboot() has been called. */
-extern int stop_scheduler; /* only one thread runs after panic */
extern const char *panicstr; /* panic message */
extern char version[]; /* system version */
extern char copyright[]; /* system copyright */
@@ -113,7 +112,7 @@ enum VM_GUEST { VM_GUEST_NO = 0, VM_GUEST_VM, VM_GUEST_XEN };
* Otherwise, the kernel will deadlock since the scheduler isn't
* going to run the thread that holds any lock we need.
*/
-#define SCHEDULER_STOPPED() __predict_false(stop_scheduler)
+#define SCHEDULER_STOPPED() __predict_false(curthread->td_stopsched)
/*
* XXX the hints declarations are even more misplaced than most declarations
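
The comment above the macro states the contract: once a panicked kernel has stopped the scheduler, blocking on a lock would deadlock, so the lock paths short-circuit instead. The typical guard inside a lock primitive looks like this sketch; keeping the flag in the thread means the test needs nothing beyond curthread:

    if (SCHEDULER_STOPPED())
            return;         /* post-panic: skip locking entirely */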
diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index fbba22f..e17e6ef 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -248,6 +248,10 @@ int uma_zsecond_add(uma_zone_t zone, uma_zone_t master);
* backend pages and can fail early.
*/
#define UMA_ZONE_VTOSLAB 0x2000 /* Zone uses vtoslab for lookup. */
+#define UMA_ZONE_NODUMP 0x4000 /*
+ * Zone's pages will not be included in
+ * mini-dumps.
+ */
/*
* These flags are shared between the keg and zone. In zones wishing to add
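
A zone created with UMA_ZONE_NODUMP gets M_NODUMP OR'ed into each slab allocation (see the keg_alloc_slab() hunk below), so all of the zone's backing pages are excluded from minidumps. A hypothetical creation, with the name and item type chosen for illustration:

    zone = uma_zcreate("scratch", sizeof(struct scratch),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);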
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 9fbea55..eaa2faf 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -845,6 +845,9 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
else
wait &= ~M_ZERO;
+ if (keg->uk_flags & UMA_ZONE_NODUMP)
+ wait |= M_NODUMP;
+
/* zone is passed for legacy reasons. */
mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
if (mem == NULL) {
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index ea2c904..4deba5b 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -315,6 +315,8 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOBUSY;
if (flags & M_ZERO)
pflags |= VM_ALLOC_ZERO;
+ if (flags & M_NODUMP)
+ pflags |= VM_ALLOC_NODUMP;
VM_OBJECT_LOCK(object);
tries = 0;
retry:
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 2c23c48..54f86dd 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -382,6 +382,8 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
if (flags & M_ZERO)
pflags |= VM_ALLOC_ZERO;
+ if (flags & M_NODUMP)
+ pflags |= VM_ALLOC_NODUMP;
VM_OBJECT_LOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index ee4f2a4..8fec8b0 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1305,6 +1305,7 @@ vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
* VM_ALLOC_IFNOTCACHED return NULL, do not reactivate if the page
* is cached
* VM_ALLOC_NOBUSY do not set the flag VPO_BUSY on the page
+ * VM_ALLOC_NODUMP do not include the page in a kernel core dump
* VM_ALLOC_NOOBJ page is not associated with an object and
* should not have the flag VPO_BUSY set
* VM_ALLOC_WIRED wire the allocated page
@@ -1429,6 +1430,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
* must be cleared before the free page queues lock is released.
*/
flags = 0;
+ if (req & VM_ALLOC_NODUMP)
+ flags |= PG_NODUMP;
if (m->flags & PG_ZERO) {
vm_page_zero_count--;
if (req & VM_ALLOC_ZERO)
@@ -1599,6 +1602,8 @@ retry:
flags = 0;
if ((req & VM_ALLOC_ZERO) != 0)
flags = PG_ZERO;
+ if ((req & VM_ALLOC_NODUMP) != 0)
+ flags |= PG_NODUMP;
if ((req & VM_ALLOC_WIRED) != 0)
atomic_add_int(&cnt.v_wire_count, npages);
oflags = VPO_UNMANAGED;
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 85df6fb..ca02de0 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -263,6 +263,7 @@ extern struct vpglocks pa_lock[];
#define PG_MARKER 0x10 /* special queue marker page */
#define PG_SLAB 0x20 /* object pointer is actually a slab */
#define PG_WINATCFLS 0x40 /* flush dirty page on inactive q */
+#define PG_NODUMP 0x80 /* don't include this page in the dump */
/*
* Misc constants.
@@ -350,6 +351,7 @@ extern struct vpglocks vm_page_queue_lock;
#define VM_ALLOC_IFCACHED 0x0400 /* Fail if the page is not cached */
#define VM_ALLOC_IFNOTCACHED 0x0800 /* Fail if the page is cached */
#define VM_ALLOC_IGN_SBUSY 0x1000 /* vm_page_grab() only */
+#define VM_ALLOC_NODUMP 0x2000 /* don't include in dump */
#define VM_ALLOC_COUNT_SHIFT 16
#define VM_ALLOC_COUNT(count) ((count) << VM_ALLOC_COUNT_SHIFT)
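
End to end, the intent travels M_NODUMP -> VM_ALLOC_NODUMP -> PG_NODUMP: malloc/UMA callers set the first, the kmem layer translates it, and vm_page_alloc() latches it on the page. The minidump walker (outside this diff) can then skip such pages; a sketch of that consumer side:

    if ((m->flags & PG_NODUMP) == 0)
            dump_add_page(VM_PAGE_TO_PHYS(m));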
diff --git a/sys/xen/interface/io/netif.h b/sys/xen/interface/io/netif.h
index fbb5c27..261b7d9 100644
--- a/sys/xen/interface/io/netif.h
+++ b/sys/xen/interface/io/netif.h
@@ -42,7 +42,7 @@
* This is the 'wire' format for packets:
* Request 1: netif_tx_request -- NETTXF_* (any flags)
* [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
- * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE)
+ * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_FLAG_MORE)
* Request 4: netif_tx_request -- NETTXF_more_data
* Request 5: netif_tx_request -- NETTXF_more_data
* ...
@@ -70,7 +70,9 @@ struct netif_tx_request {
uint16_t offset; /* Offset within buffer page */
uint16_t flags; /* NETTXF_* */
uint16_t id; /* Echoed in response message. */
- uint16_t size; /* Packet size in bytes. */
+ uint16_t size; /* For the first request in a packet, the packet
+ size in bytes. For subsequent requests, the
+ size of that request's associated data in bytes. */
};
typedef struct netif_tx_request netif_tx_request_t;
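
The clarified comment resolves a long-standing ambiguity: only the first request of a packet carries the total packet length, while each later request carries just its own fragment's length. Filling a two-fragment packet would look like this sketch (fragment lengths hypothetical):

    first->size = frag0_len + frag1_len;    /* whole packet */
    first->flags = NETTXF_more_data;
    second->size = frag1_len;               /* this fragment only */
    second->flags = 0;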
@@ -175,7 +177,7 @@ struct netif_rx_response {
uint16_t id;
uint16_t offset; /* Offset in page of start of received packet */
uint16_t flags; /* NETRXF_* */
- int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
+ int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed response size. */
};
typedef struct netif_rx_response netif_rx_response_t;