summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/amd64/minidump_machdep.c5
-rw-r--r--sys/amd64/amd64/pmap.c22
-rw-r--r--sys/amd64/include/md_var.h2
-rw-r--r--sys/amd64/include/pmap.h3
-rw-r--r--sys/amd64/include/vmm.h2
-rw-r--r--sys/amd64/vmm/intel/vmx.c8
-rw-r--r--sys/amd64/vmm/vmm.c4
-rw-r--r--sys/arm/altera/socfpga/files.socfpga1
-rw-r--r--sys/arm/altera/socfpga/socfpga_machdep.c3
-rw-r--r--sys/arm/arm/dump_machdep.c6
-rw-r--r--sys/arm/arm/minidump_machdep.c5
-rw-r--r--sys/arm/conf/EXYNOS5.common2
-rw-r--r--sys/arm/conf/SOCKIT2
-rw-r--r--sys/arm/include/md_var.h2
-rw-r--r--sys/arm/samsung/exynos/exynos5_machdep.c3
-rw-r--r--sys/arm/samsung/exynos/files.exynos52
-rw-r--r--sys/boot/common/disk.h6
-rw-r--r--sys/boot/fdt/dts/arm/exynos5420-arndale-octa.dts15
-rw-r--r--sys/boot/fdt/dts/arm/exynos5420-peach-pit.dts15
-rw-r--r--sys/boot/fdt/dts/arm/exynos5420.dtsi27
-rw-r--r--sys/boot/fdt/dts/arm/socfpga-sockit.dts13
-rw-r--r--sys/boot/fdt/dts/arm/socfpga.dtsi9
-rw-r--r--sys/boot/i386/gptboot/gptboot.c8
-rw-r--r--sys/cam/cam_xpt.c4
-rw-r--r--sys/cam/ctl/ctl.c89
-rw-r--r--sys/cam/ctl/ctl_backend.h3
-rw-r--r--sys/cam/ctl/ctl_backend_block.c3
-rw-r--r--sys/cam/ctl/ctl_backend_ramdisk.c1
-rw-r--r--sys/cam/ctl/ctl_cmd_table.c7
-rw-r--r--sys/cam/ctl/ctl_frontend_cam_sim.c6
-rw-r--r--sys/cam/ctl/ctl_frontend_iscsi.c81
-rw-r--r--sys/cam/ctl/ctl_frontend_iscsi.h1
-rw-r--r--sys/cam/ctl/ctl_private.h3
-rw-r--r--sys/cam/ctl/scsi_ctl.c1
-rw-r--r--sys/cam/scsi/scsi_all.h9
-rw-r--r--sys/cddl/compat/opensolaris/sys/assfail.h12
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c2
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c75
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c42
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c75
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h7
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c14
-rw-r--r--sys/compat/freebsd32/freebsd32_proto.h2
-rw-r--r--sys/compat/freebsd32/freebsd32_syscall.h2
-rw-r--r--sys/compat/freebsd32/freebsd32_syscalls.c2
-rw-r--r--sys/compat/freebsd32/freebsd32_sysent.c10
-rw-r--r--sys/compat/freebsd32/syscalls.master8
-rw-r--r--sys/conf/files2
-rw-r--r--sys/conf/newvers.sh2
-rw-r--r--sys/dev/acpica/acpi.c31
-rw-r--r--sys/dev/acpica/acpi_pci.c1
-rw-r--r--sys/dev/acpica/acpivar.h11
-rw-r--r--sys/dev/alc/if_alc.c1243
-rw-r--r--sys/dev/alc/if_alcreg.h311
-rw-r--r--sys/dev/alc/if_alcvar.h7
-rw-r--r--sys/dev/ale/if_ale.c16
-rw-r--r--sys/dev/ale/if_alereg.h1
-rw-r--r--sys/dev/cxgbe/common/t4_msg.h1
-rw-r--r--sys/dev/cxgbe/tom/t4_connect.c40
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.c52
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.h6
-rw-r--r--sys/dev/cxgbe/tom/t4_tom_l2t.c28
-rw-r--r--sys/dev/drm2/i915/intel_ringbuffer.c4
-rw-r--r--sys/dev/hwpmc/hwpmc_core.c2
-rw-r--r--sys/dev/iscsi/icl.c35
-rw-r--r--sys/dev/iscsi/icl.h1
-rw-r--r--sys/dev/iscsi/iscsi.c1
-rw-r--r--sys/dev/mmc/host/dwmmc.c1101
-rw-r--r--sys/dev/mmc/host/dwmmc.h150
-rw-r--r--sys/dev/mmc/mmc.c1
-rw-r--r--sys/dev/mpr/mpr_sas.c3
-rw-r--r--sys/dev/mpr/mpr_table.c1
-rw-r--r--sys/dev/mrsas/mrsas.c6462
-rw-r--r--sys/dev/mrsas/mrsas.h3827
-rw-r--r--sys/dev/mrsas/mrsas_cam.c2023
-rw-r--r--sys/dev/mrsas/mrsas_fp.c2523
-rw-r--r--sys/dev/mrsas/mrsas_ioctl.c921
-rw-r--r--sys/dev/mrsas/mrsas_ioctl.h153
-rw-r--r--sys/dev/mrsas/mrsas_linux.c137
-rw-r--r--sys/dev/pci/pci.c21
-rw-r--r--sys/dev/usb/usb_freebsd.h1
-rw-r--r--sys/dev/usb/usb_freebsd_loader.h1
-rw-r--r--sys/dev/usb/usb_hub.c80
-rw-r--r--sys/dev/usb/usbdevs2
-rw-r--r--sys/dev/vt/vt_core.c7
-rw-r--r--sys/fs/autofs/autofs.c2
-rw-r--r--sys/geom/geom_dev.c76
-rw-r--r--sys/i386/i386/machdep.c2
-rw-r--r--sys/i386/i386/minidump_machdep.c5
-rw-r--r--sys/i386/i386/pmap.c22
-rw-r--r--sys/i386/i386/vm_machdep.c2
-rw-r--r--sys/i386/include/md_var.h2
-rw-r--r--sys/i386/include/pmap.h3
-rw-r--r--sys/kern/bus_if.m13
-rw-r--r--sys/kern/init_sysent.c10
-rw-r--r--sys/kern/kern_timeout.c14
-rw-r--r--sys/kern/subr_bus.c31
-rw-r--r--sys/kern/syscalls.c2
-rw-r--r--sys/kern/syscalls.master8
-rw-r--r--sys/kern/tty.c43
-rw-r--r--sys/kern/uipc_syscalls.c491
-rw-r--r--sys/kern/vfs_bio.c9
-rw-r--r--sys/libkern/explicit_bzero.c24
-rw-r--r--sys/mips/include/md_var.h2
-rw-r--r--sys/mips/mips/dump_machdep.c6
-rw-r--r--sys/mips/mips/minidump_machdep.c5
-rw-r--r--sys/modules/mrsas/Makefile20
-rw-r--r--sys/modules/mrsas/mrsas_linux/Makefile10
-rw-r--r--sys/net/if_enc.c4
-rw-r--r--sys/net/if_gif.c1
-rw-r--r--sys/netinet/ip_gre.c2
-rw-r--r--sys/netinet/sctp_output.c10
-rw-r--r--sys/netinet/sctp_output.h4
-rw-r--r--sys/netinet/sctp_syscalls.c588
-rw-r--r--sys/netinet/sctp_usrreq.c6
-rw-r--r--sys/netinet/tcp_output.c12
-rw-r--r--sys/netinet/tcp_timer.c148
-rw-r--r--sys/netinet/tcp_var.h9
-rw-r--r--sys/netinet/udp_usrreq.c9
-rw-r--r--sys/netinet6/udp6_usrreq.c46
-rw-r--r--sys/netpfil/ipfw/ip_fw2.c6
-rw-r--r--sys/ofed/drivers/net/mlx4/mlx4.h1
-rw-r--r--sys/ofed/include/linux/mlx4/device.h1
-rw-r--r--sys/sys/bus.h3
-rw-r--r--sys/sys/mbuf.h2
-rw-r--r--sys/sys/param.h2
-rw-r--r--sys/sys/socketvar.h5
-rw-r--r--sys/sys/syscall.h2
-rw-r--r--sys/sys/syscall.mk2
-rw-r--r--sys/sys/sysproto.h2
-rw-r--r--sys/sys/systm.h1
-rw-r--r--sys/ufs/ufs/dir.h2
-rw-r--r--sys/x86/acpica/srat.c36
-rw-r--r--sys/x86/x86/dump_machdep.c7
-rw-r--r--sys/x86/x86/local_apic.c2
135 files changed, 12793 insertions, 8755 deletions
diff --git a/sys/amd64/amd64/minidump_machdep.c b/sys/amd64/amd64/minidump_machdep.c
index 0ee8bcf..61b348e 100644
--- a/sys/amd64/amd64/minidump_machdep.c
+++ b/sys/amd64/amd64/minidump_machdep.c
@@ -215,7 +215,7 @@ blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
/* A fake page table page, to avoid having to handle both 4K and 2M pages */
static pd_entry_t fakepd[NPDEPG];
-void
+int
minidumpsys(struct dumperinfo *di)
{
uint32_t pmapsize;
@@ -441,7 +441,7 @@ minidumpsys(struct dumperinfo *di)
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
- return;
+ return (0);
fail:
if (error < 0)
@@ -462,6 +462,7 @@ minidumpsys(struct dumperinfo *di)
printf("Dump failed. Partition too small.\n");
else
printf("** DUMP FAILED (ERROR %d) **\n", error);
+ return (error);
}
void
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 3344b35..fadde28 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1710,16 +1710,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
{
- KASSERT((sva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: sva not page-aligned"));
- KASSERT((eva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: eva not page-aligned"));
+ if (force) {
+ sva &= ~(vm_offset_t)cpu_clflush_line_size;
+ } else {
+ KASSERT((sva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: sva not page-aligned"));
+ KASSERT((eva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: eva not page-aligned"));
+ }
- if (cpu_feature & CPUID_SS)
- ; /* If "Self Snoop" is supported, do nothing. */
+ if ((cpu_feature & CPUID_SS) != 0 && !force)
+ ; /* If "Self Snoop" is supported and allowed, do nothing. */
else if ((cpu_feature & CPUID_CLFSH) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
@@ -6222,7 +6226,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
- pmap_invalidate_cache_range(va, va + tmpsize);
+ pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
return ((void *)(va + offset));
}
@@ -6558,7 +6562,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
*/
if (changed) {
pmap_invalidate_range(kernel_pmap, base, tmpva);
- pmap_invalidate_cache_range(base, tmpva);
+ pmap_invalidate_cache_range(base, tmpva, FALSE);
}
return (error);
}
diff --git a/sys/amd64/include/md_var.h b/sys/amd64/include/md_var.h
index c7b89a6..e5e78f6 100644
--- a/sys/amd64/include/md_var.h
+++ b/sys/amd64/include/md_var.h
@@ -118,7 +118,7 @@ void pagezero(void *addr);
void printcpuinfo(void);
void setidt(int idx, alias_for_inthand_t *func, int typ, int dpl, int ist);
int user_dbreg_trap(void);
-void minidumpsys(struct dumperinfo *);
+int minidumpsys(struct dumperinfo *);
struct savefpu *get_pcb_user_save_td(struct thread *td);
struct savefpu *get_pcb_user_save_pcb(struct pcb *pcb);
struct pcb *get_pcb_td(struct thread *td);
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index e83e07e..ebf32c6 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -394,7 +394,8 @@ void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void pmap_invalidate_all(pmap_t);
void pmap_invalidate_cache(void);
void pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+ boolean_t force);
void pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
#endif /* _KERNEL */
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index b0d451d..0879ba2 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -485,6 +485,8 @@ enum vm_exitcode {
VM_EXITCODE_SUSPENDED,
VM_EXITCODE_INOUT_STR,
VM_EXITCODE_TASK_SWITCH,
+ VM_EXITCODE_MONITOR,
+ VM_EXITCODE_MWAIT,
VM_EXITCODE_MAX
};
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 00c85d7..2fe5a27 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -81,6 +81,8 @@ __FBSDID("$FreeBSD$");
#define PROCBASED_CTLS_ONE_SETTING \
(PROCBASED_SECONDARY_CONTROLS | \
+ PROCBASED_MWAIT_EXITING | \
+ PROCBASED_MONITOR_EXITING | \
PROCBASED_IO_EXITING | \
PROCBASED_MSR_BITMAPS | \
PROCBASED_CTLS_WINDOW_SETTING | \
@@ -2373,6 +2375,12 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
case EXIT_REASON_XSETBV:
handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
break;
+ case EXIT_REASON_MONITOR:
+ vmexit->exitcode = VM_EXITCODE_MONITOR;
+ break;
+ case EXIT_REASON_MWAIT:
+ vmexit->exitcode = VM_EXITCODE_MWAIT;
+ break;
default:
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
break;
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 7fc3a9b..8f987b7 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1481,6 +1481,10 @@ restart:
case VM_EXITCODE_INOUT_STR:
error = vm_handle_inout(vm, vcpuid, vme, &retu);
break;
+ case VM_EXITCODE_MONITOR:
+ case VM_EXITCODE_MWAIT:
+ vm_inject_ud(vm, vcpuid);
+ break;
default:
retu = true; /* handled in userland */
break;
diff --git a/sys/arm/altera/socfpga/files.socfpga b/sys/arm/altera/socfpga/files.socfpga
index c420617..df92bff 100644
--- a/sys/arm/altera/socfpga/files.socfpga
+++ b/sys/arm/altera/socfpga/files.socfpga
@@ -19,3 +19,4 @@ arm/altera/socfpga/socfpga_manager.c standard
arm/altera/socfpga/socfpga_rstmgr.c standard
dev/dwc/if_dwc.c optional dwc
+dev/mmc/host/dwmmc.c optional dwmmc
diff --git a/sys/arm/altera/socfpga/socfpga_machdep.c b/sys/arm/altera/socfpga/socfpga_machdep.c
index b098663..703c88a 100644
--- a/sys/arm/altera/socfpga/socfpga_machdep.c
+++ b/sys/arm/altera/socfpga/socfpga_machdep.c
@@ -89,6 +89,9 @@ platform_devmap_init(void)
*/
arm_devmap_add_entry(0xffb00000, 0x100000);
+ /* dwmmc */
+ arm_devmap_add_entry(0xff700000, 0x100000);
+
return (0);
}
diff --git a/sys/arm/arm/dump_machdep.c b/sys/arm/arm/dump_machdep.c
index b5a37f2..d8104d1 100644
--- a/sys/arm/arm/dump_machdep.c
+++ b/sys/arm/arm/dump_machdep.c
@@ -280,10 +280,8 @@ dumpsys(struct dumperinfo *di)
size_t hdrsz;
int error;
- if (do_minidump) {
- minidumpsys(di);
- return (0);
- }
+ if (do_minidump)
+ return (minidumpsys(di));
bzero(&ehdr, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = ELFMAG0;
diff --git a/sys/arm/arm/minidump_machdep.c b/sys/arm/arm/minidump_machdep.c
index 13f3242..c12aefa 100644
--- a/sys/arm/arm/minidump_machdep.c
+++ b/sys/arm/arm/minidump_machdep.c
@@ -196,7 +196,7 @@ blk_write_cont(struct dumperinfo *di, vm_paddr_t pa, size_t sz)
/* A fake page table page, to avoid having to handle both 4K and 2M pages */
static pt_entry_t fakept[NPTEPG];
-void
+int
minidumpsys(struct dumperinfo *di)
{
struct minidumphdr mdhdr;
@@ -460,7 +460,7 @@ minidumpsys(struct dumperinfo *di)
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
- return;
+ return (0);
fail:
if (error < 0)
@@ -472,6 +472,7 @@ fail:
printf("\nDump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
+ return (error);
}
void
diff --git a/sys/arm/conf/EXYNOS5.common b/sys/arm/conf/EXYNOS5.common
index 89e978b..7cd577d 100644
--- a/sys/arm/conf/EXYNOS5.common
+++ b/sys/arm/conf/EXYNOS5.common
@@ -80,7 +80,7 @@ options NFS_ROOT # NFS usable as /, requires NFSCLIENT
device mmc # mmc/sd bus
device mmcsd # mmc/sd flash cards
-device sdhci # generic sdhci
+device dwmmc
options ROOTDEVNAME=\"ufs:/dev/da0\"
diff --git a/sys/arm/conf/SOCKIT b/sys/arm/conf/SOCKIT
index d56a7f2..a7c6a87 100644
--- a/sys/arm/conf/SOCKIT
+++ b/sys/arm/conf/SOCKIT
@@ -82,7 +82,7 @@ options NFS_ROOT # NFS usable as /, requires NFSCLIENT
device mmc # mmc/sd bus
device mmcsd # mmc/sd flash cards
-device sdhci # generic sdhci
+device dwmmc
options ROOTDEVNAME=\"ufs:/dev/da0\"
diff --git a/sys/arm/include/md_var.h b/sys/arm/include/md_var.h
index 7337b33..d54a425 100644
--- a/sys/arm/include/md_var.h
+++ b/sys/arm/include/md_var.h
@@ -68,6 +68,6 @@ extern int busdma_swi_pending;
void busdma_swi(void);
void dump_add_page(vm_paddr_t);
void dump_drop_page(vm_paddr_t);
-void minidumpsys(struct dumperinfo *);
+int minidumpsys(struct dumperinfo *);
#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/arm/samsung/exynos/exynos5_machdep.c b/sys/arm/samsung/exynos/exynos5_machdep.c
index 170c01d..c5cf44e 100644
--- a/sys/arm/samsung/exynos/exynos5_machdep.c
+++ b/sys/arm/samsung/exynos/exynos5_machdep.c
@@ -78,6 +78,9 @@ platform_devmap_init(void)
/* UART */
arm_devmap_add_entry(0x12C00000, 0x100000);
+ /* DWMMC */
+ arm_devmap_add_entry(0x12200000, 0x100000);
+
return (0);
}
diff --git a/sys/arm/samsung/exynos/files.exynos5 b/sys/arm/samsung/exynos/files.exynos5
index 8dc6602..3883b28 100644
--- a/sys/arm/samsung/exynos/files.exynos5
+++ b/sys/arm/samsung/exynos/files.exynos5
@@ -33,4 +33,4 @@ arm/samsung/exynos/chrome_ec.c optional chrome_ec_i2c
arm/samsung/exynos/chrome_ec_spi.c optional chrome_ec_spi
arm/samsung/exynos/chrome_kb.c optional chrome_kb
-#dev/sdhci/sdhci_fdt.c optional sdhci
+dev/mmc/host/dwmmc.c optional dwmmc
diff --git a/sys/boot/common/disk.h b/sys/boot/common/disk.h
index 1aaa031..6a78fbf 100644
--- a/sys/boot/common/disk.h
+++ b/sys/boot/common/disk.h
@@ -43,12 +43,12 @@
* BSD disklabel partition within an MBR slice:
*
* d_slice = MBR slice number (typically 1..4)
- * d_partition = disklabel partition (typically 0..7)
+ * d_partition = disklabel partition (typically 0..19)
*
* BSD disklabel partition on the true dedicated disk:
*
* d_slice = -1
- * d_partition = disklabel partition (typically 0..7)
+ * d_partition = disklabel partition (typically 0..19)
*
* GPT partition:
*
@@ -71,7 +71,7 @@
* if there are multiple slices/partitions of a given type, the first one
* is chosen.
*
- * The low-level disk device will typically call slice_open() from its open
+ * The low-level disk device will typically call disk_open() from its open
* method to interpret the disk partition tables according to the rules above.
* This will initialize d_offset to the block offset of the start of the
* selected partition - this offset should be added to the offset passed to
diff --git a/sys/boot/fdt/dts/arm/exynos5420-arndale-octa.dts b/sys/boot/fdt/dts/arm/exynos5420-arndale-octa.dts
index 29542c8..8f09065 100644
--- a/sys/boot/fdt/dts/arm/exynos5420-arndale-octa.dts
+++ b/sys/boot/fdt/dts/arm/exynos5420-arndale-octa.dts
@@ -47,8 +47,19 @@
status = "okay";
};
- sdhci@12220000 {
- status = "disabled";
+ mmc2: dwmmc@12220000 {
+ status = "okay";
+ num-slots = <1>;
+ supports-highspeed;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <2 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ bus-frequency = <50000000>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ };
};
};
diff --git a/sys/boot/fdt/dts/arm/exynos5420-peach-pit.dts b/sys/boot/fdt/dts/arm/exynos5420-peach-pit.dts
index ebe3dfcf..80aca89 100644
--- a/sys/boot/fdt/dts/arm/exynos5420-peach-pit.dts
+++ b/sys/boot/fdt/dts/arm/exynos5420-peach-pit.dts
@@ -68,5 +68,20 @@
usbdrd_phy1: phy@12500000 {
vbus-supply = < 218 >;
};
+
+ mmc2: dwmmc@12220000 {
+ status = "okay";
+ num-slots = <1>;
+ supports-highspeed;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <2 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ bus-frequency = <50000000>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ };
+ };
};
};
diff --git a/sys/boot/fdt/dts/arm/exynos5420.dtsi b/sys/boot/fdt/dts/arm/exynos5420.dtsi
index 7feb6d9..38a5a88 100644
--- a/sys/boot/fdt/dts/arm/exynos5420.dtsi
+++ b/sys/boot/fdt/dts/arm/exynos5420.dtsi
@@ -81,5 +81,32 @@
xhci@12400000 {
status = "okay";
};
+
+ mmc0: dwmmc@12200000 {
+ compatible = "samsung,exynos5420-dw-mshc-smu";
+ reg = <0x12200000 0x10000>;
+ interrupts = <107>;
+ interrupt-parent = <&GIC>;
+ fifo-depth = <0x40>;
+ status = "disabled";
+ };
+
+ mmc1: dwmmc@12210000 {
+ compatible = "samsung,exynos5420-dw-mshc-smu";
+ reg = <0x12210000 0x10000>;
+ interrupts = <108>;
+ interrupt-parent = <&GIC>;
+ fifo-depth = <0x40>;
+ status = "disabled";
+ };
+
+ mmc2: dwmmc@12220000 {
+ compatible = "samsung,exynos5420-dw-mshc";
+ reg = <0x12220000 0x10000>;
+ interrupts = <109>;
+ interrupt-parent = <&GIC>;
+ fifo-depth = <0x40>;
+ status = "disabled";
+ };
};
};
diff --git a/sys/boot/fdt/dts/arm/socfpga-sockit.dts b/sys/boot/fdt/dts/arm/socfpga-sockit.dts
index d76b6cf..058b957 100644
--- a/sys/boot/fdt/dts/arm/socfpga-sockit.dts
+++ b/sys/boot/fdt/dts/arm/socfpga-sockit.dts
@@ -55,6 +55,19 @@
gmac1: ethernet@ff702000 {
status = "okay";
};
+
+ mmc: dwmmc@ff704000 {
+ status = "okay";
+ num-slots = <1>;
+ supports-highspeed;
+ broken-cd;
+ bus-frequency = <25000000>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ };
+ };
};
chosen {
diff --git a/sys/boot/fdt/dts/arm/socfpga.dtsi b/sys/boot/fdt/dts/arm/socfpga.dtsi
index 2480484..1c8f149 100644
--- a/sys/boot/fdt/dts/arm/socfpga.dtsi
+++ b/sys/boot/fdt/dts/arm/socfpga.dtsi
@@ -152,5 +152,14 @@
phy-mode = "rgmii";
status = "disabled";
};
+
+ mmc: dwmmc@ff704000 {
+ compatible = "altr,socfpga-dw-mshc";
+ reg = <0xff704000 0x1000>;
+ interrupts = <171>;
+ interrupt-parent = <&GIC>;
+ fifo-depth = <0x400>;
+ status = "disabled";
+ };
};
};
diff --git a/sys/boot/i386/gptboot/gptboot.c b/sys/boot/i386/gptboot/gptboot.c
index 0596499..4fa5227 100644
--- a/sys/boot/i386/gptboot/gptboot.c
+++ b/sys/boot/i386/gptboot/gptboot.c
@@ -136,6 +136,7 @@ int
main(void)
{
char cmd[512], cmdtmp[512];
+ ssize_t sz;
int autoboot, dskupdated;
ufs_ino_t ino;
@@ -164,9 +165,10 @@ main(void)
for (;;) {
*kname = '\0';
if ((ino = lookup(PATH_CONFIG)) ||
- (ino = lookup(PATH_DOTCONFIG)))
- fsread(ino, cmd, sizeof(cmd));
-
+ (ino = lookup(PATH_DOTCONFIG))) {
+ sz = fsread(ino, cmd, sizeof(cmd) - 1);
+ cmd[(sz < 0) ? 0 : sz] = '\0';
+ }
if (*cmd != '\0') {
memcpy(cmdtmp, cmd, sizeof(cmdtmp));
if (parse(cmdtmp, &dskupdated))
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index 3cba1b0..5042d91 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -2195,7 +2195,7 @@ xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
next_periph = SLIST_NEXT(periph, periph_links);
while (next_periph != NULL &&
(next_periph->flags & CAM_PERIPH_FREE) != 0)
- next_periph = SLIST_NEXT(periph, periph_links);
+ next_periph = SLIST_NEXT(next_periph, periph_links);
if (next_periph)
next_periph->refcount++;
mtx_unlock(&bus->eb_mtx);
@@ -2269,7 +2269,7 @@ xptpdperiphtraverse(struct periph_driver **pdrv,
next_periph = TAILQ_NEXT(periph, unit_links);
while (next_periph != NULL &&
(next_periph->flags & CAM_PERIPH_FREE) != 0)
- next_periph = TAILQ_NEXT(periph, unit_links);
+ next_periph = TAILQ_NEXT(next_periph, unit_links);
if (next_periph)
next_periph->refcount++;
xpt_unlock_buses();
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index eaba507..4ed6a5a 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -293,7 +293,7 @@ static struct scsi_control_page control_page_changeable = {
/*page_length*/sizeof(struct scsi_control_page) - 2,
/*rlec*/SCP_DSENSE,
/*queue_flags*/SCP_QUEUE_ALG_MASK,
- /*eca_and_aen*/0,
+ /*eca_and_aen*/SCP_SWP,
/*flags4*/0,
/*aen_holdoff_period*/{0, 0},
/*busy_timeout_period*/{0, 0},
@@ -4447,7 +4447,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
struct ctl_port *port;
struct scsi_vpd_id_descriptor *desc;
struct scsi_vpd_id_t10 *t10id;
- const char *eui, *naa, *scsiname, *vendor;
+ const char *eui, *naa, *scsiname, *vendor, *value;
int lun_number, i, lun_malloced;
int devidlen, idlen1, idlen2 = 0, len;
@@ -4609,6 +4609,10 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
lun->flags |= CTL_LUN_PRIMARY_SC;
+ value = ctl_get_opt(&be_lun->options, "readonly");
+ if (value != NULL && strcmp(value, "on") == 0)
+ lun->flags |= CTL_LUN_READONLY;
+
lun->ctl_softc = ctl_softc;
TAILQ_INIT(&lun->ooa_queue);
TAILQ_INIT(&lun->blocked_queue);
@@ -6219,6 +6223,14 @@ ctl_control_page_handler(struct ctl_scsiio *ctsio,
saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
set_ua = 1;
}
+ if ((current_cp->eca_and_aen & SCP_SWP) !=
+ (user_cp->eca_and_aen & SCP_SWP)) {
+ current_cp->eca_and_aen &= ~SCP_SWP;
+ current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
+ saved_cp->eca_and_aen &= ~SCP_SWP;
+ saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
+ set_ua = 1;
+ }
if (set_ua != 0) {
int i;
/*
@@ -7045,8 +7057,13 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
header->datalen = ctl_min(total_len - 1, 254);
- if (control_dev == 0)
+ if (control_dev == 0) {
header->dev_specific = 0x10; /* DPOFUA */
+ if ((lun->flags & CTL_LUN_READONLY) ||
+ (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
+ .eca_and_aen & SCP_SWP) != 0)
+ header->dev_specific |= 0x80; /* WP */
+ }
if (dbd)
header->block_descr_len = 0;
else
@@ -7063,8 +7080,13 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
datalen = ctl_min(total_len - 2, 65533);
scsi_ulto2b(datalen, header->datalen);
- if (control_dev == 0)
+ if (control_dev == 0) {
header->dev_specific = 0x10; /* DPOFUA */
+ if ((lun->flags & CTL_LUN_READONLY) ||
+ (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
+ .eca_and_aen & SCP_SWP) != 0)
+ header->dev_specific |= 0x80; /* WP */
+ }
if (dbd)
scsi_ulto2b(0, header->block_descr_len);
else
@@ -9126,6 +9148,31 @@ ctl_read_write(struct ctl_scsiio *ctsio)
num_blocks = scsi_4btoul(cdb->length);
break;
}
+ case WRITE_ATOMIC_16: {
+ struct scsi_rw_16 *cdb;
+
+ if (lun->be_lun->atomicblock == 0) {
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ cdb = (struct scsi_rw_16 *)ctsio->cdb;
+ if (cdb->byte2 & SRW12_FUA)
+ flags |= CTL_LLF_FUA;
+ if (cdb->byte2 & SRW12_DPO)
+ flags |= CTL_LLF_DPO;
+ lba = scsi_8btou64(cdb->addr);
+ num_blocks = scsi_4btoul(cdb->length);
+ if (num_blocks > lun->be_lun->atomicblock) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+ /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ break;
+ }
case WRITE_VERIFY_16: {
struct scsi_write_verify_16 *cdb;
@@ -10299,6 +10346,10 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
bl_ptr->unmap_grain_align);
}
}
+ scsi_ulto4b(lun->be_lun->atomicblock,
+ bl_ptr->max_atomic_transfer_length);
+ scsi_ulto4b(0, bl_ptr->atomic_alignment);
+ scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
}
scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length);
@@ -10694,13 +10745,13 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
}
if (lun == NULL) {
- /* SBC-3 (no version claimed) */
- scsi_ulto2b(0x04C0, inq_ptr->version4);
+ /* SBC-4 (no version claimed) */
+ scsi_ulto2b(0x0600, inq_ptr->version4);
} else {
switch (lun->be_lun->lun_type) {
case T_DIRECT:
- /* SBC-3 (no version claimed) */
- scsi_ulto2b(0x04C0, inq_ptr->version4);
+ /* SBC-4 (no version claimed) */
+ scsi_ulto2b(0x0600, inq_ptr->version4);
break;
case T_PROCESSOR:
default:
@@ -10818,7 +10869,8 @@ ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
break;
}
case READ_16:
- case WRITE_16: {
+ case WRITE_16:
+ case WRITE_ATOMIC_16: {
struct scsi_rw_16 *cdb;
cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
@@ -10832,7 +10884,6 @@ ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
-
*lba = scsi_8btou64(cdb->addr);
*len = scsi_4btoul(cdb->length);
break;
@@ -11284,6 +11335,24 @@ ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
}
#endif
+ if (entry->pattern & CTL_LUN_PAT_WRITE) {
+ if (lun->flags & CTL_LUN_READONLY) {
+ ctl_set_sense(ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_DATA_PROTECT,
+ /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE);
+ retval = 1;
+ goto bailout;
+ }
+ if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
+ .eca_and_aen & SCP_SWP) != 0) {
+ ctl_set_sense(ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_DATA_PROTECT,
+ /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
+ retval = 1;
+ goto bailout;
+ }
+ }
+
/*
* Check for a reservation conflict. If this command isn't allowed
* even on reserved LUNs, and if this initiator isn't the one who
diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h
index c2066c5..d8e78ab 100644
--- a/sys/cam/ctl/ctl_backend.h
+++ b/sys/cam/ctl/ctl_backend.h
@@ -144,6 +144,8 @@ typedef void (*be_lun_config_t)(void *be_lun,
*
* pblockoff is the lowest LBA on the LUN aligned ot physical sector.
*
+ * atomicblock is the number of blocks that can be written atomically.
+ *
* req_lun_id is the requested LUN ID. CTL only pays attention to this
* field if the CTL_LUN_FLAG_ID_REQ flag is set. If the requested LUN ID is
* not available, the LUN addition will fail. If a particular LUN ID isn't
@@ -188,6 +190,7 @@ struct ctl_be_lun {
uint32_t blocksize; /* passed to CTL */
uint16_t pblockexp; /* passed to CTL */
uint16_t pblockoff; /* passed to CTL */
+ uint32_t atomicblock; /* passed to CTL */
uint32_t req_lun_id; /* passed to CTL */
uint32_t lun_id; /* returned from CTL */
uint8_t serial_num[CTL_SN_LEN]; /* passed to CTL */
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
index 0ae8ecb..eb16474 100644
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -2003,6 +2003,9 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
if (unmap)
be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
+ if (be_lun->dispatch == ctl_be_block_dispatch_zvol)
+ be_lun->ctl_be_lun.atomicblock = CTLBLK_MAX_IO_SIZE /
+ be_lun->blocksize;
be_lun->ctl_be_lun.be_lun = be_lun;
be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp;
diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c
index 6613e8e..5f45035 100644
--- a/sys/cam/ctl/ctl_backend_ramdisk.c
+++ b/sys/cam/ctl/ctl_backend_ramdisk.c
@@ -595,6 +595,7 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
if (unmap)
be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
+ be_lun->ctl_be_lun.atomicblock = UINT32_MAX;
be_lun->ctl_be_lun.be_lun = be_lun;
if (params->flags & CTL_LUN_FLAG_ID_REQ) {
diff --git a/sys/cam/ctl/ctl_cmd_table.c b/sys/cam/ctl/ctl_cmd_table.c
index eb1b0ee..e2323a2 100644
--- a/sys/cam/ctl/ctl_cmd_table.c
+++ b/sys/cam/ctl/ctl_cmd_table.c
@@ -1117,8 +1117,11 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
/* 9B */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* 9C */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+/* 9C WRITE ATOMIC (16) */
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0x07}},
/* 9D */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
diff --git a/sys/cam/ctl/ctl_frontend_cam_sim.c b/sys/cam/ctl/ctl_frontend_cam_sim.c
index 72f8dd8..7cdd5b7 100644
--- a/sys/cam/ctl/ctl_frontend_cam_sim.c
+++ b/sys/cam/ctl/ctl_frontend_cam_sim.c
@@ -609,14 +609,16 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
bcopy(csio->cdb_io.cdb_bytes, io->scsiio.cdb,
io->scsiio.cdb_len);
+ ccb->ccb_h.status |= CAM_SIM_QUEUED;
err = ctl_queue(io);
if (err != CTL_RETVAL_COMPLETE) {
printf("%s: func %d: error %d returned by "
"ctl_queue()!\n", __func__,
ccb->ccb_h.func_code, err);
ctl_free_io(io);
- } else {
- ccb->ccb_h.status |= CAM_SIM_QUEUED;
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
}
break;
}
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.c b/sys/cam/ctl/ctl_frontend_iscsi.c
index 85a0667..448cc73 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.c
+++ b/sys/cam/ctl/ctl_frontend_iscsi.c
@@ -154,6 +154,8 @@ static uint32_t cfiscsi_lun_map(void *arg, uint32_t lun);
static int cfiscsi_ioctl(struct cdev *dev,
u_long cmd, caddr_t addr, int flag, struct thread *td);
static void cfiscsi_datamove(union ctl_io *io);
+static void cfiscsi_datamove_in(union ctl_io *io);
+static void cfiscsi_datamove_out(union ctl_io *io);
static void cfiscsi_done(union ctl_io *io);
static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request);
static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request);
@@ -824,7 +826,7 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
return (true);
}
- if (io->scsiio.ext_data_filled == io->scsiio.kern_data_len &&
+ if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end &&
(bhsdo->bhsdo_flags & BHSDO_FLAGS_F) == 0) {
CFISCSI_SESSION_WARN(cs, "got the final packet without "
"the F flag; flags = 0x%x; dropping connection",
@@ -834,7 +836,7 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
return (true);
}
- if (io->scsiio.ext_data_filled != io->scsiio.kern_data_len &&
+ if (io->scsiio.ext_data_filled != cdw->cdw_r2t_end &&
(bhsdo->bhsdo_flags & BHSDO_FLAGS_F) != 0) {
if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
ISCSI_BHS_OPCODE_SCSI_DATA_OUT) {
@@ -842,7 +844,7 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
"transmitted size was %zd bytes instead of %d; "
"dropping connection",
(size_t)io->scsiio.ext_data_filled,
- io->scsiio.kern_data_len);
+ cdw->cdw_r2t_end);
ctl_set_data_phase_error(&io->scsiio);
cfiscsi_session_terminate(cs);
return (true);
@@ -855,7 +857,7 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
}
}
- if (io->scsiio.ext_data_filled == io->scsiio.kern_data_len) {
+ if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end) {
#if 0
CFISCSI_SESSION_DEBUG(cs, "no longer expecting Data-Out with target "
"transfer tag 0x%x", cdw->cdw_target_transfer_tag);
@@ -911,8 +913,13 @@ cfiscsi_pdu_handle_data_out(struct icl_pdu *request)
CFISCSI_SESSION_LOCK(cs);
TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next);
CFISCSI_SESSION_UNLOCK(cs);
+ done = (io->scsiio.ext_data_filled != cdw->cdw_r2t_end ||
+ io->scsiio.ext_data_filled == io->scsiio.kern_data_len);
uma_zfree(cfiscsi_data_wait_zone, cdw);
- io->scsiio.be_move_done(io);
+ if (done)
+ io->scsiio.be_move_done(io);
+ else
+ cfiscsi_datamove_out(io);
}
icl_pdu_free(request);
@@ -1128,7 +1135,6 @@ cfiscsi_maintenance_thread(void *arg)
* that anymore. We might need to revisit that.
*/
callout_drain(&cs->cs_callout);
- icl_conn_shutdown(cs->cs_conn);
icl_conn_close(cs->cs_conn);
/*
@@ -2567,6 +2573,8 @@ cfiscsi_datamove_out(union ctl_io *io)
const struct iscsi_bhs_scsi_command *bhssc;
struct iscsi_bhs_r2t *bhsr2t;
struct cfiscsi_data_wait *cdw;
+ struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
+ uint32_t expected_len, r2t_off, r2t_len;
uint32_t target_transfer_tag;
bool done;
@@ -2585,9 +2593,16 @@ cfiscsi_datamove_out(union ctl_io *io)
PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len;
/*
- * We hadn't received anything during this datamove yet.
+ * Report write underflow as error since CTL and backends don't
+ * really support it, and SCSI does not tell how to do it right.
*/
- io->scsiio.ext_data_filled = 0;
+ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length);
+ if (io->scsiio.kern_rel_offset + io->scsiio.kern_data_len >
+ expected_len) {
+ io->scsiio.io_hdr.port_status = 43;
+ io->scsiio.be_move_done(io);
+ return;
+ }
target_transfer_tag =
atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1);
@@ -2609,8 +2624,35 @@ cfiscsi_datamove_out(union ctl_io *io)
cdw->cdw_ctl_io = io;
cdw->cdw_target_transfer_tag = target_transfer_tag;
cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
+ cdw->cdw_r2t_end = io->scsiio.kern_data_len;
+
+ /* Set initial data pointer for the CDW respecting ext_data_filled. */
+ if (io->scsiio.kern_sg_entries > 0) {
+ ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+ } else {
+ ctl_sglist = &ctl_sg_entry;
+ ctl_sglist->addr = io->scsiio.kern_data_ptr;
+ ctl_sglist->len = io->scsiio.kern_data_len;
+ }
+ cdw->cdw_sg_index = 0;
+ cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr;
+ cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len;
+ r2t_off = io->scsiio.ext_data_filled;
+ while (r2t_off > 0) {
+ if (r2t_off >= cdw->cdw_sg_len) {
+ r2t_off -= cdw->cdw_sg_len;
+ cdw->cdw_sg_index++;
+ cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr;
+ cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len;
+ continue;
+ }
+ cdw->cdw_sg_addr += r2t_off;
+ cdw->cdw_sg_len -= r2t_off;
+ r2t_off = 0;
+ }
- if (cs->cs_immediate_data && io->scsiio.kern_rel_offset <
+ if (cs->cs_immediate_data &&
+ io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled <
icl_pdu_data_segment_length(request)) {
done = cfiscsi_handle_data_segment(request, cdw);
if (done) {
@@ -2620,6 +2662,11 @@ cfiscsi_datamove_out(union ctl_io *io)
}
}
+ r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled;
+ r2t_len = MIN(io->scsiio.kern_data_len - io->scsiio.ext_data_filled,
+ cs->cs_max_burst_length);
+ cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len;
+
CFISCSI_SESSION_LOCK(cs);
TAILQ_INSERT_TAIL(&cs->cs_waiting_for_data_out, cdw, cdw_next);
CFISCSI_SESSION_UNLOCK(cs);
@@ -2659,16 +2706,13 @@ cfiscsi_datamove_out(union ctl_io *io)
* The ext_data_filled is to account for unsolicited
* (immediate) data that might have already arrived.
*/
- bhsr2t->bhsr2t_buffer_offset =
- htonl(io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled);
+ bhsr2t->bhsr2t_buffer_offset = htonl(r2t_off);
/*
* This is the total length (sum of S/G lengths) this call
- * to cfiscsi_datamove() is supposed to handle.
- *
- * XXX: Limit it to MaxBurstLength.
+ * to cfiscsi_datamove() is supposed to handle, limited by
+ * MaxBurstLength.
*/
- bhsr2t->bhsr2t_desired_data_transfer_length =
- htonl(io->scsiio.kern_data_len - io->scsiio.ext_data_filled);
+ bhsr2t->bhsr2t_desired_data_transfer_length = htonl(r2t_len);
cfiscsi_pdu_queue(response);
}
@@ -2678,8 +2722,11 @@ cfiscsi_datamove(union ctl_io *io)
if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
cfiscsi_datamove_in(io);
- else
+ else {
+ /* We hadn't received anything during this datamove yet. */
+ io->scsiio.ext_data_filled = 0;
cfiscsi_datamove_out(io);
+ }
}
static void
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.h b/sys/cam/ctl/ctl_frontend_iscsi.h
index 0ac0e98..af4de87 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.h
+++ b/sys/cam/ctl/ctl_frontend_iscsi.h
@@ -56,6 +56,7 @@ struct cfiscsi_data_wait {
int cdw_sg_index;
char *cdw_sg_addr;
size_t cdw_sg_len;
+ uint32_t cdw_r2t_end;
};
#define CFISCSI_SESSION_STATE_INVALID 0
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index 43ee394..4f22250 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -198,7 +198,8 @@ typedef enum {
CTL_LUN_OFFLINE = 0x080,
CTL_LUN_PR_RESERVED = 0x100,
CTL_LUN_PRIMARY_SC = 0x200,
- CTL_LUN_SENSE_DESC = 0x400
+ CTL_LUN_SENSE_DESC = 0x400,
+ CTL_LUN_READONLY = 0x800
} ctl_lun_flags;
typedef enum {
diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c
index 3529683..9347ed1 100644
--- a/sys/cam/ctl/scsi_ctl.c
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -1115,6 +1115,7 @@ ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
}
case READ_16:
case WRITE_16:
+ case WRITE_ATOMIC_16:
{
struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
lba = scsi_8btou64(cdb->addr);
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index 8eb58fe..fbb131e 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -1720,6 +1720,7 @@ struct ata_pass_16 {
#define VERIFY_16 0x8F
#define SYNCHRONIZE_CACHE_16 0x91
#define WRITE_SAME_16 0x93
+#define WRITE_ATOMIC_16 0x9C
#define SERVICE_ACTION_IN 0x9E
#define REPORT_LUNS 0xA0
#define ATA_PASS_12 0xA1
@@ -2437,8 +2438,7 @@ struct scsi_vpd_logical_block_prov
};
/*
- * Block Limits VDP Page based on
- * T10/1799-D Revision 31
+ * Block Limits VPD Page based on SBC-4 Revision 2
*/
struct scsi_vpd_block_limits
{
@@ -2459,7 +2459,10 @@ struct scsi_vpd_block_limits
u_int8_t opt_unmap_grain[4];
u_int8_t unmap_grain_align[4];
u_int8_t max_write_same_length[8];
- u_int8_t reserved2[20];
+ u_int8_t max_atomic_transfer_length[4];
+ u_int8_t atomic_alignment[4];
+ u_int8_t atomic_transfer_length_granularity[4];
+ u_int8_t reserved2[8];
};
struct scsi_read_capacity
diff --git a/sys/cddl/compat/opensolaris/sys/assfail.h b/sys/cddl/compat/opensolaris/sys/assfail.h
index e6ff258..553da69 100644
--- a/sys/cddl/compat/opensolaris/sys/assfail.h
+++ b/sys/cddl/compat/opensolaris/sys/assfail.h
@@ -46,20 +46,24 @@ void assfail3(const char *, uintmax_t, const char *, uintmax_t, const char *,
#else /* !defined(_KERNEL) */
#ifndef HAVE_ASSFAIL
+extern int aok;
+
static __inline int
__assfail(const char *expr, const char *file, int line)
{
(void)fprintf(stderr, "Assertion failed: (%s), file %s, line %d.\n",
expr, file, line);
- abort();
- /* NOTREACHED */
+ if (!aok)
+ abort();
return (0);
}
#define assfail __assfail
#endif
#ifndef HAVE_ASSFAIL3
+extern int aok;
+
static __inline void
__assfail3(const char *expr, uintmax_t lv, const char *op, uintmax_t rv,
const char *file, int line) {
@@ -67,8 +71,8 @@ __assfail3(const char *expr, uintmax_t lv, const char *op, uintmax_t rv,
(void)fprintf(stderr,
"Assertion failed: %s (0x%jx %s 0x%jx), file %s, line %d.\n",
expr, lv, op, rv, file, line);
- abort();
- /* NOTREACHED */
+ if (!aok)
+ abort();
}
#define assfail3 __assfail3
#endif
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
index 1a1d4d8..60d2ff9 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
@@ -5047,7 +5047,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
if (ab == NULL)
ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
- headroom = target_sz * l2arc_headroom;
+ headroom = target_sz * l2arc_headroom * 2 / ARC_BUFC_NUMLISTS;
if (do_headroom_boost)
headroom = (headroom * l2arc_headroom_boost) / 100;
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
index 4e67bf6..c86d3a7 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
@@ -1021,8 +1021,8 @@ xuio_stat_wbuf_nocopy()
}
#ifdef _KERNEL
-int
-dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
+static int
+dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
{
dmu_buf_t **dbp;
int numbufs, i, err;
@@ -1032,8 +1032,8 @@ dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
- err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
- &numbufs, &dbp);
+ err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
+ TRUE, FTAG, &numbufs, &dbp, 0);
if (err)
return (err);
@@ -1080,6 +1080,58 @@ dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
return (err);
}
+/*
+ * Read 'size' bytes into the uio buffer.
+ * From object zdb->db_object.
+ * Starting at offset uio->uio_loffset.
+ *
+ * If the caller already has a dbuf in the target object
+ * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
+ * because we don't have to find the dnode_t for the object.
+ */
+int
+dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size)
+{
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
+ dnode_t *dn;
+ int err;
+
+ if (size == 0)
+ return (0);
+
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
+ err = dmu_read_uio_dnode(dn, uio, size);
+ DB_DNODE_EXIT(db);
+
+ return (err);
+}
+
+/*
+ * Read 'size' bytes into the uio buffer.
+ * From the specified object
+ * Starting at offset uio->uio_loffset.
+ */
+int
+dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
+{
+ dnode_t *dn;
+ int err;
+
+ if (size == 0)
+ return (0);
+
+ err = dnode_hold(os, object, FTAG, &dn);
+ if (err)
+ return (err);
+
+ err = dmu_read_uio_dnode(dn, uio, size);
+
+ dnode_rele(dn, FTAG);
+
+ return (err);
+}
+
static int
dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
@@ -1132,6 +1184,15 @@ dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
return (err);
}
+/*
+ * Write 'size' bytes from the uio buffer.
+ * To object zdb->db_object.
+ * Starting at offset uio->uio_loffset.
+ *
+ * If the caller already has a dbuf in the target object
+ * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
+ * because we don't have to find the dnode_t for the object.
+ */
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
dmu_tx_t *tx)
@@ -1151,6 +1212,11 @@ dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
return (err);
}
+/*
+ * Write 'size' bytes from the uio buffer.
+ * To the specified object.
+ * Starting at offset uio->uio_loffset.
+ */
int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
dmu_tx_t *tx)
@@ -1852,6 +1918,7 @@ dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
doi->doi_indirection = dn->dn_nlevels;
doi->doi_checksum = dn->dn_checksum;
doi->doi_compress = dn->dn_compress;
+ doi->doi_nblkptr = dn->dn_nblkptr;
doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
doi->doi_fill_count = 0;
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c
index 5b512ad..808864a 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c
@@ -20,7 +20,8 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
+ * Copyright 2014 HybridCluster. All rights reserved.
*/
#include <sys/dmu.h>
@@ -107,11 +108,9 @@ dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int
dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
- int blocksize, dmu_object_type_t bonustype, int bonuslen)
+ int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
dnode_t *dn;
- dmu_tx_t *tx;
- int nblkptr;
int err;
if (object == DMU_META_DNODE_OBJECT)
@@ -122,44 +121,9 @@ dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
if (err)
return (err);
- if (dn->dn_type == ot && dn->dn_datablksz == blocksize &&
- dn->dn_bonustype == bonustype && dn->dn_bonuslen == bonuslen) {
- /* nothing is changing, this is a noop */
- dnode_rele(dn, FTAG);
- return (0);
- }
-
- if (bonustype == DMU_OT_SA) {
- nblkptr = 1;
- } else {
- nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
- }
-
- /*
- * If we are losing blkptrs or changing the block size this must
- * be a new file instance. We must clear out the previous file
- * contents before we can change this type of metadata in the dnode.
- */
- if (dn->dn_nblkptr > nblkptr || dn->dn_datablksz != blocksize) {
- err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
- if (err)
- goto out;
- }
-
- tx = dmu_tx_create(os);
- dmu_tx_hold_bonus(tx, object);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err) {
- dmu_tx_abort(tx);
- goto out;
- }
-
dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, tx);
- dmu_tx_commit(tx);
-out:
dnode_rele(dn, FTAG);
-
return (err);
}
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
index 314e73c..1a0cab5 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
@@ -24,6 +24,7 @@
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
+ * Copyright 2014 HybridCluster. All rights reserved.
*/
#include <sys/dmu.h>
@@ -1392,12 +1393,25 @@ backup_byteswap(dmu_replay_record_t *drr)
#undef DO32
}
+static inline uint8_t
+deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
+{
+ if (bonus_type == DMU_OT_SA) {
+ return (1);
+ } else {
+ return (1 +
+ ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
+ }
+}
+
static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
- int err;
+ dmu_object_info_t doi;
dmu_tx_t *tx;
void *data = NULL;
+ uint64_t object;
+ int err;
if (drro->drr_type == DMU_OT_NONE ||
!DMU_OT_IS_VALID(drro->drr_type) ||
@@ -1411,10 +1425,11 @@ restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
return (SET_ERROR(EINVAL));
}
- err = dmu_object_info(os, drro->drr_object, NULL);
+ err = dmu_object_info(os, drro->drr_object, &doi);
if (err != 0 && err != ENOENT)
return (SET_ERROR(EINVAL));
+ object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
if (drro->drr_bonuslen) {
data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8), NULL);
@@ -1422,37 +1437,53 @@ restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
return (ra->err);
}
- if (err == ENOENT) {
- /* currently free, want to be allocated */
- tx = dmu_tx_create(os);
- dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err != 0) {
- dmu_tx_abort(tx);
- return (err);
+ /*
+ * If we are losing blkptrs or changing the block size this must
+ * be a new file instance. We must clear out the previous file
+ * contents before we can change this type of metadata in the dnode.
+ */
+ if (err == 0) {
+ int nblkptr;
+
+ nblkptr = deduce_nblkptr(drro->drr_bonustype,
+ drro->drr_bonuslen);
+
+ if (drro->drr_blksz != doi.doi_data_block_size ||
+ nblkptr < doi.doi_nblkptr) {
+ err = dmu_free_long_range(os, drro->drr_object,
+ 0, DMU_OBJECT_END);
+ if (err != 0)
+ return (SET_ERROR(EINVAL));
}
+ }
+
+ tx = dmu_tx_create(os);
+ dmu_tx_hold_bonus(tx, object);
+ err = dmu_tx_assign(tx, TXG_WAIT);
+ if (err != 0) {
+ dmu_tx_abort(tx);
+ return (err);
+ }
+
+ if (object == DMU_NEW_OBJECT) {
+ /* currently free, want to be allocated */
err = dmu_object_claim(os, drro->drr_object,
drro->drr_type, drro->drr_blksz,
drro->drr_bonustype, drro->drr_bonuslen, tx);
- dmu_tx_commit(tx);
- } else {
- /* currently allocated, want to be allocated */
+ } else if (drro->drr_type != doi.doi_type ||
+ drro->drr_blksz != doi.doi_data_block_size ||
+ drro->drr_bonustype != doi.doi_bonus_type ||
+ drro->drr_bonuslen != doi.doi_bonus_size) {
+ /* currently allocated, but with different properties */
err = dmu_object_reclaim(os, drro->drr_object,
drro->drr_type, drro->drr_blksz,
- drro->drr_bonustype, drro->drr_bonuslen);
+ drro->drr_bonustype, drro->drr_bonuslen, tx);
}
if (err != 0) {
+ dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
- tx = dmu_tx_create(os);
- dmu_tx_hold_bonus(tx, drro->drr_object);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err != 0) {
- dmu_tx_abort(tx);
- return (err);
- }
-
dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
tx);
dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h
index 2c0b5ae..ad19266 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h
@@ -25,6 +25,7 @@
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright 2013 DEY Storage Systems, Inc.
+ * Copyright 2014 HybridCluster. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
@@ -341,7 +342,7 @@ uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot,
int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
- int blocksize, dmu_object_type_t bonustype, int bonuslen);
+ int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *txp);
/*
* Free an object from this objset.
@@ -616,6 +617,7 @@ void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx);
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
+int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size,
dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
@@ -661,7 +663,8 @@ typedef struct dmu_object_info {
uint8_t doi_indirection; /* 2 = dnode->indirect->data */
uint8_t doi_checksum;
uint8_t doi_compress;
- uint8_t doi_pad[5];
+ uint8_t doi_nblkptr;
+ uint8_t doi_pad[4];
uint64_t doi_physical_blocks_512; /* data + metadata, 512b blks */
uint64_t doi_max_offset;
uint64_t doi_fill_count; /* number of non-empty blocks */
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index 109a339..5fec709 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -582,7 +582,6 @@ static int
mappedread(vnode_t *vp, int nbytes, uio_t *uio)
{
znode_t *zp = VTOZ(vp);
- objset_t *os = zp->z_zfsvfs->z_os;
vm_object_t obj;
int64_t start;
caddr_t va;
@@ -613,7 +612,8 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
page_unhold(pp);
} else {
zfs_vmobject_wunlock(obj);
- error = dmu_read_uio(os, zp->z_id, uio, bytes);
+ error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
+ uio, bytes);
zfs_vmobject_wlock(obj);
}
len -= bytes;
@@ -650,7 +650,6 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- objset_t *os;
ssize_t n, nbytes;
int error = 0;
rl_t *rl;
@@ -658,7 +657,6 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
- os = zfsvfs->z_os;
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
ZFS_EXIT(zfsvfs);
@@ -756,10 +754,12 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
error = mappedread_sf(vp, nbytes, uio);
else
#endif /* __FreeBSD__ */
- if (vn_has_cached_data(vp))
+ if (vn_has_cached_data(vp)) {
error = mappedread(vp, nbytes, uio);
- else
- error = dmu_read_uio(os, zp->z_id, uio, nbytes);
+ } else {
+ error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
+ uio, nbytes);
+ }
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
diff --git a/sys/compat/freebsd32/freebsd32_proto.h b/sys/compat/freebsd32/freebsd32_proto.h
index 9bffe80..d426e19 100644
--- a/sys/compat/freebsd32/freebsd32_proto.h
+++ b/sys/compat/freebsd32/freebsd32_proto.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/compat/freebsd32/syscalls.master 270691 2014-08-27 01:02:02Z kib
+ * created from FreeBSD: head/sys/compat/freebsd32/syscalls.master 272823 2014-10-09 15:16:52Z marcel
*/
#ifndef _FREEBSD32_SYSPROTO_H_
diff --git a/sys/compat/freebsd32/freebsd32_syscall.h b/sys/compat/freebsd32/freebsd32_syscall.h
index b1c45d9..e627ff3 100644
--- a/sys/compat/freebsd32/freebsd32_syscall.h
+++ b/sys/compat/freebsd32/freebsd32_syscall.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/compat/freebsd32/syscalls.master 270691 2014-08-27 01:02:02Z kib
+ * created from FreeBSD: head/sys/compat/freebsd32/syscalls.master 272823 2014-10-09 15:16:52Z marcel
*/
#define FREEBSD32_SYS_syscall 0
diff --git a/sys/compat/freebsd32/freebsd32_syscalls.c b/sys/compat/freebsd32/freebsd32_syscalls.c
index 1e6edf5..2bf45ea 100644
--- a/sys/compat/freebsd32/freebsd32_syscalls.c
+++ b/sys/compat/freebsd32/freebsd32_syscalls.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/compat/freebsd32/syscalls.master 270691 2014-08-27 01:02:02Z kib
+ * created from FreeBSD: head/sys/compat/freebsd32/syscalls.master 272823 2014-10-09 15:16:52Z marcel
*/
const char *freebsd32_syscallnames[] = {
diff --git a/sys/compat/freebsd32/freebsd32_sysent.c b/sys/compat/freebsd32/freebsd32_sysent.c
index c93e44a..9735e30 100644
--- a/sys/compat/freebsd32/freebsd32_sysent.c
+++ b/sys/compat/freebsd32/freebsd32_sysent.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/compat/freebsd32/syscalls.master 270691 2014-08-27 01:02:02Z kib
+ * created from FreeBSD: head/sys/compat/freebsd32/syscalls.master 272823 2014-10-09 15:16:52Z marcel
*/
#include "opt_compat.h"
@@ -518,10 +518,10 @@ struct sysent freebsd32_sysent[] = {
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 468 = nosys */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 469 = __getpath_fromfd */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 470 = __getpath_fromaddr */
- { AS(sctp_peeloff_args), (sy_call_t *)sys_sctp_peeloff, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 471 = sctp_peeloff */
- { AS(sctp_generic_sendmsg_args), (sy_call_t *)sys_sctp_generic_sendmsg, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 472 = sctp_generic_sendmsg */
- { AS(sctp_generic_sendmsg_iov_args), (sy_call_t *)sys_sctp_generic_sendmsg_iov, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 473 = sctp_generic_sendmsg_iov */
- { AS(sctp_generic_recvmsg_args), (sy_call_t *)sys_sctp_generic_recvmsg, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 474 = sctp_generic_recvmsg */
+ { AS(sctp_peeloff_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 471 = sctp_peeloff */
+ { AS(sctp_generic_sendmsg_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 472 = sctp_generic_sendmsg */
+ { AS(sctp_generic_sendmsg_iov_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 473 = sctp_generic_sendmsg_iov */
+ { AS(sctp_generic_recvmsg_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 474 = sctp_generic_recvmsg */
#ifdef PAD64_REQUIRED
{ AS(freebsd32_pread_args), (sy_call_t *)freebsd32_pread, AUE_PREAD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 475 = freebsd32_pread */
{ AS(freebsd32_pwrite_args), (sy_call_t *)freebsd32_pwrite, AUE_PWRITE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 476 = freebsd32_pwrite */
diff --git a/sys/compat/freebsd32/syscalls.master b/sys/compat/freebsd32/syscalls.master
index 161f69d..2fafc0c 100644
--- a/sys/compat/freebsd32/syscalls.master
+++ b/sys/compat/freebsd32/syscalls.master
@@ -845,14 +845,14 @@
468 AUE_NULL UNIMPL nosys
469 AUE_NULL UNIMPL __getpath_fromfd
470 AUE_NULL UNIMPL __getpath_fromaddr
-471 AUE_NULL NOPROTO { int sctp_peeloff(int sd, uint32_t name); }
-472 AUE_NULL NOPROTO { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \
+471 AUE_NULL NOPROTO|NOSTD { int sctp_peeloff(int sd, uint32_t name); }
+472 AUE_NULL NOPROTO|NOSTD { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \
caddr_t to, __socklen_t tolen, \
struct sctp_sndrcvinfo *sinfo, int flags); }
-473 AUE_NULL NOPROTO { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \
+473 AUE_NULL NOPROTO|NOSTD { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \
caddr_t to, __socklen_t tolen, \
struct sctp_sndrcvinfo *sinfo, int flags); }
-474 AUE_NULL NOPROTO { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \
+474 AUE_NULL NOPROTO|NOSTD { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \
struct sockaddr * from, __socklen_t *fromlenaddr, \
struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
#ifdef PAD64_REQUIRED
diff --git a/sys/conf/files b/sys/conf/files
index c885eca..0a59317 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -3163,6 +3163,7 @@ libkern/arc4random.c standard
libkern/bcd.c standard
libkern/bsearch.c standard
libkern/crc32.c standard
+libkern/explicit_bzero.c standard
libkern/fnmatch.c standard
libkern/iconv.c optional libiconv
libkern/iconv_converter_if.m optional libiconv
@@ -3428,6 +3429,7 @@ netinet/sctp_output.c optional inet sctp | inet6 sctp
netinet/sctp_pcb.c optional inet sctp | inet6 sctp
netinet/sctp_peeloff.c optional inet sctp | inet6 sctp
netinet/sctp_ss_functions.c optional inet sctp | inet6 sctp
+netinet/sctp_syscalls.c optional inet sctp | inet6 sctp
netinet/sctp_sysctl.c optional inet sctp | inet6 sctp
netinet/sctp_timer.c optional inet sctp | inet6 sctp
netinet/sctp_usrreq.c optional inet sctp | inet6 sctp
diff --git a/sys/conf/newvers.sh b/sys/conf/newvers.sh
index 73bf267..f323a0d 100644
--- a/sys/conf/newvers.sh
+++ b/sys/conf/newvers.sh
@@ -89,7 +89,7 @@ fi
touch version
v=`cat version` u=${USER:-root} d=`pwd` h=${HOSTNAME:-`hostname`} t=`date`
i=`${MAKE:-make} -V KERN_IDENT`
-compiler_v=$($(${MAKE:-make} -V CC) -v 2>&1 | grep 'version')
+compiler_v=$($(${MAKE:-make} -V CC) -v 2>&1 | grep -w 'version')
for dir in /usr/bin /usr/local/bin; do
if [ ! -z "${svnversion}" ] ; then
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 37c79a5..abbc9b5 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -208,6 +208,7 @@ static device_method_t acpi_methods[] = {
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit),
+ DEVMETHOD(bus_get_domain, acpi_get_domain),
/* ACPI bus */
DEVMETHOD(acpi_id_probe, acpi_device_id_probe),
@@ -794,6 +795,7 @@ acpi_print_child(device_t bus, device_t child)
retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%ld");
if (device_get_flags(child))
retval += printf(" flags %#x", device_get_flags(child));
+ retval += bus_print_child_domain(bus, child);
retval += bus_print_child_footer(bus, child);
return (retval);
@@ -1067,6 +1069,35 @@ acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
}
/*
+ * Fetch the NUMA domain for the given device.
+ *
+ * If a device has a _PXM method, map that to a NUMA domain.
+ *
+ * If none is found, then it'll call the parent method.
+ * If there's no domain, return ENOENT.
+ */
+int
+acpi_get_domain(device_t dev, device_t child, int *domain)
+{
+#if MAXMEMDOM > 1
+ ACPI_HANDLE h;
+ int d, pxm;
+
+ h = acpi_get_handle(child);
+ if ((h != NULL) &&
+ ACPI_SUCCESS(acpi_GetInteger(h, "_PXM", &pxm))) {
+ d = acpi_map_pxm_to_vm_domainid(pxm);
+ if (d < 0)
+ return (ENOENT);
+ *domain = d;
+ return (0);
+ }
+#endif
+ /* No _PXM node; go up a level */
+ return (bus_generic_get_domain(dev, child, domain));
+}
+
+/*
* Pre-allocate/manage all memory and IO resources. Since rman can't handle
* duplicates, we merge any in the sysresource attach routine.
*/
diff --git a/sys/dev/acpica/acpi_pci.c b/sys/dev/acpica/acpi_pci.c
index d94b6f0..3d07205 100644
--- a/sys/dev/acpica/acpi_pci.c
+++ b/sys/dev/acpica/acpi_pci.c
@@ -94,6 +94,7 @@ static device_method_t acpi_pci_methods[] = {
DEVMETHOD(bus_write_ivar, acpi_pci_write_ivar),
DEVMETHOD(bus_child_location_str, acpi_pci_child_location_str_method),
DEVMETHOD(bus_get_dma_tag, acpi_pci_get_dma_tag),
+ DEVMETHOD(bus_get_domain, acpi_get_domain),
/* PCI interface */
DEVMETHOD(pci_set_powerstate, acpi_pci_set_powerstate_method),
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 1bc7ee0..a314098 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -489,5 +489,16 @@ ACPI_HANDLE acpi_GetReference(ACPI_HANDLE scope, ACPI_OBJECT *obj);
SYSCTL_DECL(_debug_acpi);
+/*
+ * Map a PXM to a VM domain.
+ *
+ * Returns the VM domain ID if found, or -1 if not found / invalid.
+ */
+#if MAXMEMDOM > 1
+extern int acpi_map_pxm_to_vm_domainid(int pxm);
+#endif
+
+extern int acpi_get_domain(device_t dev, device_t child, int *domain);
+
#endif /* _KERNEL */
#endif /* !_ACPIVAR_H_ */
diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c
index ecb3b47..6fed142 100644
--- a/sys/dev/alc/if_alc.c
+++ b/sys/dev/alc/if_alc.c
@@ -111,17 +111,31 @@ static struct alc_ident alc_ident_table[] = {
"Atheros AR8152 v1.1 PCIe Fast Ethernet" },
{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024,
"Atheros AR8152 v2.0 PCIe Fast Ethernet" },
+ { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8161, 9 * 1024,
+ "Atheros AR8161 PCIe Gigabit Ethernet" },
+ { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8162, 9 * 1024,
+ "Atheros AR8161 PCIe Fast Ethernet" },
+ { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8171, 9 * 1024,
+ "Atheros AR8161 PCIe Gigabit Ethernet" },
+ { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8172, 9 * 1024,
+ "Atheros AR8161 PCIe Fast Ethernet" },
+ { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024,
+ "Killer E2200 Gigabit Ethernet" },
{ 0, 0, 0, NULL}
};
-static void alc_aspm(struct alc_softc *, int);
+static void alc_aspm(struct alc_softc *, int, int);
+static void alc_aspm_813x(struct alc_softc *, int);
+static void alc_aspm_816x(struct alc_softc *, int);
static int alc_attach(device_t);
static int alc_check_boundary(struct alc_softc *);
+static void alc_config_msi(struct alc_softc *);
static int alc_detach(device_t);
static void alc_disable_l0s_l1(struct alc_softc *);
static int alc_dma_alloc(struct alc_softc *);
static void alc_dma_free(struct alc_softc *);
static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
+static void alc_dsp_fixup(struct alc_softc *, int);
static int alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *
alc_find_ident(device_t);
@@ -130,6 +144,9 @@ static struct mbuf *
alc_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void alc_get_macaddr(struct alc_softc *);
+static void alc_get_macaddr_813x(struct alc_softc *);
+static void alc_get_macaddr_816x(struct alc_softc *);
+static void alc_get_macaddr_par(struct alc_softc *);
static void alc_init(void *);
static void alc_init_cmb(struct alc_softc *);
static void alc_init_locked(struct alc_softc *);
@@ -141,14 +158,26 @@ static void alc_int_task(void *, int);
static int alc_intr(void *);
static int alc_ioctl(struct ifnet *, u_long, caddr_t);
static void alc_mac_config(struct alc_softc *);
+static uint32_t alc_mii_readreg_813x(struct alc_softc *, int, int);
+static uint32_t alc_mii_readreg_816x(struct alc_softc *, int, int);
+static uint32_t alc_mii_writereg_813x(struct alc_softc *, int, int, int);
+static uint32_t alc_mii_writereg_816x(struct alc_softc *, int, int, int);
static int alc_miibus_readreg(device_t, int, int);
static void alc_miibus_statchg(device_t);
static int alc_miibus_writereg(device_t, int, int, int);
+static uint32_t alc_miidbg_readreg(struct alc_softc *, int);
+static uint32_t alc_miidbg_writereg(struct alc_softc *, int, int);
+static uint32_t alc_miiext_readreg(struct alc_softc *, int, int);
+static uint32_t alc_miiext_writereg(struct alc_softc *, int, int, int);
static int alc_mediachange(struct ifnet *);
+static int alc_mediachange_locked(struct alc_softc *);
static void alc_mediastatus(struct ifnet *, struct ifmediareq *);
static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
+static void alc_osc_reset(struct alc_softc *);
static void alc_phy_down(struct alc_softc *);
static void alc_phy_reset(struct alc_softc *);
+static void alc_phy_reset_813x(struct alc_softc *);
+static void alc_phy_reset_816x(struct alc_softc *);
static int alc_probe(device_t);
static void alc_reset(struct alc_softc *);
static int alc_resume(device_t);
@@ -158,6 +187,8 @@ static void alc_rxfilter(struct alc_softc *);
static void alc_rxvlan(struct alc_softc *);
static void alc_setlinkspeed(struct alc_softc *);
static void alc_setwol(struct alc_softc *);
+static void alc_setwol_813x(struct alc_softc *);
+static void alc_setwol_816x(struct alc_softc *);
static int alc_shutdown(device_t);
static void alc_start(struct ifnet *);
static void alc_start_locked(struct ifnet *);
@@ -230,10 +261,21 @@ static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
struct alc_softc *sc;
- uint32_t v;
- int i;
+ int v;
sc = device_get_softc(dev);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ v = alc_mii_readreg_816x(sc, phy, reg);
+ else
+ v = alc_mii_readreg_813x(sc, phy, reg);
+ return (v);
+}
+
+static uint32_t
+alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg)
+{
+ uint32_t v;
+ int i;
/*
* For AR8132 fast ethernet controller, do not report 1000baseT
@@ -262,14 +304,52 @@ alc_miibus_readreg(device_t dev, int phy, int reg)
return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}
+/*
+ * Read a PHY register on AR816x-class controllers via the MDIO
+ * mailbox.  The MDIO clock divider is chosen from the current link
+ * state.  Returns the register contents, or 0 if the controller does
+ * not clear MDIO_OP_BUSY within ALC_PHY_TIMEOUT polls.
+ *
+ * NOTE(review): the 'phy' argument is unused here — the command word
+ * carries only the register address; confirm the AR816x MDIO block
+ * implicitly targets the internal PHY.
+ */
+static uint32_t
+alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg)
+{
+	uint32_t clk, v;
+	int i;
+
+	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
+		clk = MDIO_CLK_25_128;
+	else
+		clk = MDIO_CLK_25_4;
+	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
+	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
+	/* Poll for command completion. */
+	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
+		DELAY(5);
+		v = CSR_READ_4(sc, ALC_MDIO);
+		if ((v & MDIO_OP_BUSY) == 0)
+			break;
+	}
+
+	if (i == 0) {
+		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
+		return (0);
+	}
+
+	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
+}
+
static int
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
struct alc_softc *sc;
- uint32_t v;
- int i;
+ int v;
sc = device_get_softc(dev);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ v = alc_mii_writereg_816x(sc, phy, reg, val);
+ else
+ v = alc_mii_writereg_813x(sc, phy, reg, val);
+ return (v);
+}
+
+static uint32_t
+alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, int val)
+{
+ uint32_t v;
+ int i;
CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
(val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
@@ -287,6 +367,32 @@ alc_miibus_writereg(device_t dev, int phy, int reg, int val)
return (0);
}
+/*
+ * Write a PHY register on AR816x-class controllers via the MDIO
+ * mailbox.  The MDIO clock divider is chosen from the current link
+ * state.  Always returns 0; a timeout is only logged.
+ *
+ * NOTE(review): 'phy' is unused here, as in the 816x read path.
+ */
+static uint32_t
+alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, int val)
+{
+	uint32_t clk, v;
+	int i;
+
+	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
+		clk = MDIO_CLK_25_128;
+	else
+		clk = MDIO_CLK_25_4;
+	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
+	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
+	    MDIO_SUP_PREAMBLE | clk);
+	/* Poll for command completion. */
+	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
+		DELAY(5);
+		v = CSR_READ_4(sc, ALC_MDIO);
+		if ((v & MDIO_OP_BUSY) == 0)
+			break;
+	}
+
+	if (i == 0)
+		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);
+
+	return (0);
+}
+
static void
alc_miibus_statchg(device_t dev)
{
@@ -319,7 +425,6 @@ alc_miibus_statchg(device_t dev)
break;
}
}
- alc_stop_queue(sc);
/* Stop Rx/Tx MACs. */
alc_stop_mac(sc);
@@ -331,7 +436,159 @@ alc_miibus_statchg(device_t dev)
reg = CSR_READ_4(sc, ALC_MAC_CFG);
reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
- alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
+ }
+ alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
+ alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
+}
+
+/*
+ * Read a vendor "debug" PHY register: the register index is latched
+ * through ALC_MII_DBG_ADDR and the value is read back through
+ * ALC_MII_DBG_DATA.
+ */
+static uint32_t
+alc_miidbg_readreg(struct alc_softc *sc, int reg)
+{
+
+	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
+	    reg);
+	return (alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
+	    ALC_MII_DBG_DATA));
+}
+
+/*
+ * Write a vendor "debug" PHY register: the register index is latched
+ * through ALC_MII_DBG_ADDR and the value is written through
+ * ALC_MII_DBG_DATA.  Returns the result of the data write.
+ */
+static uint32_t
+alc_miidbg_writereg(struct alc_softc *sc, int reg, int val)
+{
+
+	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
+	    reg);
+	return (alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
+	    ALC_MII_DBG_DATA, val));
+}
+
+/*
+ * Read an extended (device-addressed) PHY register.  The target
+ * device/register pair is latched through ALC_EXT_MDIO, then an MDIO
+ * read is issued with MDIO_MODE_EXT set.  Returns the register
+ * contents, or 0 on timeout.
+ */
+static uint32_t
+alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg)
+{
+	uint32_t clk, v;
+	int i;
+
+	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
+	    EXT_MDIO_DEVADDR(devaddr));
+	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
+		clk = MDIO_CLK_25_128;
+	else
+		clk = MDIO_CLK_25_4;
+	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
+	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
+	/* Poll for command completion. */
+	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
+		DELAY(5);
+		v = CSR_READ_4(sc, ALC_MDIO);
+		if ((v & MDIO_OP_BUSY) == 0)
+			break;
+	}
+
+	if (i == 0) {
+		device_printf(sc->alc_dev, "phy ext read timeout : %d, %d\n",
+		    devaddr, reg);
+		return (0);
+	}
+
+	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
+}
+
+/*
+ * Write an extended (device-addressed) PHY register.  The target
+ * device/register pair is latched through ALC_EXT_MDIO, then an MDIO
+ * write is issued with MDIO_MODE_EXT set.  Always returns 0; a
+ * timeout is only logged.
+ */
+static uint32_t
+alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val)
+{
+	uint32_t clk, v;
+	int i;
+
+	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
+	    EXT_MDIO_DEVADDR(devaddr));
+	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
+		clk = MDIO_CLK_25_128;
+	else
+		clk = MDIO_CLK_25_4;
+	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
+	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
+	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
+	/* Poll for command completion. */
+	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
+		DELAY(5);
+		v = CSR_READ_4(sc, ALC_MDIO);
+		if ((v & MDIO_OP_BUSY) == 0)
+			break;
+	}
+
+	if (i == 0)
+		device_printf(sc->alc_dev, "phy ext write timeout : %d, %d\n",
+		    devaddr, reg);
+
+	return (0);
+}
+
+/*
+ * Vendor DSP workaround for cable-length/AGC misdetection on pre-C0
+ * AR816x controllers, applied on every link state/media change.
+ */
+static void
+alc_dsp_fixup(struct alc_softc *sc, int media)
+{
+	uint16_t agc, len, val;
+
+	/*
+	 * This workaround targets the AR816x family only (it relies on
+	 * AR816X_REV() and the AR816x LINK_WAR flag), and only
+	 * revisions older than C0.
+	 */
+	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
+		return;
+	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
+		return;
+
+	/*
+	 * Vendor PHY magic.
+	 * 1000BT/AZ, wrong cable length
+	 */
+	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
+		/* Extract measured cable length and AGC/VGA reading. */
+		len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6);
+		len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
+		    EXT_CLDCTL6_CAB_LEN_MASK;
+		agc = alc_miidbg_readreg(sc, MII_DBG_AGC);
+		agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK;
+		/*
+		 * Compare each speed against its own cable-length and
+		 * AGC thresholds (100M values for 100baseTX).
+		 */
+		if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
+		    agc > DBG_AGC_LONG1G_LIMT) ||
+		    (media == IFM_100_TX &&
+		    len > EXT_CLDCTL6_CAB_LEN_SHORT100M &&
+		    agc > DBG_AGC_LONG100M_LIMT)) {
+			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
+			    DBG_AZ_ANADECT_LONG);
+			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
+			    MII_EXT_ANEG_AFE);
+			val |= ANEG_AFEE_10BT_100M_TH;
+			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
+			    val);
+		} else {
+			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
+			    DBG_AZ_ANADECT_DEFAULT);
+			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
+			    MII_EXT_ANEG_AFE);
+			val &= ~ANEG_AFEE_10BT_100M_TH;
+			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
+			    val);
+		}
+		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
+		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
+			if (media == IFM_1000_T) {
+				/*
+				 * Giga link threshold, raise the tolerance of
+				 * noise 50%.
+				 */
+				val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
+				val &= ~DBG_MSE20DB_TH_MASK;
+				val |= (DBG_MSE20DB_TH_HI <<
+				    DBG_MSE20DB_TH_SHIFT);
+				alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
+			} else if (media == IFM_100_TX)
+				alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
+				    DBG_MSE16DB_UP);
+		}
+	} else {
+		/* Link down: restore default AFE and MSE thresholds. */
+		val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE);
+		val &= ~ANEG_AFEE_10BT_100M_TH;
+		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
+		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
+		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
+			alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
+			    DBG_MSE16DB_DOWN);
+			val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
+			val &= ~DBG_MSE20DB_TH_MASK;
+			val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
+			alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
+		}
 	}
 }
@@ -359,17 +616,29 @@ static int
alc_mediachange(struct ifnet *ifp)
{
struct alc_softc *sc;
- struct mii_data *mii;
- struct mii_softc *miisc;
int error;
sc = ifp->if_softc;
ALC_LOCK(sc);
+ error = alc_mediachange_locked(sc);
+ ALC_UNLOCK(sc);
+
+ return (error);
+}
+
+static int
+alc_mediachange_locked(struct alc_softc *sc)
+{
+ struct mii_data *mii;
+ struct mii_softc *miisc;
+ int error;
+
+ ALC_LOCK_ASSERT(sc);
+
mii = device_get_softc(sc->alc_miibus);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
PHY_RESET(miisc);
error = mii_mediachg(mii);
- ALC_UNLOCK(sc);
return (error);
}
@@ -407,7 +676,17 @@ alc_probe(device_t dev)
static void
alc_get_macaddr(struct alc_softc *sc)
{
- uint32_t ea[2], opt;
+
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ alc_get_macaddr_816x(sc);
+ else
+ alc_get_macaddr_813x(sc);
+}
+
+static void
+alc_get_macaddr_813x(struct alc_softc *sc)
+{
+ uint32_t opt;
uint16_t val;
int eeprom, i;
@@ -502,6 +781,73 @@ alc_get_macaddr(struct alc_softc *sc)
}
}
+ alc_get_macaddr_par(sc);
+}
+
+/*
+ * Recover the station address on AR816x controllers: first ask the
+ * hardware to reload it via TWSI; if that times out, trigger a reload
+ * from EEPROM/FLASH (when present); finally read the resulting
+ * address from the PAR registers via alc_get_macaddr_par().
+ */
+static void
+alc_get_macaddr_816x(struct alc_softc *sc)
+{
+	uint32_t reg;
+	int i, reloaded;
+
+	reloaded = 0;
+	/* Try to reload station address via TWSI. */
+	for (i = 100; i > 0; i--) {
+		reg = CSR_READ_4(sc, ALC_SLD);
+		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
+			break;
+		DELAY(1000);
+	}
+	if (i != 0) {
+		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
+		for (i = 100; i > 0; i--) {
+			DELAY(1000);
+			reg = CSR_READ_4(sc, ALC_SLD);
+			if ((reg & SLD_START) == 0)
+				break;
+		}
+		if (i != 0)
+			reloaded++;
+		else if (bootverbose)
+			device_printf(sc->alc_dev,
+			    "reloading station address via TWSI timed out!\n");
+	}
+
+	/* Try to reload station address from EEPROM or FLASH. */
+	if (reloaded == 0) {
+		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
+		if ((reg & (EEPROM_LD_EEPROM_EXIST |
+		    EEPROM_LD_FLASH_EXIST)) != 0) {
+			/* Wait for any in-flight load to finish. */
+			for (i = 100; i > 0; i--) {
+				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
+				if ((reg & (EEPROM_LD_PROGRESS |
+				    EEPROM_LD_START)) == 0)
+					break;
+				DELAY(1000);
+			}
+			if (i != 0) {
+				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
+				    EEPROM_LD_START);
+				for (i = 100; i > 0; i--) {
+					DELAY(1000);
+					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
+					if ((reg & EEPROM_LD_START) == 0)
+						break;
+				}
+			} else if (bootverbose)
+				device_printf(sc->alc_dev,
+				    "reloading EEPROM/FLASH timed out!\n");
+		}
+	}
+
+	alc_get_macaddr_par(sc);
+}
+
+static void
+alc_get_macaddr_par(struct alc_softc *sc)
+{
+ uint32_t ea[2];
+
ea[0] = CSR_READ_4(sc, ALC_PAR0);
ea[1] = CSR_READ_4(sc, ALC_PAR1);
sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
@@ -517,19 +863,31 @@ alc_disable_l0s_l1(struct alc_softc *sc)
{
uint32_t pmcfg;
- /* Another magic from vendor. */
- pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
- pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
- PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
- PM_CFG_SERDES_PD_EX_L1);
- pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
- PM_CFG_SERDES_L1_ENB;
- CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
+ /* Another magic from vendor. */
+ pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
+ pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
+ PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
+ PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
+ pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
+ PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
+ CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
+ }
}
static void
alc_phy_reset(struct alc_softc *sc)
{
+
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ alc_phy_reset_816x(sc);
+ else
+ alc_phy_reset_813x(sc);
+}
+
+static void
+alc_phy_reset_813x(struct alc_softc *sc)
+{
uint16_t data;
/* Reset magic from Linux. */
@@ -642,12 +1000,101 @@ alc_phy_reset(struct alc_softc *sc)
}
 static void
+alc_phy_reset_816x(struct alc_softc *sc)
+{
+	uint32_t val;
+
+	/*
+	 * Reset and reconfigure the internal PHY of AR816x-class
+	 * controllers.  PHY hibernation and EEE are disabled here (the
+	 * "#ifdef notyet" paths keep the hibernation configuration the
+	 * vendor intends to enable eventually).
+	 */
+	val = CSR_READ_4(sc, ALC_GPHY_CFG);
+	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
+	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
+	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
+	val |= GPHY_CFG_SEL_ANA_RESET;
+#ifdef notyet
+	val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET;
+#else
+	/* Disable PHY hibernation. */
+	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
+#endif
+	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
+	DELAY(10);
+	/* Deassert external reset after the config settles. */
+	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
+	DELAY(800);
+
+	/* Vendor PHY magic. */
+#ifdef notyet
+	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT);
+	alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT);
+	alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS,
+	    EXT_VDRVBIAS_DEFAULT);
+#else
+	/* Disable PHY hibernation. */
+	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
+	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
+	alc_miidbg_writereg(sc, MII_DBG_HIBNEG,
+	    DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
+	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
+#endif
+
+	/* XXX Disable EEE. */
+	val = CSR_READ_4(sc, ALC_LPI_CTL);
+	val &= ~LPI_CTL_ENB;
+	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
+	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);
+
+	/* PHY power saving. */
+	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
+	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
+	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
+	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
+	val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
+	val &= ~DBG_GREENCFG2_GATE_DFSE_EN;
+	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
+
+	/* RTL8139C, 120m issue. */
+	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
+	    ANEG_NLP78_120M_DEFAULT);
+	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
+	    ANEG_S3DIG10_DEFAULT);
+
+	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
+		/* Turn off half amplitude. */
+		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3);
+		val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
+		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val);
+		/* Turn off Green feature. */
+		val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
+		val |= DBG_GREENCFG2_BP_GREEN;
+		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
+		/* Turn off half bias. */
+		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5);
+		val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
+		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val);
+	}
+}
+
+static void
alc_phy_down(struct alc_softc *sc)
{
+ uint32_t gphy;
switch (sc->alc_ident->deviceid) {
+ case DEVICEID_ATHEROS_AR8161:
+ case DEVICEID_ATHEROS_E2200:
+ case DEVICEID_ATHEROS_AR8162:
+ case DEVICEID_ATHEROS_AR8171:
+ case DEVICEID_ATHEROS_AR8172:
+ gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
+ gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
+ GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
+ gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
+ GPHY_CFG_SEL_ANA_RESET;
+ gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
+ CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
+ break;
case DEVICEID_ATHEROS_AR8151:
case DEVICEID_ATHEROS_AR8151_V2:
+ case DEVICEID_ATHEROS_AR8152_B:
+ case DEVICEID_ATHEROS_AR8152_B2:
/*
* GPHY power down caused more problems on AR8151 v2.0.
* When driver is reloaded after GPHY power down,
@@ -673,12 +1120,23 @@ alc_phy_down(struct alc_softc *sc)
}
 static void
-alc_aspm(struct alc_softc *sc, int media)
+alc_aspm(struct alc_softc *sc, int init, int media)
+{
+
+	/*
+	 * Dispatch to the family-specific ASPM routine: AR816x uses
+	 * 'init', AR813x/AR815x uses the active 'media' subtype.
+	 */
+	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+		alc_aspm_816x(sc, init);
+	else
+		alc_aspm_813x(sc, media);
+}
+
+static void
+alc_aspm_813x(struct alc_softc *sc, int media)
{
uint32_t pmcfg;
uint16_t linkcfg;
- ALC_LOCK_ASSERT(sc);
+ if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
+ return;
pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
@@ -759,71 +1217,61 @@ alc_aspm(struct alc_softc *sc, int media)
CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
-static int
-alc_attach(device_t dev)
+static void
+alc_aspm_816x(struct alc_softc *sc, int init)
{
- struct alc_softc *sc;
- struct ifnet *ifp;
- char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
- uint16_t burst;
- int base, error, i, msic, msixc, state;
- uint32_t cap, ctl, val;
-
- error = 0;
- sc = device_get_softc(dev);
- sc->alc_dev = dev;
-
- mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
- MTX_DEF);
- callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
- TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
- sc->alc_ident = alc_find_ident(dev);
+ uint32_t pmcfg;
- /* Map the device. */
- pci_enable_busmaster(dev);
- sc->alc_res_spec = alc_res_spec_mem;
- sc->alc_irq_spec = alc_irq_spec_legacy;
- error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
- if (error != 0) {
- device_printf(dev, "cannot allocate memory resources.\n");
- goto fail;
+ pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
+ pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
+ pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
+ pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
+ pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
+ pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
+ pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
+ pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
+ pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
+ PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
+ PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
+ PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
+ PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
+ if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
+ (sc->alc_rev & 0x01) != 0)
+ pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
+ if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
+ /* Link up, enable both L0s, L1s. */
+ pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
+ PM_CFG_MAC_ASPM_CHK;
+ } else {
+ if (init != 0)
+ pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
+ PM_CFG_MAC_ASPM_CHK;
+ else if ((sc->alc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+ pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
}
+ CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
+}
- /* Set PHY address. */
- sc->alc_phyaddr = ALC_PHY_ADDR;
+static void
+alc_init_pcie(struct alc_softc *sc)
+{
+ const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
+ uint32_t cap, ctl, val;
+ int state;
- /* Initialize DMA parameters. */
- sc->alc_dma_rd_burst = 0;
- sc->alc_dma_wr_burst = 0;
- sc->alc_rcb = DMA_CFG_RCB_64;
- if (pci_find_cap(dev, PCIY_EXPRESS, &base) == 0) {
- sc->alc_flags |= ALC_FLAG_PCIE;
- sc->alc_expcap = base;
- burst = CSR_READ_2(sc, base + PCIER_DEVICE_CTL);
- sc->alc_dma_rd_burst =
- (burst & PCIEM_CTL_MAX_READ_REQUEST) >> 12;
- sc->alc_dma_wr_burst = (burst & PCIEM_CTL_MAX_PAYLOAD) >> 5;
- if (bootverbose) {
- device_printf(dev, "Read request size : %u bytes.\n",
- alc_dma_burst[sc->alc_dma_rd_burst]);
- device_printf(dev, "TLP payload size : %u bytes.\n",
- alc_dma_burst[sc->alc_dma_wr_burst]);
- }
- if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
- sc->alc_dma_rd_burst = 3;
- if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
- sc->alc_dma_wr_burst = 3;
- /* Clear data link and flow-control protocol error. */
- val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
- val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
- CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
+ /* Clear data link and flow-control protocol error. */
+ val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
+ val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
+ CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
+
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
PCIE_PHYMISC_FORCE_RCV_DET);
if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
- pci_get_revid(dev) == ATHEROS_AR8152_B_V10) {
+ sc->alc_rev == ATHEROS_AR8152_B_V10) {
val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
PCIE_PHYMISC2_SERDES_TH_MASK);
@@ -832,13 +1280,13 @@ alc_attach(device_t dev)
CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
}
/* Disable ASPM L0S and L1. */
- cap = CSR_READ_2(sc, base + PCIER_LINK_CAP);
+ cap = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CAP);
if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
- ctl = CSR_READ_2(sc, base + PCIER_LINK_CTL);
+ ctl = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CTL);
if ((ctl & PCIEM_LINK_CTL_RCB) != 0)
sc->alc_rcb = DMA_CFG_RCB_128;
if (bootverbose)
- device_printf(dev, "RCB %u bytes\n",
+ device_printf(sc->alc_dev, "RCB %u bytes\n",
sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
state = ctl & PCIEM_LINK_CTL_ASPMC;
if (state & PCIEM_LINK_CTL_ASPMC_L0S)
@@ -855,13 +1303,91 @@ alc_attach(device_t dev)
device_printf(sc->alc_dev,
"no ASPM support\n");
}
+ } else {
+ val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
+ val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
+ CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
+ val = CSR_READ_4(sc, ALC_MASTER_CFG);
+ if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
+ (sc->alc_rev & 0x01) != 0) {
+ if ((val & MASTER_WAKEN_25M) == 0 ||
+ (val & MASTER_CLK_SEL_DIS) == 0) {
+ val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
+ CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
+ }
+ } else {
+ if ((val & MASTER_WAKEN_25M) == 0 ||
+ (val & MASTER_CLK_SEL_DIS) != 0) {
+ val |= MASTER_WAKEN_25M;
+ val &= ~MASTER_CLK_SEL_DIS;
+ CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
+ }
+ }
}
+ alc_aspm(sc, 1, IFM_UNKNOWN);
+}
- /* Reset PHY. */
- alc_phy_reset(sc);
+/*
+ * Program the MSI/MSI-X retransmission timer, which doubles as the
+ * interrupt moderation control on AR816x controllers.  No-op for
+ * AR813x/AR815x.
+ */
+static void
+alc_config_msi(struct alc_softc *sc)
+{
+	uint32_t ctl, mod;
-	/* Reset the ethernet controller. */
-	alc_reset(sc);
+	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
+		/*
+		 * It seems interrupt moderation is controlled by
+		 * ALC_MSI_RETRANS_TIMER register if MSI/MSIX is active.
+		 * Driver uses RX interrupt moderation parameter to
+		 * program ALC_MSI_RETRANS_TIMER register.
+		 */
+		ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER);
+		ctl &= ~MSI_RETRANS_TIMER_MASK;
+		ctl &= ~MSI_RETRANS_MASK_SEL_LINE;
+		mod = ALC_USECS(sc->alc_int_rx_mod);
+		/* Never program a zero timer value. */
+		if (mod == 0)
+			mod = 1;
+		ctl |= mod;
+		if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
+			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
+			    MSI_RETRANS_MASK_SEL_STD);
+		else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
+			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
+			    MSI_RETRANS_MASK_SEL_LINE);
+		else
+			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0);
+	}
+}
+
+static int
+alc_attach(device_t dev)
+{
+ struct alc_softc *sc;
+ struct ifnet *ifp;
+ int base, error, i, msic, msixc;
+ uint16_t burst;
+
+ error = 0;
+ sc = device_get_softc(dev);
+ sc->alc_dev = dev;
+ sc->alc_rev = pci_get_revid(dev);
+
+ mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+ MTX_DEF);
+ callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
+ TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
+ sc->alc_ident = alc_find_ident(dev);
+
+ /* Map the device. */
+ pci_enable_busmaster(dev);
+ sc->alc_res_spec = alc_res_spec_mem;
+ sc->alc_irq_spec = alc_irq_spec_legacy;
+ error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
+ if (error != 0) {
+ device_printf(dev, "cannot allocate memory resources.\n");
+ goto fail;
+ }
+
+ /* Set PHY address. */
+ sc->alc_phyaddr = ALC_PHY_ADDR;
/*
* One odd thing is AR8132 uses the same PHY hardware(F1
@@ -871,6 +1397,19 @@ alc_attach(device_t dev)
* shows the same PHY model/revision number of AR8131.
*/
switch (sc->alc_ident->deviceid) {
+ case DEVICEID_ATHEROS_AR8161:
+ if (pci_get_subvendor(dev) == VENDORID_ATHEROS &&
+ pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0)
+ sc->alc_flags |= ALC_FLAG_LINK_WAR;
+ /* FALLTHROUGH */
+ case DEVICEID_ATHEROS_E2200:
+ case DEVICEID_ATHEROS_AR8171:
+ sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
+ break;
+ case DEVICEID_ATHEROS_AR8162:
+ case DEVICEID_ATHEROS_AR8172:
+ sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
+ break;
case DEVICEID_ATHEROS_AR8152_B:
case DEVICEID_ATHEROS_AR8152_B2:
sc->alc_flags |= ALC_FLAG_APS;
@@ -885,7 +1424,7 @@ alc_attach(device_t dev)
default:
break;
}
- sc->alc_flags |= ALC_FLAG_ASPM_MON | ALC_FLAG_JUMBO;
+ sc->alc_flags |= ALC_FLAG_JUMBO;
/*
* It seems that AR813x/AR815x has silicon bug for SMB. In
@@ -898,7 +1437,6 @@ alc_attach(device_t dev)
* Don't use Tx CMB. It is known to have silicon bug.
*/
sc->alc_flags |= ALC_FLAG_CMB_BUG;
- sc->alc_rev = pci_get_revid(dev);
sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
MASTER_CHIP_REV_SHIFT;
if (bootverbose) {
@@ -906,11 +1444,45 @@ alc_attach(device_t dev)
sc->alc_rev);
device_printf(dev, "Chip id/revision : 0x%04x\n",
sc->alc_chip_rev);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ device_printf(dev, "AR816x revision : 0x%x\n",
+ AR816X_REV(sc->alc_rev));
}
device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
+ /* Initialize DMA parameters. */
+ sc->alc_dma_rd_burst = 0;
+ sc->alc_dma_wr_burst = 0;
+ sc->alc_rcb = DMA_CFG_RCB_64;
+ if (pci_find_cap(dev, PCIY_EXPRESS, &base) == 0) {
+ sc->alc_flags |= ALC_FLAG_PCIE;
+ sc->alc_expcap = base;
+ burst = CSR_READ_2(sc, base + PCIER_DEVICE_CTL);
+ sc->alc_dma_rd_burst =
+ (burst & PCIEM_CTL_MAX_READ_REQUEST) >> 12;
+ sc->alc_dma_wr_burst = (burst & PCIEM_CTL_MAX_PAYLOAD) >> 5;
+ if (bootverbose) {
+ device_printf(dev, "Read request size : %u bytes.\n",
+ alc_dma_burst[sc->alc_dma_rd_burst]);
+ device_printf(dev, "TLP payload size : %u bytes.\n",
+ alc_dma_burst[sc->alc_dma_wr_burst]);
+ }
+ if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
+ sc->alc_dma_rd_burst = 3;
+ if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
+ sc->alc_dma_wr_burst = 3;
+ alc_init_pcie(sc);
+ }
+
+ /* Reset PHY. */
+ alc_phy_reset(sc);
+
+ /* Reset the ethernet controller. */
+ alc_stop_mac(sc);
+ alc_reset(sc);
+
/* Allocate IRQ resources. */
msixc = pci_msix_count(dev);
msic = pci_msi_count(dev);
@@ -918,11 +1490,20 @@ alc_attach(device_t dev)
device_printf(dev, "MSIX count : %d\n", msixc);
device_printf(dev, "MSI count : %d\n", msic);
}
- /* Prefer MSIX over MSI. */
+ if (msixc > 1)
+ msixc = 1;
+ if (msic > 1)
+ msic = 1;
+ /*
+ * Prefer MSIX over MSI.
+ * AR816x controller has a silicon bug that MSI interrupt
+ * does not assert if PCIM_CMD_INTxDIS bit of command
+ * register is set. pci(4) was taught to handle that case.
+ */
if (msix_disable == 0 || msi_disable == 0) {
- if (msix_disable == 0 && msixc == ALC_MSIX_MESSAGES &&
+ if (msix_disable == 0 && msixc > 0 &&
pci_alloc_msix(dev, &msixc) == 0) {
- if (msic == ALC_MSIX_MESSAGES) {
+ if (msic == 1) {
device_printf(dev,
"Using %d MSIX message(s).\n", msixc);
sc->alc_flags |= ALC_FLAG_MSIX;
@@ -931,9 +1512,8 @@ alc_attach(device_t dev)
pci_release_msi(dev);
}
if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 &&
- msic == ALC_MSI_MESSAGES &&
- pci_alloc_msi(dev, &msic) == 0) {
- if (msic == ALC_MSI_MESSAGES) {
+ msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
+ if (msic == 1) {
device_printf(dev,
"Using %d MSI message(s).\n", msic);
sc->alc_flags |= ALC_FLAG_MSI;
@@ -1007,9 +1587,13 @@ alc_attach(device_t dev)
* sample boards. To safety, don't enable Tx checksum offloading
* by default but give chance to users to toggle it if they know
* their controllers work without problems.
+ * Fortunately, Tx checksum offloading for AR816x family
+ * seems to work.
*/
- ifp->if_capenable &= ~IFCAP_TXCSUM;
- ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
+ ifp->if_capenable &= ~IFCAP_TXCSUM;
+ ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
+ }
/* Tell the upper layer(s) we support long frames. */
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
@@ -1026,6 +1610,7 @@ alc_attach(device_t dev)
taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->alc_dev));
+ alc_config_msi(sc);
if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
msic = ALC_MSIX_MESSAGES;
else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
@@ -1287,8 +1872,6 @@ alc_sysctl_node(struct alc_softc *sc)
&stats->tx_late_colls, "Late collisions");
ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
&stats->tx_excess_colls, "Excessive collisions");
- ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
- &stats->tx_abort, "Aborted frames due to Excessive collisions");
ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
&stats->tx_underrun, "FIFO underruns");
ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
@@ -1600,7 +2183,7 @@ again:
/*
* Create Tx buffer parent tag.
- * AR813x/AR815x allows 64bit DMA addressing of Tx/Rx buffers
+ * AR81[3567]x allows 64bit DMA addressing of Tx/Rx buffers
* so it needs separate parent DMA tag as parent DMA address
* space could be restricted to be within 32bit address space
* by 4GB boundary crossing.
@@ -1902,6 +2485,16 @@ alc_setlinkspeed(struct alc_softc *sc)
static void
alc_setwol(struct alc_softc *sc)
{
+
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ alc_setwol_816x(sc);
+ else
+ alc_setwol_813x(sc);
+}
+
+static void
+alc_setwol_813x(struct alc_softc *sc)
+{
struct ifnet *ifp;
uint32_t reg, pmcs;
uint16_t pmstat;
@@ -1962,6 +2555,72 @@ alc_setwol(struct alc_softc *sc)
sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
}
+/*
+ * Configure wake-on-LAN and PHY power state for AR816x controllers.
+ * Without PME support the PHY is forced into IDDQ/hardware power-down
+ * and WOL is cleared; otherwise WOL filters, MAC receive state and
+ * the PME enable bits are programmed from ifp->if_capenable.
+ */
+static void
+alc_setwol_816x(struct alc_softc *sc)
+{
+	struct ifnet *ifp;
+	uint32_t gphy, mac, master, pmcs, reg;
+	uint16_t pmstat;
+
+	ALC_LOCK_ASSERT(sc);
+
+	ifp = sc->alc_ifp;
+	master = CSR_READ_4(sc, ALC_MASTER_CFG);
+	master &= ~MASTER_CLK_SEL_DIS;
+	gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
+	gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | GPHY_CFG_100AB_ENB |
+	    GPHY_CFG_PHY_PLL_ON);
+	gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET;
+	if ((sc->alc_flags & ALC_FLAG_PM) == 0) {
+		/* No PME capability: disable WOL, power the PHY down. */
+		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
+		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
+		mac = CSR_READ_4(sc, ALC_MAC_CFG);
+	} else {
+		if ((ifp->if_capenable & IFCAP_WOL) != 0) {
+			gphy |= GPHY_CFG_EXT_RESET;
+			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
+				alc_setlinkspeed(sc);
+		}
+		pmcs = 0;
+		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
+			pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
+		CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
+		mac = CSR_READ_4(sc, ALC_MAC_CFG);
+		mac &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
+		    MAC_CFG_BCAST);
+		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
+			mac |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
+		if ((ifp->if_capenable & IFCAP_WOL) != 0)
+			mac |= MAC_CFG_RX_ENB;
+		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
+		    ANEG_S3DIG10_SL);
+	}
+
+	/* Enable OSC. */
+	reg = CSR_READ_4(sc, ALC_MISC);
+	reg &= ~MISC_INTNLOSC_OPEN;
+	CSR_WRITE_4(sc, ALC_MISC, reg);
+	reg |= MISC_INTNLOSC_OPEN;
+	CSR_WRITE_4(sc, ALC_MISC, reg);
+	CSR_WRITE_4(sc, ALC_MASTER_CFG, master);
+	CSR_WRITE_4(sc, ALC_MAC_CFG, mac);
+	CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
+	reg = CSR_READ_4(sc, ALC_PDLL_TRNS1);
+	reg |= PDLL_TRNS1_D3PLLOFF_ENB;
+	CSR_WRITE_4(sc, ALC_PDLL_TRNS1, reg);
+
+	if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
+		/* Request PME. */
+		pmstat = pci_read_config(sc->alc_dev,
+		    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
+		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
+		if ((ifp->if_capenable & IFCAP_WOL) != 0)
+			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+		pci_write_config(sc->alc_dev,
+		    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
+	}
+}
+
static int
alc_suspend(device_t dev)
{
@@ -2032,7 +2691,7 @@ alc_encap(struct alc_softc *sc, struct mbuf **m_head)
ip_off = poff = 0;
if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
/*
- * AR813x/AR815x requires offset of TCP/UDP header in its
+ * AR81[3567]x requires offset of TCP/UDP header in its
* Tx descriptor to perform Tx checksum offloading. TSO
* also requires TCP header offset and modification of
* IP/TCP header. This kind of operation takes many CPU
@@ -2170,7 +2829,7 @@ alc_encap(struct alc_softc *sc, struct mbuf **m_head)
cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
TD_TCPHDR_OFFSET_MASK;
/*
- * AR813x/AR815x requires the first buffer should
+ * AR81[3567]x requires the first buffer should
* only hold IP/TCP header data. Payload should
* be handled in other descriptors.
*/
@@ -2301,10 +2960,14 @@ alc_start_locked(struct ifnet *ifp)
bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
/* Kick. Assume we're using normal Tx priority queue. */
- CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
- (sc->alc_cdata.alc_tx_prod <<
- MBOX_TD_PROD_LO_IDX_SHIFT) &
- MBOX_TD_PROD_LO_IDX_MASK);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX,
+ (uint16_t)sc->alc_cdata.alc_tx_prod);
+ else
+ CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
+ (sc->alc_cdata.alc_tx_prod <<
+ MBOX_TD_PROD_LO_IDX_SHIFT) &
+ MBOX_TD_PROD_LO_IDX_MASK);
/* Set a timeout in case the chip goes out to lunch. */
sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
}
@@ -2358,7 +3021,7 @@ alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
else if (ifp->if_mtu != ifr->ifr_mtu) {
ALC_LOCK(sc);
ifp->if_mtu = ifr->ifr_mtu;
- /* AR813x/AR815x has 13 bits MSS field. */
+ /* AR81[3567]x has 13 bits MSS field. */
if (ifp->if_mtu > ALC_TSO_MTU &&
(ifp->if_capenable & IFCAP_TSO4) != 0) {
ifp->if_capenable &= ~IFCAP_TSO4;
@@ -2409,7 +3072,7 @@ alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
(ifp->if_capabilities & IFCAP_TSO4) != 0) {
ifp->if_capenable ^= IFCAP_TSO4;
if ((ifp->if_capenable & IFCAP_TSO4) != 0) {
- /* AR813x/AR815x has 13 bits MSS field. */
+ /* AR81[3567]x has 13 bits MSS field. */
if (ifp->if_mtu > ALC_TSO_MTU) {
ifp->if_capenable &= ~IFCAP_TSO4;
ifp->if_hwassist &= ~CSUM_TSO;
@@ -2461,7 +3124,8 @@ alc_mac_config(struct alc_softc *sc)
reg = CSR_READ_4(sc, ALC_MAC_CFG);
reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
MAC_CFG_SPEED_MASK);
- if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
+ sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
@@ -2599,7 +3263,6 @@ alc_stats_update(struct alc_softc *sc)
stat->tx_multi_colls += smb->tx_multi_colls;
stat->tx_late_colls += smb->tx_late_colls;
stat->tx_excess_colls += smb->tx_excess_colls;
- stat->tx_abort += smb->tx_abort;
stat->tx_underrun += smb->tx_underrun;
stat->tx_desc_underrun += smb->tx_desc_underrun;
stat->tx_lenerrs += smb->tx_lenerrs;
@@ -2612,17 +3275,10 @@ alc_stats_update(struct alc_softc *sc)
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls +
smb->tx_multi_colls * 2 + smb->tx_late_colls +
- smb->tx_abort * HDPX_CFG_RETRY_DEFAULT);
+ smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);
- /*
- * XXX
- * tx_pkts_truncated counter looks suspicious. It constantly
- * increments with no sign of Tx errors. This may indicate
- * the counter name is not correct one so I've removed the
- * counter in output errors.
- */
- if_inc_counter(ifp, IFCOUNTER_OERRORS,
- smb->tx_abort + smb->tx_late_colls + smb->tx_underrun);
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_late_colls +
+ smb->tx_excess_colls + smb->tx_underrun + smb->tx_pkts_truncated);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames);
@@ -2751,11 +3407,16 @@ alc_txeof(struct alc_softc *sc)
bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
prod = sc->alc_rdata.alc_cmb->cons;
- } else
- prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
- /* Assume we're using normal Tx priority queue. */
- prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
- MBOX_TD_CONS_LO_IDX_SHIFT;
+ } else {
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX);
+ else {
+ prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
+ /* Assume we're using normal Tx priority queue. */
+ prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
+ MBOX_TD_CONS_LO_IDX_SHIFT;
+ }
+ }
cons = sc->alc_cdata.alc_tx_cons;
/*
* Go through our Tx list and free mbufs for those
@@ -2891,8 +3552,12 @@ alc_rxintr(struct alc_softc *sc, int count)
* it still seems that pre-fetching needs more
* experimentation.
*/
- CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
- sc->alc_cdata.alc_rx_cons);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX,
+ (uint16_t)sc->alc_cdata.alc_rx_cons);
+ else
+ CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
+ sc->alc_cdata.alc_rx_cons);
}
return (count > 0 ? 0 : EAGAIN);
@@ -3084,14 +3749,78 @@ alc_tick(void *arg)
}
static void
-alc_reset(struct alc_softc *sc)
+alc_osc_reset(struct alc_softc *sc)
{
uint32_t reg;
+
+ reg = CSR_READ_4(sc, ALC_MISC3);
+ reg &= ~MISC3_25M_BY_SW;
+ reg |= MISC3_25M_NOTO_INTNL;
+ CSR_WRITE_4(sc, ALC_MISC3, reg);
+
+ reg = CSR_READ_4(sc, ALC_MISC);
+ if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) {
+ /*
+ * Restore over-current protection default value.
+ * This value could be reset by MAC reset.
+ */
+ reg &= ~MISC_PSW_OCP_MASK;
+ reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT);
+ reg &= ~MISC_INTNLOSC_OPEN;
+ CSR_WRITE_4(sc, ALC_MISC, reg);
+ CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
+ reg = CSR_READ_4(sc, ALC_MISC2);
+ reg &= ~MISC2_CALB_START;
+ CSR_WRITE_4(sc, ALC_MISC2, reg);
+ CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START);
+
+ } else {
+ reg &= ~MISC_INTNLOSC_OPEN;
+ /* Disable isolate for revision A devices. */
+ if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
+ reg &= ~MISC_ISO_ENB;
+ CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
+ CSR_WRITE_4(sc, ALC_MISC, reg);
+ }
+
+ DELAY(20);
+}
+
+static void
+alc_reset(struct alc_softc *sc)
+{
+ uint32_t pmcfg, reg;
int i;
- reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
+ pmcfg = 0;
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
+ /* Reset workaround. */
+ CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1);
+ if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
+ (sc->alc_rev & 0x01) != 0) {
+ /* Disable L0s/L1s before reset. */
+ pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
+ if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
+ != 0) {
+ pmcfg &= ~(PM_CFG_ASPM_L0S_ENB |
+ PM_CFG_ASPM_L1_ENB);
+ CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
+ }
+ }
+ }
+ reg = CSR_READ_4(sc, ALC_MASTER_CFG);
reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
+
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
+ for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
+ DELAY(10);
+ if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0)
+ break;
+ }
+ if (i == 0)
+ device_printf(sc->alc_dev, "MAC reset timeout!\n");
+ }
for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
DELAY(10);
if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
@@ -3101,13 +3830,45 @@ alc_reset(struct alc_softc *sc)
device_printf(sc->alc_dev, "master reset timeout!\n");
for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
- if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
+ reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
+ if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC |
+ IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
break;
DELAY(10);
}
-
if (i == 0)
device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
+
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
+ if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
+ (sc->alc_rev & 0x01) != 0) {
+ reg = CSR_READ_4(sc, ALC_MASTER_CFG);
+ reg |= MASTER_CLK_SEL_DIS;
+ CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
+ /* Restore L0s/L1s config. */
+ if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
+ != 0)
+ CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
+ }
+
+ alc_osc_reset(sc);
+ reg = CSR_READ_4(sc, ALC_MISC3);
+ reg &= ~MISC3_25M_BY_SW;
+ reg |= MISC3_25M_NOTO_INTNL;
+ CSR_WRITE_4(sc, ALC_MISC3, reg);
+ reg = CSR_READ_4(sc, ALC_MISC);
+ reg &= ~MISC_INTNLOSC_OPEN;
+ if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
+ reg &= ~MISC_ISO_ENB;
+ CSR_WRITE_4(sc, ALC_MISC, reg);
+ DELAY(20);
+ }
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
+ sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
+ sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2)
+ CSR_WRITE_4(sc, ALC_SERDES_LOCK,
+ CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
+ SERDES_PHY_CLK_SLOWDOWN);
}
static void
@@ -3158,7 +3919,16 @@ alc_init_locked(struct alc_softc *sc)
alc_init_smb(sc);
/* Enable all clocks. */
- CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
+ CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB |
+ CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB |
+ CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB |
+ CLK_GATING_RXMAC_ENB);
+ if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0)
+ CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER,
+ IDLE_DECISN_TIMER_DEFAULT_1MS);
+ } else
+ CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
/* Reprogram the station address. */
bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
@@ -3184,10 +3954,12 @@ alc_init_locked(struct alc_softc *sc)
paddr = sc->alc_rdata.alc_rx_ring_paddr;
CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
- /* We use one Rx ring. */
- CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
- CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
- CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
+ /* We use one Rx ring. */
+ CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
+ CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
+ CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
+ }
/* Set Rx descriptor counter. */
CSR_WRITE_4(sc, ALC_RD_RING_CNT,
(ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
@@ -3212,10 +3984,12 @@ alc_init_locked(struct alc_softc *sc)
paddr = sc->alc_rdata.alc_rr_ring_paddr;
/* Set Rx return descriptor base addresses. */
CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
- /* We use one Rx return ring. */
- CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
- CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
- CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
+ /* We use one Rx return ring. */
+ CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
+ CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
+ CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
+ }
/* Set Rx return descriptor counter. */
CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
(ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
@@ -3242,16 +4016,20 @@ alc_init_locked(struct alc_softc *sc)
/* Configure interrupt moderation timer. */
reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
- reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
+ reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
/*
* We don't want to automatic interrupt clear as task queue
* for the interrupt should know interrupt status.
*/
- reg = MASTER_SA_TIMER_ENB;
+ reg = CSR_READ_4(sc, ALC_MASTER_CFG);
+ reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
+ reg |= MASTER_SA_TIMER_ENB;
if (ALC_USECS(sc->alc_int_rx_mod) != 0)
reg |= MASTER_IM_RX_TIMER_ENB;
- if (ALC_USECS(sc->alc_int_tx_mod) != 0)
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 &&
+ ALC_USECS(sc->alc_int_tx_mod) != 0)
reg |= MASTER_IM_TX_TIMER_ENB;
CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
/*
@@ -3260,11 +4038,17 @@ alc_init_locked(struct alc_softc *sc)
*/
CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
/* Configure CMB. */
- if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
- CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
- CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
- } else
- CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
+ CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
+ CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
+ ALC_USECS(sc->alc_int_tx_mod));
+ } else {
+ if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
+ CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
+ CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
+ } else
+ CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
+ }
/*
* Hardware can be configured to issue SMB interrupt based
* on programmed interval. Since there is a callout that is
@@ -3291,33 +4075,42 @@ alc_init_locked(struct alc_softc *sc)
*/
CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
- /* Disable header split(?) */
- CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
-
- /* Configure IPG/IFG parameters. */
- CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
- ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
- ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
- ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
- ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
- /* Set parameters for half-duplex media. */
- CSR_WRITE_4(sc, ALC_HDPX_CFG,
- ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
- HDPX_CFG_LCOL_MASK) |
- ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
- HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
- ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
- HDPX_CFG_ABEBT_MASK) |
- ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
- HDPX_CFG_JAMIPG_MASK));
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
+ /* Disable header split(?) */
+ CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
+
+ /* Configure IPG/IFG parameters. */
+ CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
+ ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
+ IPG_IFG_IPGT_MASK) |
+ ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
+ IPG_IFG_MIFG_MASK) |
+ ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
+ IPG_IFG_IPG1_MASK) |
+ ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
+ IPG_IFG_IPG2_MASK));
+ /* Set parameters for half-duplex media. */
+ CSR_WRITE_4(sc, ALC_HDPX_CFG,
+ ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
+ HDPX_CFG_LCOL_MASK) |
+ ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
+ HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
+ ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
+ HDPX_CFG_ABEBT_MASK) |
+ ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
+ HDPX_CFG_JAMIPG_MASK));
+ }
+
/*
* Set TSO/checksum offload threshold. For frames that is
* larger than this threshold, hardware wouldn't do
* TSO/checksum offloading.
*/
- CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
- (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
- TSO_OFFLOAD_THRESH_MASK);
+ reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
+ TSO_OFFLOAD_THRESH_MASK;
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
+ CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
/* Configure TxQ. */
reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
@@ -3326,21 +4119,50 @@ alc_init_locked(struct alc_softc *sc)
reg >>= 1;
reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
TXQ_CFG_TD_BURST_MASK;
+ reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB;
CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
-
- /* Configure Rx free descriptor pre-fetching. */
- CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
- ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
- RX_RD_FREE_THRESH_HI_MASK) |
- ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
- RX_RD_FREE_THRESH_LO_MASK));
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
+ reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT |
+ TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT |
+ TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT |
+ HQTD_CFG_BURST_ENB);
+ CSR_WRITE_4(sc, ALC_HQTD_CFG, reg);
+ reg = WRR_PRI_RESTRICT_NONE;
+ reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT |
+ WRR_PRI_DEFAULT << WRR_PRI1_SHIFT |
+ WRR_PRI_DEFAULT << WRR_PRI2_SHIFT |
+ WRR_PRI_DEFAULT << WRR_PRI3_SHIFT);
+ CSR_WRITE_4(sc, ALC_WRR, reg);
+ } else {
+ /* Configure Rx free descriptor pre-fetching. */
+ CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
+ ((RX_RD_FREE_THRESH_HI_DEFAULT <<
+ RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) |
+ ((RX_RD_FREE_THRESH_LO_DEFAULT <<
+ RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK));
+ }
/*
* Configure flow control parameters.
* XON : 80% of Rx FIFO
* XOFF : 30% of Rx FIFO
*/
- if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
+ reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
+ reg &= SRAM_RX_FIFO_LEN_MASK;
+ reg *= 8;
+ if (reg > 8 * 1024)
+ reg -= RX_FIFO_PAUSE_816X_RSVD;
+ else
+ reg -= RX_BUF_SIZE_MAX;
+ reg /= 8;
+ CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
+ ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
+ RX_FIFO_PAUSE_THRESH_LO_MASK) |
+ (((RX_FIFO_PAUSE_816X_RSVD / 8) <<
+ RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
+ RX_FIFO_PAUSE_THRESH_HI_MASK));
+ } else if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) {
reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
rxf_hi = (reg * 8) / 10;
@@ -3352,21 +4174,22 @@ alc_init_locked(struct alc_softc *sc)
RX_FIFO_PAUSE_THRESH_HI_MASK));
}
- if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
- sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2)
- CSR_WRITE_4(sc, ALC_SERDES_LOCK,
- CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
- SERDES_PHY_CLK_SLOWDOWN);
-
- /* Disable RSS until I understand L1C/L2C's RSS logic. */
- CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
- CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
+ /* Disable RSS until I understand L1C/L2C's RSS logic. */
+ CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
+ CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
+ }
/* Configure RxQ. */
reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
RXQ_CFG_RD_BURST_MASK;
reg |= RXQ_CFG_RSS_MODE_DIS;
- if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
+ RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
+ RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
+ if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
+ sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2)
reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
@@ -3387,6 +4210,19 @@ alc_init_locked(struct alc_softc *sc)
DMA_CFG_RD_DELAY_CNT_MASK;
reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
DMA_CFG_WR_DELAY_CNT_MASK;
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
+ switch (AR816X_REV(sc->alc_rev)) {
+ case AR816X_REV_A0:
+ case AR816X_REV_A1:
+ reg |= DMA_CFG_RD_CHNL_SEL_1;
+ break;
+ case AR816X_REV_B0:
+ /* FALLTHROUGH */
+ default:
+ reg |= DMA_CFG_RD_CHNL_SEL_3;
+ break;
+ }
+ }
CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
/*
@@ -3405,7 +4241,8 @@ alc_init_locked(struct alc_softc *sc)
reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
MAC_CFG_PREAMBLE_MASK);
- if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
+ sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
@@ -3424,14 +4261,14 @@ alc_init_locked(struct alc_softc *sc)
CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
sc->alc_flags &= ~ALC_FLAG_LINK;
/* Switch to the current media. */
- mii_mediachg(mii);
+ alc_mediachange_locked(sc);
callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
-
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
static void
@@ -3456,7 +4293,6 @@ alc_stop(struct alc_softc *sc)
/* Disable interrupts. */
CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
- alc_stop_queue(sc);
/* Disable DMA. */
reg = CSR_READ_4(sc, ALC_DMA_CFG);
reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
@@ -3467,7 +4303,8 @@ alc_stop(struct alc_softc *sc)
alc_stop_mac(sc);
/* Disable interrupts which might be touched in taskq handler. */
CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
-
+ /* Disable L0s/L1s */
+ alc_aspm(sc, 0, IFM_UNKNOWN);
/* Reclaim Rx buffers that have been processed. */
if (sc->alc_cdata.alc_rxhead != NULL)
m_freem(sc->alc_cdata.alc_rxhead);
@@ -3505,8 +4342,7 @@ alc_stop_mac(struct alc_softc *sc)
uint32_t reg;
int i;
- ALC_LOCK_ASSERT(sc);
-
+ alc_stop_queue(sc);
/* Disable Rx/Tx MAC. */
reg = CSR_READ_4(sc, ALC_MAC_CFG);
if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
@@ -3515,7 +4351,7 @@ alc_stop_mac(struct alc_softc *sc)
}
for (i = ALC_TIMEOUT; i > 0; i--) {
reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
- if (reg == 0)
+ if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
break;
DELAY(10);
}
@@ -3540,8 +4376,11 @@ alc_start_queue(struct alc_softc *sc)
/* Enable RxQ. */
cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
- cfg &= ~RXQ_CFG_ENB;
- cfg |= qcfg[1];
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
+ cfg &= ~RXQ_CFG_ENB;
+ cfg |= qcfg[1];
+ } else
+ cfg |= RXQ_CFG_QUEUE0_ENB;
CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
/* Enable TxQ. */
cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
@@ -3559,9 +4398,16 @@ alc_stop_queue(struct alc_softc *sc)
/* Disable RxQ. */
reg = CSR_READ_4(sc, ALC_RXQ_CFG);
- if ((reg & RXQ_CFG_ENB) != 0) {
- reg &= ~RXQ_CFG_ENB;
- CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
+ if ((reg & RXQ_CFG_ENB) != 0) {
+ reg &= ~RXQ_CFG_ENB;
+ CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
+ }
+ } else {
+ if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) {
+ reg &= ~RXQ_CFG_QUEUE0_ENB;
+ CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
+ }
}
/* Disable TxQ. */
reg = CSR_READ_4(sc, ALC_TXQ_CFG);
@@ -3569,6 +4415,7 @@ alc_stop_queue(struct alc_softc *sc)
reg &= ~TXQ_CFG_ENB;
CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
}
+ DELAY(40);
for (i = ALC_TIMEOUT; i > 0; i--) {
reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
diff --git a/sys/dev/alc/if_alcreg.h b/sys/dev/alc/if_alcreg.h
index 3011abf..1ad75a3 100644
--- a/sys/dev/alc/if_alcreg.h
+++ b/sys/dev/alc/if_alcreg.h
@@ -44,10 +44,26 @@
#define DEVICEID_ATHEROS_AR8151_V2 0x1083 /* L1D V2.0 */
#define DEVICEID_ATHEROS_AR8152_B 0x2060 /* L2C V1.1 */
#define DEVICEID_ATHEROS_AR8152_B2 0x2062 /* L2C V2.0 */
+#define DEVICEID_ATHEROS_AR8161 0x1091
+#define DEVICEID_ATHEROS_E2200 0xE091
+#define DEVICEID_ATHEROS_AR8162 0x1090
+#define DEVICEID_ATHEROS_AR8171 0x10A1
+#define DEVICEID_ATHEROS_AR8172 0x10A0
#define ATHEROS_AR8152_B_V10 0xC0
#define ATHEROS_AR8152_B_V11 0xC1
+/*
+ * Atheros AR816x/AR817x revisions
+ */
+#define AR816X_REV_A0 0
+#define AR816X_REV_A1 1
+#define AR816X_REV_B0 2
+#define AR816X_REV_C0 3
+
+#define AR816X_REV_SHIFT 3
+#define AR816X_REV(x) ((x) >> AR816X_REV_SHIFT)
+
/* 0x0000 - 0x02FF : PCIe configuration space */
#define ALC_PEX_UNC_ERR_SEV 0x10C
@@ -63,11 +79,34 @@
#define PEX_UNC_ERR_SEV_ECRC 0x00080000
#define PEX_UNC_ERR_SEV_UR 0x00100000
+#define ALC_EEPROM_LD 0x204 /* AR816x */
+#define EEPROM_LD_START 0x00000001
+#define EEPROM_LD_IDLE 0x00000010
+#define EEPROM_LD_DONE 0x00000000
+#define EEPROM_LD_PROGRESS 0x00000020
+#define EEPROM_LD_EXIST 0x00000100
+#define EEPROM_LD_EEPROM_EXIST 0x00000200
+#define EEPROM_LD_FLASH_EXIST 0x00000400
+#define EEPROM_LD_FLASH_END_ADDR_MASK 0x03FF0000
+#define EEPROM_LD_FLASH_END_ADDR_SHIFT 16
+
#define ALC_TWSI_CFG 0x218
#define TWSI_CFG_SW_LD_START 0x00000800
#define TWSI_CFG_HW_LD_START 0x00001000
#define TWSI_CFG_LD_EXIST 0x00400000
+#define ALC_SLD 0x218 /* AR816x */
+#define SLD_START 0x00000800
+#define SLD_PROGRESS 0x00001000
+#define SLD_IDLE 0x00002000
+#define SLD_SLVADDR_MASK 0x007F0000
+#define SLD_EXIST 0x00800000
+#define SLD_FREQ_MASK 0x03000000
+#define SLD_FREQ_100K 0x00000000
+#define SLD_FREQ_200K 0x01000000
+#define SLD_FREQ_300K 0x02000000
+#define SLD_FREQ_400K 0x03000000
+
#define ALC_PCIE_PHYMISC 0x1000
#define PCIE_PHYMISC_FORCE_RCV_DET 0x00000004
@@ -77,6 +116,9 @@
#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16
#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18
+#define ALC_PDLL_TRNS1 0x1104
+#define PDLL_TRNS1_D3PLLOFF_ENB 0x00000800
+
#define ALC_TWSI_DEBUG 0x1108
#define TWSI_DEBUG_DEV_EXIST 0x20000000
@@ -103,11 +145,14 @@
#define PM_CFG_SERDES_PD_EX_L1 0x00000040
#define PM_CFG_SERDES_BUDS_RX_L1_ENB 0x00000080
#define PM_CFG_L0S_ENTRY_TIMER_MASK 0x00000F00
+#define PM_CFG_RX_L1_AFTER_L0S 0x00000800
#define PM_CFG_ASPM_L0S_ENB 0x00001000
#define PM_CFG_CLK_SWH_L1 0x00002000
#define PM_CFG_CLK_PWM_VER1_1 0x00004000
#define PM_CFG_PCIE_RECV 0x00008000
#define PM_CFG_L1_ENTRY_TIMER_MASK 0x000F0000
+#define PM_CFG_L1_ENTRY_TIMER_816X_MASK 0x00070000
+#define PM_CFG_TX_L1_AFTER_L0S 0x00080000
#define PM_CFG_PM_REQ_TIMER_MASK 0x00F00000
#define PM_CFG_LCKDET_TIMER_MASK 0x0F000000
#define PM_CFG_EN_BUFS_RX_L0S 0x10000000
@@ -121,8 +166,10 @@
#define PM_CFG_L0S_ENTRY_TIMER_DEFAULT 6
#define PM_CFG_L1_ENTRY_TIMER_DEFAULT 1
+#define PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT 4
#define PM_CFG_LCKDET_TIMER_DEFAULT 12
#define PM_CFG_PM_REQ_TIMER_DEFAULT 12
+#define PM_CFG_PM_REQ_TIMER_816X_DEFAULT 15
#define ALC_LTSSM_ID_CFG 0x12FC
#define LTSSM_ID_WRO_ENB 0x00001000
@@ -131,6 +178,7 @@
#define MASTER_RESET 0x00000001
#define MASTER_TEST_MODE_MASK 0x0000000C
#define MASTER_BERT_START 0x00000010
+#define MASTER_WAKEN_25M 0x00000020
#define MASTER_OOB_DIS_OFF 0x00000040
#define MASTER_SA_TIMER_ENB 0x00000080
#define MASTER_MTIMER_ENB 0x00000100
@@ -171,7 +219,7 @@
*/
#define ALC_IM_TX_TIMER_DEFAULT 1000 /* 1ms */
-#define ALC_GPHY_CFG 0x140C /* 16bits */
+#define ALC_GPHY_CFG 0x140C /* 16 bits, 32 bits on AR816x */
#define GPHY_CFG_EXT_RESET 0x0001
#define GPHY_CFG_RTL_MODE 0x0002
#define GPHY_CFG_LED_MODE 0x0004
@@ -188,6 +236,7 @@
#define GPHY_CFG_PHY_PLL_ON 0x2000
#define GPHY_CFG_PWDOWN_HW 0x4000
#define GPHY_CFG_PHY_PLL_BYPASS 0x8000
+#define GPHY_CFG_100AB_ENB 0x00020000
#define ALC_IDLE_STATUS 0x1410
#define IDLE_STATUS_RXMAC 0x00000001
@@ -212,9 +261,10 @@
#define MDIO_CLK_25_10 0x04000000
#define MDIO_CLK_25_14 0x05000000
#define MDIO_CLK_25_20 0x06000000
-#define MDIO_CLK_25_28 0x07000000
+#define MDIO_CLK_25_128 0x07000000
#define MDIO_OP_BUSY 0x08000000
#define MDIO_AP_ENB 0x10000000
+#define MDIO_MODE_EXT 0x40000000
#define MDIO_DATA_SHIFT 0
#define MDIO_REG_ADDR_SHIFT 16
@@ -248,6 +298,23 @@
#define SERDES_MAC_CLK_SLOWDOWN 0x00020000
#define SERDES_PHY_CLK_SLOWDOWN 0x00040000
+#define ALC_LPI_CTL 0x1440
+#define LPI_CTL_ENB 0x00000001
+
+#define ALC_EXT_MDIO 0x1448
+#define EXT_MDIO_REG_MASK 0x0000FFFF
+#define EXT_MDIO_DEVADDR_MASK 0x001F0000
+#define EXT_MDIO_REG_SHIFT 0
+#define EXT_MDIO_DEVADDR_SHIFT 16
+
+#define EXT_MDIO_REG(x) \
+ (((x) << EXT_MDIO_REG_SHIFT) & EXT_MDIO_REG_MASK)
+#define EXT_MDIO_DEVADDR(x) \
+ (((x) << EXT_MDIO_DEVADDR_SHIFT) & EXT_MDIO_DEVADDR_MASK)
+
+#define ALC_IDLE_DECISN_TIMER 0x1474
+#define IDLE_DECISN_TIMER_DEFAULT_1MS 0x400
+
#define ALC_MAC_CFG 0x1480
#define MAC_CFG_TX_ENB 0x00000001
#define MAC_CFG_RX_ENB 0x00000002
@@ -278,6 +345,7 @@
#define MAC_CFG_SINGLE_PAUSE_ENB 0x10000000
#define MAC_CFG_HASH_ALG_CRC32 0x20000000
#define MAC_CFG_SPEED_MODE_SW 0x40000000
+#define MAC_CFG_FAST_PAUSE 0x80000000
#define MAC_CFG_PREAMBLE_SHIFT 10
#define MAC_CFG_PREAMBLE_DEFAULT 7
@@ -378,8 +446,12 @@
#define ALC_RSS_IDT_TABLE0 0x14E0
+#define ALC_TD_PRI2_HEAD_ADDR_LO 0x14E0 /* AR816x */
+
#define ALC_RSS_IDT_TABLE1 0x14E4
+#define ALC_TD_PRI3_HEAD_ADDR_LO 0x14E4 /* AR816x */
+
#define ALC_RSS_IDT_TABLE2 0x14E8
#define ALC_RSS_IDT_TABLE3 0x14EC
@@ -422,6 +494,8 @@
#define ALC_SRAM_RX_FIFO_ADDR 0x1520
#define ALC_SRAM_RX_FIFO_LEN 0x1524
+#define SRAM_RX_FIFO_LEN_MASK 0x00000FFF
+#define SRAM_RX_FIFO_LEN_SHIFT 0
#define ALC_SRAM_TX_FIFO_ADDR 0x1528
@@ -478,8 +552,12 @@
#define ALC_TDH_HEAD_ADDR_LO 0x157C
+#define ALC_TD_PRI1_HEAD_ADDR_LO 0x157C /* AR816x */
+
#define ALC_TDL_HEAD_ADDR_LO 0x1580
+#define ALC_TD_PRI0_HEAD_ADDR_LO 0x1580 /* AR816x */
+
#define ALC_TD_RING_CNT 0x1584
#define TD_RING_CNT_MASK 0x0000FFFF
#define TD_RING_CNT_SHIFT 0
@@ -499,6 +577,7 @@
#define ALC_TSO_OFFLOAD_THRESH 0x1594 /* 8 bytes unit */
#define TSO_OFFLOAD_THRESH_MASK 0x000007FF
+#define TSO_OFFLOAD_ERRLGPKT_DROP_ENB 0x00000800
#define TSO_OFFLOAD_THRESH_SHIFT 0
#define TSO_OFFLOAD_THRESH_UNIT 8
#define TSO_OFFLOAD_THRESH_UNIT_SHIFT 3
@@ -546,6 +625,17 @@
(RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | \
RXQ_CFG_QUEUE2_ENB | RXQ_CFG_QUEUE3_ENB)
+/* AR816x specific bits */
+#define RXQ_CFG_816X_RSS_HASH_IPV4 0x00000004
+#define RXQ_CFG_816X_RSS_HASH_IPV4_TCP 0x00000008
+#define RXQ_CFG_816X_RSS_HASH_IPV6 0x00000010
+#define RXQ_CFG_816X_RSS_HASH_IPV6_TCP 0x00000020
+#define RXQ_CFG_816X_RSS_HASH_MASK 0x0000003C
+#define RXQ_CFG_816X_IPV6_PARSE_ENB 0x00000080
+#define RXQ_CFG_816X_IDT_TBL_SIZE_MASK 0x0001FF00
+#define RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT 8
+#define RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT 0x100
+
#define ALC_RX_RD_FREE_THRESH 0x15A4 /* 8 bytes unit. */
#define RX_RD_FREE_THRESH_HI_MASK 0x0000003F
#define RX_RD_FREE_THRESH_LO_MASK 0x00000FC0
@@ -559,6 +649,12 @@
#define RX_FIFO_PAUSE_THRESH_HI_MASK 0x0FFF0000
#define RX_FIFO_PAUSE_THRESH_LO_SHIFT 0
#define RX_FIFO_PAUSE_THRESH_HI_SHIFT 16
+/*
+ * Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
+ * rx-packet(1522) + delay-of-link(64)
+ * = 3212.
+ */
+#define RX_FIFO_PAUSE_816X_RSVD 3212
#define ALC_RD_DMA_CFG 0x15AC
#define RD_DMA_CFG_THRESH_MASK 0x00000FFF /* 8 bytes unit */
@@ -582,6 +678,7 @@
#define DMA_CFG_OUT_ORDER 0x00000004
#define DMA_CFG_RCB_64 0x00000000
#define DMA_CFG_RCB_128 0x00000008
+#define DMA_CFG_PEND_AUTO_RST 0x00000008
#define DMA_CFG_RD_BURST_128 0x00000000
#define DMA_CFG_RD_BURST_256 0x00000010
#define DMA_CFG_RD_BURST_512 0x00000020
@@ -601,6 +698,14 @@
#define DMA_CFG_SMB_ENB 0x00200000
#define DMA_CFG_CMB_NOW 0x00400000
#define DMA_CFG_SMB_DIS 0x01000000
+#define DMA_CFG_RD_CHNL_SEL_MASK 0x0C000000
+#define DMA_CFG_RD_CHNL_SEL_1 0x00000000
+#define DMA_CFG_RD_CHNL_SEL_2 0x04000000
+#define DMA_CFG_RD_CHNL_SEL_3 0x08000000
+#define DMA_CFG_RD_CHNL_SEL_4 0x0C000000
+#define DMA_CFG_WSRAM_RDCTL 0x10000000
+#define DMA_CFG_RD_PEND_CLR 0x20000000
+#define DMA_CFG_WR_PEND_CLR 0x40000000
#define DMA_CFG_SMB_NOW 0x80000000
#define DMA_CFG_RD_BURST_MASK 0x07
#define DMA_CFG_RD_BURST_SHIFT 4
@@ -623,6 +728,12 @@
#define CMB_TX_TIMER_MASK 0x0000FFFF
#define CMB_TX_TIMER_SHIFT 0
+#define ALC_MSI_MAP_TBL1 0x15D0
+
+#define ALC_MSI_ID_MAP 0x15D4
+
+#define ALC_MSI_MAP_TBL2 0x15D8
+
#define ALC_MBOX_RD0_PROD_IDX 0x15E0
#define ALC_MBOX_RD1_PROD_IDX 0x15E4
@@ -640,12 +751,20 @@
#define MBOX_TD_PROD_HI_IDX_SHIFT 0
#define MBOX_TD_PROD_LO_IDX_SHIFT 16
+#define ALC_MBOX_TD_PRI1_PROD_IDX 0x15F0 /* 16 bits AR816x */
+
+#define ALC_MBOX_TD_PRI0_PROD_IDX 0x15F2 /* 16 bits AR816x */
+
#define ALC_MBOX_TD_CONS_IDX 0x15F4
#define MBOX_TD_CONS_HI_IDX_MASK 0x0000FFFF
#define MBOX_TD_CONS_LO_IDX_MASK 0xFFFF0000
#define MBOX_TD_CONS_HI_IDX_SHIFT 0
#define MBOX_TD_CONS_LO_IDX_SHIFT 16
+#define ALC_MBOX_TD_PRI1_CONS_IDX 0x15F4 /* 16 bits AR816x */
+
+#define ALC_MBOX_TD_PRI0_CONS_IDX 0x15F6 /* 16 bits AR816x */
+
#define ALC_MBOX_RD01_CONS_IDX 0x15F8
#define MBOX_RD0_CONS_IDX_MASK 0x0000FFFF
#define MBOX_RD1_CONS_IDX_MASK 0xFFFF0000
@@ -674,7 +793,7 @@
#define INTR_GPHY 0x00001000
#define INTR_GPHY_LOW_PW 0x00002000
#define INTR_TXQ_TO_RST 0x00004000
-#define INTR_TX_PKT 0x00008000
+#define INTR_TX_PKT0 0x00008000
#define INTR_RX_PKT0 0x00010000
#define INTR_RX_PKT1 0x00020000
#define INTR_RX_PKT2 0x00040000
@@ -688,6 +807,15 @@
#define INTR_PHY_LINK_DOWN 0x04000000
#define INTR_DIS_INT 0x80000000
+/* INTR status for AR816x/AR817x 4 TX queues, 8 RX queues */
+#define INTR_TX_PKT1 0x00000020
+#define INTR_TX_PKT2 0x00000040
+#define INTR_TX_PKT3 0x00000080
+#define INTR_RX_PKT4 0x08000000
+#define INTR_RX_PKT5 0x10000000
+#define INTR_RX_PKT6 0x20000000
+#define INTR_RX_PKT7 0x40000000
+
/* Interrupt Mask Register */
#define ALC_INTR_MASK 0x1604
@@ -699,6 +827,7 @@
(INTR_RD0_UNDERRUN | INTR_RD1_UNDERRUN | \
INTR_RD2_UNDERRUN | INTR_RD3_UNDERRUN)
#else
+#define INTR_TX_PKT INTR_TX_PKT0
#define INTR_RX_PKT INTR_RX_PKT0
#define INTR_RD_UNDERRUN INTR_RD0_UNDERRUN
#endif
@@ -720,11 +849,54 @@
#define HDS_CFG_BACKFILLSIZE_SHIFT 8
#define HDS_CFG_MAX_HDRSIZE_SHIFT 20
+#define ALC_MBOX_TD_PRI3_PROD_IDX 0x1618 /* 16 bits AR816x */
+
+#define ALC_MBOX_TD_PRI2_PROD_IDX 0x161A /* 16 bits AR816x */
+
+#define ALC_MBOX_TD_PRI3_CONS_IDX 0x161C /* 16 bits AR816x */
+
+#define ALC_MBOX_TD_PRI2_CONS_IDX 0x161E /* 16 bits AR816x */
+
/* AR813x/AR815x registers for MAC statistics */
#define ALC_RX_MIB_BASE 0x1700
#define ALC_TX_MIB_BASE 0x1760
+#define ALC_DRV 0x1804 /* AR816x */
+#define DRV_ASPM_SPD10LMT_1M 0x00000000
+#define DRV_ASPM_SPD10LMT_10M 0x00000001
+#define DRV_ASPM_SPD10LMT_100M 0x00000002
+#define DRV_ASPM_SPD10LMT_NO 0x00000003
+#define DRV_ASPM_SPD10LMT_MASK 0x00000003
+#define DRV_ASPM_SPD100LMT_1M 0x00000000
+#define DRV_ASPM_SPD100LMT_10M 0x00000004
+#define DRV_ASPM_SPD100LMT_100M 0x00000008
+#define DRV_ASPM_SPD100LMT_NO 0x0000000C
+#define DRV_ASPM_SPD100LMT_MASK 0x0000000C
+#define DRV_ASPM_SPD1000LMT_100M 0x00000000
+#define DRV_ASPM_SPD1000LMT_NO 0x00000010
+#define DRV_ASPM_SPD1000LMT_1M 0x00000020
+#define DRV_ASPM_SPD1000LMT_10M 0x00000030
+#define DRV_ASPM_SPD1000LMT_MASK 0x00000000
+#define DRV_WOLCAP_BIOS_EN 0x00000100
+#define DRV_WOLMAGIC_EN 0x00000200
+#define DRV_WOLLINKUP_EN 0x00000400
+#define DRV_WOLPATTERN_EN 0x00000800
+#define DRV_AZ_EN 0x00001000
+#define DRV_WOLS5_BIOS_EN 0x00010000
+#define DRV_WOLS5_EN 0x00020000
+#define DRV_DISABLE 0x00040000
+#define DRV_PHY_MASK 0x1FE00000
+#define DRV_PHY_EEE 0x00200000
+#define DRV_PHY_APAUSE 0x00400000
+#define DRV_PHY_PAUSE 0x00800000
+#define DRV_PHY_DUPLEX 0x01000000
+#define DRV_PHY_10 0x02000000
+#define DRV_PHY_100 0x04000000
+#define DRV_PHY_1000 0x08000000
+#define DRV_PHY_AUTO 0x10000000
+#define DRV_PHY_SHIFT 21
+
#define ALC_CLK_GATING_CFG 0x1814
#define CLK_GATING_DMAW_ENB 0x0001
#define CLK_GATING_DMAR_ENB 0x0002
@@ -737,6 +909,52 @@
#define ALC_DEBUG_DATA1 0x1904
+#define ALC_MSI_RETRANS_TIMER 0x1920
+#define MSI_RETRANS_TIMER_MASK 0x0000FFFF
+#define MSI_RETRANS_MASK_SEL_STD 0x00000000
+#define MSI_RETRANS_MASK_SEL_LINE 0x00010000
+#define MSI_RETRANS_TIMER_SHIFT 0
+
+#define ALC_WRR 0x1938
+#define WRR_PRI0_MASK 0x0000001F
+#define WRR_PRI1_MASK 0x00001F00
+#define WRR_PRI2_MASK 0x001F0000
+#define WRR_PRI3_MASK 0x1F000000
+#define WRR_PRI_RESTRICT_MASK 0x60000000
+#define WRR_PRI_RESTRICT_ALL 0x00000000
+#define WRR_PRI_RESTRICT_HI 0x20000000
+#define WRR_PRI_RESTRICT_HI2 0x40000000
+#define WRR_PRI_RESTRICT_NONE 0x60000000
+#define WRR_PRI0_SHIFT 0
+#define WRR_PRI1_SHIFT 8
+#define WRR_PRI2_SHIFT 16
+#define WRR_PRI3_SHIFT 24
+#define WRR_PRI_DEFAULT 4
+#define WRR_PRI_RESTRICT_SHIFT 29
+
+#define ALC_HQTD_CFG 0x193C
+#define HQTD_CFG_Q1_BURST_MASK 0x0000000F
+#define HQTD_CFG_Q2_BURST_MASK 0x000000F0
+#define HQTD_CFG_Q3_BURST_MASK 0x00000F00
+#define HQTD_CFG_BURST_ENB 0x80000000
+#define HQTD_CFG_Q1_BURST_SHIFT 0
+#define HQTD_CFG_Q2_BURST_SHIFT 4
+#define HQTD_CFG_Q3_BURST_SHIFT 8
+
+#define ALC_MISC 0x19C0
+#define MISC_INTNLOSC_OPEN 0x00000008
+#define MISC_ISO_ENB 0x00001000
+#define MISC_PSW_OCP_MASK 0x00E00000
+#define MISC_PSW_OCP_SHIFT 21
+#define MISC_PSW_OCP_DEFAULT 7
+
+#define ALC_MISC2 0x19C8
+#define MISC2_CALB_START 0x00000001
+
+#define ALC_MISC3 0x19CC
+#define MISC3_25M_NOTO_INTNL 0x00000001
+#define MISC3_25M_BY_SW 0x00000002
+
#define ALC_MII_DBG_ADDR 0x1D
#define ALC_MII_DBG_DATA 0x1E
@@ -756,6 +974,9 @@
#define ANA_SEL_CLK125M_DSP 0x8000
#define ANA_MANUL_SWICH_ON_SHIFT 1
+#define MII_DBG_ANACTL 0x00
+#define DBG_ANACTL_DEFAULT 0x02EF
+
#define MII_ANA_CFG4 0x04
#define ANA_IECHO_ADJ_MASK 0x0F
#define ANA_IECHO_ADJ_3_MASK 0x000F
@@ -767,6 +988,9 @@
#define ANA_IECHO_ADJ_1_SHIFT 8
#define ANA_IECHO_ADJ_0_SHIFT 12
+#define MII_DBG_SYSMODCTL 0x04
+#define DBG_SYSMODCTL_DEFAULT 0xBB8B
+
#define MII_ANA_CFG5 0x05
#define ANA_SERDES_CDR_BW_MASK 0x0003
#define ANA_MS_PAD_DBG 0x0004
@@ -783,9 +1007,17 @@
#define ANA_SERDES_CDR_BW_SHIFT 0
#define ANA_SERDES_TH_LOS_SHIFT 4
+#define MII_DBG_SRDSYSMOD 0x05
+#define DBG_SRDSYSMOD_DEFAULT 0x2C46
+
#define MII_ANA_CFG11 0x0B
#define ANA_PS_HIB_EN 0x8000
+#define MII_DBG_HIBNEG 0x0B
+#define DBG_HIBNEG_HIB_PULSE 0x1000
+#define DBG_HIBNEG_PSHIB_EN 0x8000
+#define DBG_HIBNEG_DEFAULT 0xBC40
+
#define MII_ANA_CFG18 0x12
#define ANA_TEST_MODE_10BT_01MASK 0x0003
#define ANA_LOOP_SEL_10BT 0x0004
@@ -800,9 +1032,36 @@
#define ANA_TRIGGER_SEL_TIMER_SHIFT 12
#define ANA_INTERVAL_SEL_TIMER_SHIFT 14
+#define MII_DBG_TST10BTCFG 0x12
+#define DBG_TST10BTCFG_DEFAULT 0x4C04
+
+#define MII_DBG_AZ_ANADECT 0x15
+#define DBG_AZ_ANADECT_DEFAULT 0x3220
+#define DBG_AZ_ANADECT_LONG 0x3210
+
+#define MII_DBG_MSE16DB 0x18
+#define DBG_MSE16DB_UP 0x05EA
+#define DBG_MSE16DB_DOWN 0x02EA
+
+#define MII_DBG_MSE20DB 0x1C
+#define DBG_MSE20DB_TH_MASK 0x01FC
+#define DBG_MSE20DB_TH_DEFAULT 0x2E
+#define DBG_MSE20DB_TH_HI 0x54
+#define DBG_MSE20DB_TH_SHIFT 2
+
+#define MII_DBG_AGC 0x23
+#define DBG_AGC_2_VGA_MASK 0x3F00
+#define DBG_AGC_2_VGA_SHIFT 8
+#define DBG_AGC_LONG1G_LIMT 40
+#define DBG_AGC_LONG100M_LIMT 44
+
#define MII_ANA_CFG41 0x29
#define ANA_TOP_PS_EN 0x8000
+#define MII_DBG_LEGCYPS 0x29
+#define DBG_LEGCYPS_ENB 0x8000
+#define DBG_LEGCYPS_DEFAULT 0x129D
+
#define MII_ANA_CFG54 0x36
#define ANA_LONG_CABLE_TH_100_MASK 0x003F
#define ANA_DESERVED 0x0040
@@ -813,6 +1072,51 @@
#define ANA_LONG_CABLE_TH_100_SHIFT 0
#define ANA_SHORT_CABLE_TH_100_SHIFT 8
+#define MII_DBG_TST100BTCFG 0x36
+#define DBG_TST100BTCFG_DEFAULT 0xE12C
+
+#define MII_DBG_GREENCFG 0x3B
+#define DBG_GREENCFG_DEFAULT 0x7078
+
+#define MII_DBG_GREENCFG2 0x3D
+#define DBG_GREENCFG2_GATE_DFSE_EN 0x0080
+#define DBG_GREENCFG2_BP_GREEN 0x8000
+
+/* Device addr 3 */
+#define MII_EXT_PCS 3
+
+#define MII_EXT_CLDCTL3 0x8003
+#define EXT_CLDCTL3_BP_CABLE1TH_DET_GT 0x8000
+
+#define MII_EXT_CLDCTL5 0x8005
+#define EXT_CLDCTL5_BP_VD_HLFBIAS 0x4000
+
+#define MII_EXT_CLDCTL6 0x8006
+#define EXT_CLDCTL6_CAB_LEN_MASK 0x00FF
+#define EXT_CLDCTL6_CAB_LEN_SHIFT 0
+#define EXT_CLDCTL6_CAB_LEN_SHORT1G 116
+#define EXT_CLDCTL6_CAB_LEN_SHORT100M 152
+
+#define MII_EXT_VDRVBIAS 0x8062
+#define EXT_VDRVBIAS_DEFAULT 3
+
+/* Device addr 7 */
+#define MII_EXT_ANEG 7
+
+#define MII_EXT_ANEG_LOCAL_EEEADV 0x3C
+#define ANEG_LOCA_EEEADV_100BT 0x0002
+#define ANEG_LOCA_EEEADV_1000BT 0x0004
+
+#define MII_EXT_ANEG_AFE 0x801A
+#define ANEG_AFEE_10BT_100M_TH 0x0040
+
+#define MII_EXT_ANEG_S3DIG10 0x8023
+#define ANEG_S3DIG10_SL 0x0001
+#define ANEG_S3DIG10_DEFAULT 0
+
+#define MII_EXT_ANEG_NLP78 0x8027
+#define ANEG_NLP78_120M_DEFAULT 0x8A05
+
/* Statistics counters collected by the MAC. */
struct smb {
/* Rx stats. */
@@ -860,7 +1164,6 @@ struct smb {
uint32_t tx_multi_colls;
uint32_t tx_late_colls;
uint32_t tx_excess_colls;
- uint32_t tx_abort;
uint32_t tx_underrun;
uint32_t tx_desc_underrun;
uint32_t tx_lenerrs;
diff --git a/sys/dev/alc/if_alcvar.h b/sys/dev/alc/if_alcvar.h
index f2d806f..9a73ef4 100644
--- a/sys/dev/alc/if_alcvar.h
+++ b/sys/dev/alc/if_alcvar.h
@@ -52,6 +52,10 @@
/* Water mark to kick reclaiming Tx buffers. */
#define ALC_TX_DESC_HIWAT ((ALC_TX_RING_CNT * 6) / 10)
+/*
+ * AR816x controllers support up to 16 messages but this driver
+ * uses single message.
+ */
#define ALC_MSI_MESSAGES 1
#define ALC_MSIX_MESSAGES 1
@@ -224,12 +228,13 @@ struct alc_softc {
#define ALC_FLAG_PM 0x0010
#define ALC_FLAG_FASTETHER 0x0020
#define ALC_FLAG_JUMBO 0x0040
-#define ALC_FLAG_ASPM_MON 0x0080
#define ALC_FLAG_CMB_BUG 0x0100
#define ALC_FLAG_SMB_BUG 0x0200
#define ALC_FLAG_L0S 0x0400
#define ALC_FLAG_L1S 0x0800
#define ALC_FLAG_APS 0x1000
+#define ALC_FLAG_AR816X_FAMILY 0x2000
+#define ALC_FLAG_LINK_WAR 0x4000
#define ALC_FLAG_LINK 0x8000
struct callout alc_tick_ch;
diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c
index 78dfa0e..3a728f4 100644
--- a/sys/dev/ale/if_ale.c
+++ b/sys/dev/ale/if_ale.c
@@ -946,8 +946,6 @@ ale_sysctl_node(struct ale_softc *sc)
&stats->tx_late_colls, "Late collisions");
ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
&stats->tx_excess_colls, "Excessive collisions");
- ALE_SYSCTL_STAT_ADD32(ctx, child, "abort",
- &stats->tx_abort, "Aborted frames due to Excessive collisions");
ALE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
&stats->tx_underrun, "FIFO underruns");
ALE_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
@@ -2197,7 +2195,6 @@ ale_stats_update(struct ale_softc *sc)
stat->tx_multi_colls += smb->tx_multi_colls;
stat->tx_late_colls += smb->tx_late_colls;
stat->tx_excess_colls += smb->tx_excess_colls;
- stat->tx_abort += smb->tx_abort;
stat->tx_underrun += smb->tx_underrun;
stat->tx_desc_underrun += smb->tx_desc_underrun;
stat->tx_lenerrs += smb->tx_lenerrs;
@@ -2210,17 +2207,10 @@ ale_stats_update(struct ale_softc *sc)
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls +
smb->tx_multi_colls * 2 + smb->tx_late_colls +
- smb->tx_abort * HDPX_CFG_RETRY_DEFAULT);
+ smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);
- /*
- * XXX
- * tx_pkts_truncated counter looks suspicious. It constantly
- * increments with no sign of Tx errors. This may indicate
- * the counter name is not correct one so I've removed the
- * counter in output errors.
- */
- if_inc_counter(ifp, IFCOUNTER_OERRORS,
- smb->tx_abort + smb->tx_late_colls + smb->tx_underrun);
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_late_colls +
+ smb->tx_excess_colls + smb->tx_underrun + smb->tx_pkts_truncated);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames);
diff --git a/sys/dev/ale/if_alereg.h b/sys/dev/ale/if_alereg.h
index 445af99..5804c53 100644
--- a/sys/dev/ale/if_alereg.h
+++ b/sys/dev/ale/if_alereg.h
@@ -605,7 +605,6 @@ struct smb {
uint32_t tx_multi_colls;
uint32_t tx_late_colls;
uint32_t tx_excess_colls;
- uint32_t tx_abort;
uint32_t tx_underrun;
uint32_t tx_desc_underrun;
uint32_t tx_lenerrs;
diff --git a/sys/dev/cxgbe/common/t4_msg.h b/sys/dev/cxgbe/common/t4_msg.h
index ef8fbe6..c30b6f1f 100644
--- a/sys/dev/cxgbe/common/t4_msg.h
+++ b/sys/dev/cxgbe/common/t4_msg.h
@@ -273,6 +273,7 @@ union opcode_tid {
/* extract the TID from a CPL command */
#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+#define GET_OPCODE(cmd) ((cmd)->ot.opcode)
/* partitioning of TID fields that also carry a queue id */
#define S_TID_TID 0
diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c
index 9973fa5..425c563 100644
--- a/sys/dev/cxgbe/tom/t4_connect.c
+++ b/sys/dev/cxgbe/tom/t4_connect.c
@@ -115,8 +115,8 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
{
struct adapter *sc = iq->adapter;
const struct cpl_act_establish *cpl = (const void *)(rss + 1);
- unsigned int tid = GET_TID(cpl);
- unsigned int atid = G_TID_TID(ntohl(cpl->tos_atid));
+ u_int tid = GET_TID(cpl);
+ u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
struct toepcb *toep = lookup_atid(sc, atid);
struct inpcb *inp = toep->inp;
@@ -178,17 +178,34 @@ act_open_rpl_status_to_errno(int status)
}
}
+void
+act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
+{
+ struct toepcb *toep = lookup_atid(sc, atid);
+ struct inpcb *inp = toep->inp;
+ struct toedev *tod = &toep->td->tod;
+
+ free_atid(sc, atid);
+ toep->tid = -1;
+
+ if (status != EAGAIN)
+ INP_INFO_WLOCK(&V_tcbinfo);
+ INP_WLOCK(inp);
+ toe_connect_failed(tod, inp, status);
+ final_cpl_received(toep); /* unlocks inp */
+ if (status != EAGAIN)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+}
+
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
{
struct adapter *sc = iq->adapter;
const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
- unsigned int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
- unsigned int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
+ u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
+ u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
struct toepcb *toep = lookup_atid(sc, atid);
- struct inpcb *inp = toep->inp;
- struct toedev *tod = &toep->td->tod;
int rc;
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
@@ -200,20 +217,11 @@ do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
if (negative_advice(status))
return (0);
- free_atid(sc, atid);
- toep->tid = -1;
-
if (status && act_open_has_tid(status))
release_tid(sc, GET_TID(cpl), toep->ctrlq);
rc = act_open_rpl_status_to_errno(status);
- if (rc != EAGAIN)
- INP_INFO_WLOCK(&V_tcbinfo);
- INP_WLOCK(inp);
- toe_connect_failed(tod, inp, rc);
- final_cpl_received(toep); /* unlocks inp */
- if (rc != EAGAIN)
- INP_INFO_WUNLOCK(&V_tcbinfo);
+ act_open_failure_cleanup(sc, atid, rc);
return (0);
}
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 109543a6..71ea1df 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -98,6 +98,7 @@ static void t4_clip_task(void *, int);
static void update_clip_table(struct adapter *, struct tom_data *);
static void destroy_clip_table(struct adapter *, struct tom_data *);
static void free_tom_data(struct adapter *, struct tom_data *);
+static void reclaim_wr_resources(void *, int);
static int in6_ifaddr_gen;
static eventhandler_tag ifaddr_evhandler;
@@ -903,6 +904,8 @@ free_tom_data(struct adapter *sc, struct tom_data *td)
if (td->listen_mask != 0)
hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);
+ if (mtx_initialized(&td->unsent_wr_lock))
+ mtx_destroy(&td->unsent_wr_lock);
if (mtx_initialized(&td->lctx_hash_lock))
mtx_destroy(&td->lctx_hash_lock);
if (mtx_initialized(&td->toep_list_lock))
@@ -912,6 +915,44 @@ free_tom_data(struct adapter *sc, struct tom_data *td)
free(td, M_CXGBE);
}
+static void
+reclaim_wr_resources(void *arg, int count)
+{
+ struct tom_data *td = arg;
+ STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
+ struct cpl_act_open_req *cpl;
+ u_int opcode, atid;
+ struct wrqe *wr;
+ struct adapter *sc;
+
+ mtx_lock(&td->unsent_wr_lock);
+ STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
+ mtx_unlock(&td->unsent_wr_lock);
+
+ while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
+ STAILQ_REMOVE_HEAD(&twr_list, link);
+
+ cpl = wrtod(wr);
+ opcode = GET_OPCODE(cpl);
+
+ switch (opcode) {
+ case CPL_ACT_OPEN_REQ:
+ case CPL_ACT_OPEN_REQ6:
+ atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
+ sc = td_adapter(td);
+
+ CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
+ act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
+ free(wr, M_CXGBE);
+ break;
+ default:
+ log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
+ "opcode %x\n", __func__, wr, wr->wr_len, opcode);
+ /* WR not freed here; go look at it with a debugger. */
+ }
+ }
+}
+
/*
* Ground control to Major TOM
* Commencing countdown, engines on
@@ -939,6 +980,11 @@ t4_tom_activate(struct adapter *sc)
td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
&td->listen_mask, HASH_NOWAIT);
+ /* List of WRs for which L2 resolution failed */
+ mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
+ STAILQ_INIT(&td->unsent_wr_list);
+ TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);
+
/* TID tables */
rc = alloc_tid_tabs(&sc->tids);
if (rc != 0)
@@ -1012,6 +1058,12 @@ t4_tom_deactivate(struct adapter *sc)
rc = EBUSY;
mtx_unlock(&td->lctx_hash_lock);
+ taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
+ mtx_lock(&td->unsent_wr_lock);
+ if (!STAILQ_EMPTY(&td->unsent_wr_list))
+ rc = EBUSY;
+ mtx_unlock(&td->unsent_wr_lock);
+
if (rc == 0) {
unregister_toedev(sc->tom_softc);
free_tom_data(sc, td);
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index 1d883fe..5b68074 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -210,6 +210,11 @@ struct tom_data {
struct mtx clip_table_lock;
struct clip_head clip_table;
int clip_gen;
+
+ /* WRs that will not be sent to the chip because L2 resolution failed */
+ struct mtx unsent_wr_lock;
+ STAILQ_HEAD(, wrqe) unsent_wr_list;
+ struct task reclaim_wr_resources;
};
static inline struct tom_data *
@@ -252,6 +257,7 @@ void release_lip(struct tom_data *, struct clip_entry *);
void t4_init_connect_cpl_handlers(struct adapter *);
int t4_connect(struct toedev *, struct socket *, struct rtentry *,
struct sockaddr *);
+void act_open_failure_cleanup(struct adapter *, u_int, u_int);
/* t4_listen.c */
void t4_init_listen_cpl_handlers(struct adapter *);
diff --git a/sys/dev/cxgbe/tom/t4_tom_l2t.c b/sys/dev/cxgbe/tom/t4_tom_l2t.c
index 7a75394..65f7d23 100644
--- a/sys/dev/cxgbe/tom/t4_tom_l2t.c
+++ b/sys/dev/cxgbe/tom/t4_tom_l2t.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
+#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/ethernet.h>
@@ -161,25 +162,17 @@ send_pending(struct adapter *sc, struct l2t_entry *e)
}
static void
-resolution_failed_for_wr(struct wrqe *wr)
+resolution_failed(struct adapter *sc, struct l2t_entry *e)
{
- log(LOG_ERR, "%s: leaked work request %p, wr_len %d\n", __func__, wr,
- wr->wr_len);
-
- /* free(wr, M_CXGBE); */
-}
-
-static void
-resolution_failed(struct l2t_entry *e)
-{
- struct wrqe *wr;
+ struct tom_data *td = sc->tom_softc;
mtx_assert(&e->lock, MA_OWNED);
- while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
- STAILQ_REMOVE_HEAD(&e->wr_list, link);
- resolution_failed_for_wr(wr);
- }
+ mtx_lock(&td->unsent_wr_lock);
+ STAILQ_CONCAT(&td->unsent_wr_list, &e->wr_list);
+ mtx_unlock(&td->unsent_wr_lock);
+
+ taskqueue_enqueue(taskqueue_thread, &td->reclaim_wr_resources);
}
static void
@@ -203,7 +196,7 @@ update_entry(struct adapter *sc, struct l2t_entry *e, uint8_t *lladdr,
* need to wlock the table).
*/
e->state = L2T_STATE_FAILED;
- resolution_failed(e);
+ resolution_failed(sc, e);
return;
} else if (lladdr == NULL) {
@@ -305,12 +298,11 @@ again:
if (e->state == L2T_STATE_VALID && !STAILQ_EMPTY(&e->wr_list))
send_pending(sc, e);
if (e->state == L2T_STATE_FAILED)
- resolution_failed(e);
+ resolution_failed(sc, e);
mtx_unlock(&e->lock);
break;
case L2T_STATE_FAILED:
- resolution_failed_for_wr(wr);
return (EHOSTUNREACH);
}
diff --git a/sys/dev/drm2/i915/intel_ringbuffer.c b/sys/dev/drm2/i915/intel_ringbuffer.c
index 26bc695..89a5c94 100644
--- a/sys/dev/drm2/i915/intel_ringbuffer.c
+++ b/sys/dev/drm2/i915/intel_ringbuffer.c
@@ -366,7 +366,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
goto err_unpin;
pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
- (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+ (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);
pc->obj = obj;
ring->private = pc;
@@ -1014,7 +1014,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
1);
pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
- (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+ (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
diff --git a/sys/dev/hwpmc/hwpmc_core.c b/sys/dev/hwpmc/hwpmc_core.c
index 792f303..a2bcf73 100644
--- a/sys/dev/hwpmc/hwpmc_core.c
+++ b/sys/dev/hwpmc/hwpmc_core.c
@@ -1796,7 +1796,7 @@ iap_is_event_architectural(enum pmc_event pe, enum pmc_event *map)
switch (pe) {
case PMC_EV_IAP_ARCH_UNH_COR_CYC:
ae = CORE_AE_UNHALTED_CORE_CYCLES;
- *map = PMC_EV_IAP_EVENT_C4H_00H;
+ *map = PMC_EV_IAP_EVENT_3CH_00H;
break;
case PMC_EV_IAP_ARCH_INS_RET:
ae = CORE_AE_INSTRUCTION_RETIRED;
diff --git a/sys/dev/iscsi/icl.c b/sys/dev/iscsi/icl.c
index af59407..5a6436a 100644
--- a/sys/dev/iscsi/icl.c
+++ b/sys/dev/iscsi/icl.c
@@ -771,6 +771,7 @@ icl_receive_thread(void *arg)
ICL_CONN_LOCK(ic);
ic->ic_receive_running = false;
+ cv_signal(&ic->ic_send_cv);
ICL_CONN_UNLOCK(ic);
kthread_exit();
}
@@ -872,8 +873,6 @@ icl_conn_send_pdus(struct icl_conn *ic, struct icl_pdu_stailq *queue)
SOCKBUF_UNLOCK(&so->so_snd);
while (!STAILQ_EMPTY(queue)) {
- if (ic->ic_disconnecting)
- return;
request = STAILQ_FIRST(queue);
size = icl_pdu_size(request);
if (available < size) {
@@ -970,11 +969,6 @@ icl_send_thread(void *arg)
ic->ic_send_running = true;
for (;;) {
- if (ic->ic_disconnecting) {
- //ICL_DEBUG("terminating");
- break;
- }
-
for (;;) {
/*
* If the local queue is empty, populate it from
@@ -1013,6 +1007,11 @@ icl_send_thread(void *arg)
break;
}
+ if (ic->ic_disconnecting) {
+ //ICL_DEBUG("terminating");
+ break;
+ }
+
cv_wait(&ic->ic_send_cv, ic->ic_lock);
}
@@ -1023,6 +1022,7 @@ icl_send_thread(void *arg)
STAILQ_CONCAT(&ic->ic_to_send, &queue);
ic->ic_send_running = false;
+ cv_signal(&ic->ic_send_cv);
ICL_CONN_UNLOCK(ic);
kthread_exit();
}
@@ -1296,21 +1296,6 @@ icl_conn_handoff(struct icl_conn *ic, int fd)
}
void
-icl_conn_shutdown(struct icl_conn *ic)
-{
- ICL_CONN_LOCK_ASSERT_NOT(ic);
-
- ICL_CONN_LOCK(ic);
- if (ic->ic_socket == NULL) {
- ICL_CONN_UNLOCK(ic);
- return;
- }
- ICL_CONN_UNLOCK(ic);
-
- soshutdown(ic->ic_socket, SHUT_RDWR);
-}
-
-void
icl_conn_close(struct icl_conn *ic)
{
struct icl_pdu *pdu;
@@ -1342,15 +1327,11 @@ icl_conn_close(struct icl_conn *ic)
/*
* Wake up the threads, so they can properly terminate.
*/
- cv_signal(&ic->ic_receive_cv);
- cv_signal(&ic->ic_send_cv);
while (ic->ic_receive_running || ic->ic_send_running) {
//ICL_DEBUG("waiting for send/receive threads to terminate");
- ICL_CONN_UNLOCK(ic);
cv_signal(&ic->ic_receive_cv);
cv_signal(&ic->ic_send_cv);
- pause("icl_close", 1 * hz);
- ICL_CONN_LOCK(ic);
+ cv_wait(&ic->ic_send_cv, ic->ic_lock);
}
//ICL_DEBUG("send/receive threads terminated");
diff --git a/sys/dev/iscsi/icl.h b/sys/dev/iscsi/icl.h
index 5f03434..ca5ee8f 100644
--- a/sys/dev/iscsi/icl.h
+++ b/sys/dev/iscsi/icl.h
@@ -107,7 +107,6 @@ struct icl_conn {
struct icl_conn *icl_conn_new(const char *name, struct mtx *lock);
void icl_conn_free(struct icl_conn *ic);
int icl_conn_handoff(struct icl_conn *ic, int fd);
-void icl_conn_shutdown(struct icl_conn *ic);
void icl_conn_close(struct icl_conn *ic);
bool icl_conn_connected(struct icl_conn *ic);
diff --git a/sys/dev/iscsi/iscsi.c b/sys/dev/iscsi/iscsi.c
index e11b2b6..1576f7d 100644
--- a/sys/dev/iscsi/iscsi.c
+++ b/sys/dev/iscsi/iscsi.c
@@ -367,7 +367,6 @@ static void
iscsi_maintenance_thread_reconnect(struct iscsi_session *is)
{
- icl_conn_shutdown(is->is_conn);
icl_conn_close(is->is_conn);
ISCSI_SESSION_LOCK(is);
diff --git a/sys/dev/mmc/host/dwmmc.c b/sys/dev/mmc/host/dwmmc.c
new file mode 100644
index 0000000..79fe69b
--- /dev/null
+++ b/sys/dev/mmc/host/dwmmc.c
@@ -0,0 +1,1101 @@
+/*-
+ * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Synopsys DesignWare Mobile Storage Host Controller
+ * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/rman.h>
+#include <sys/timeet.h>
+#include <sys/timetc.h>
+
+#include <dev/mmc/bridge.h>
+#include <dev/mmc/mmcreg.h>
+#include <dev/mmc/mmcbrvar.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <machine/bus.h>
+#include <machine/fdt.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+
+#include <dev/mmc/host/dwmmc.h>
+
+#include "mmcbr_if.h"
+
+#define dprintf(x, arg...)
+
+#define READ4(_sc, _reg) \
+ bus_read_4((_sc)->res[0], _reg)
+#define WRITE4(_sc, _reg, _val) \
+ bus_write_4((_sc)->res[0], _reg, _val)
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+#define DWMMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define DWMMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+#define DWMMC_LOCK_INIT(_sc) \
+ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
+ "dwmmc", MTX_DEF)
+#define DWMMC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
+#define DWMMC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
+#define DWMMC_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
+
+#define PENDING_CMD 0x01
+#define PENDING_STOP 0x02
+#define CARD_INIT_DONE 0x04
+
+#define DWMMC_DATA_ERR_FLAGS (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
+ |SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
+ |SDMMC_INTMASK_EBE)
+#define DWMMC_CMD_ERR_FLAGS (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
+ |SDMMC_INTMASK_RE)
+#define DWMMC_ERR_FLAGS (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
+ |SDMMC_INTMASK_HLE)
+
+#define DES0_DIC (1 << 1)
+#define DES0_LD (1 << 2)
+#define DES0_FS (1 << 3)
+#define DES0_CH (1 << 4)
+#define DES0_ER (1 << 5)
+#define DES0_CES (1 << 30)
+#define DES0_OWN (1 << 31)
+
+#define DES1_BS1_MASK 0xfff
+#define DES1_BS1_SHIFT 0
+
+struct idmac_desc {
+ uint32_t des0; /* control */
+ uint32_t des1; /* bufsize */
+ uint32_t des2; /* buf1 phys addr */
+ uint32_t des3; /* buf2 phys addr or next descr */
+};
+
+#define DESC_COUNT 256
+#define DESC_SIZE (sizeof(struct idmac_desc) * DESC_COUNT)
+#define DEF_MSIZE 0x2 /* Burst size of multiple transaction */
+
+struct dwmmc_softc {
+ struct resource *res[2];
+ bus_space_tag_t bst;
+ bus_space_handle_t bsh;
+ device_t dev;
+ void *intr_cookie;
+ struct mmc_host host;
+ struct mtx sc_mtx;
+ struct mmc_request *req;
+ struct mmc_command *curcmd;
+ uint32_t flags;
+ uint32_t hwtype;
+ uint32_t use_auto_stop;
+
+ bus_dma_tag_t desc_tag;
+ bus_dmamap_t desc_map;
+ struct idmac_desc *desc_ring;
+ bus_addr_t desc_ring_paddr;
+ bus_dma_tag_t buf_tag;
+ bus_dmamap_t buf_map;
+
+ uint32_t bus_busy;
+ uint32_t dto_rcvd;
+ uint32_t acd_rcvd;
+ uint32_t cmd_done;
+ uint32_t bus_hz;
+ uint32_t fifo_depth;
+ uint32_t num_slots;
+ uint32_t sdr_timing;
+ uint32_t ddr_timing;
+};
+
+static void dwmmc_next_operation(struct dwmmc_softc *);
+static int dwmmc_setup_bus(struct dwmmc_softc *, int);
+static int dma_done(struct dwmmc_softc *, struct mmc_command *);
+static int dma_stop(struct dwmmc_softc *);
+
+static struct resource_spec dwmmc_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+enum {
+ HWTYPE_NONE,
+ HWTYPE_ALTERA,
+ HWTYPE_EXYNOS,
+};
+
+#define HWTYPE_MASK (0x0000ffff)
+#define HWFLAG_MASK (0xffff << 16)
+
+static struct ofw_compat_data compat_data[] = {
+ {"altr,socfpga-dw-mshc", HWTYPE_ALTERA},
+ {"samsung,exynos5420-dw-mshc", HWTYPE_EXYNOS},
+ {NULL, HWTYPE_NONE},
+};
+
+static void
+dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+ if (error != 0)
+ return;
+ *(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
+static void
+dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct dwmmc_softc *sc;
+ int idx;
+
+ if (error != 0)
+ return;
+
+ sc = arg;
+
+ dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
+
+ for (idx = 0; idx < nsegs; idx++) {
+ sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
+ sc->desc_ring[idx].des1 = segs[idx].ds_len;
+ sc->desc_ring[idx].des2 = segs[idx].ds_addr;
+
+ if (idx == 0)
+ sc->desc_ring[idx].des0 |= DES0_FS;
+
+ if (idx == (nsegs - 1)) {
+ sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
+ sc->desc_ring[idx].des0 |= DES0_LD;
+ }
+ }
+}
+
+static int
+dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
+{
+ int reg;
+ int i;
+
+ reg = READ4(sc, SDMMC_CTRL);
+ reg |= (reset_bits);
+ WRITE4(sc, SDMMC_CTRL, reg);
+
+ /* Wait reset done */
+ for (i = 0; i < 100; i++) {
+ if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
+ return (0);
+ DELAY(10);
+ };
+
+ device_printf(sc->dev, "Reset failed\n");
+
+ return (1);
+}
+
+static int
+dma_setup(struct dwmmc_softc *sc)
+{
+ int error;
+ int nidx;
+ int idx;
+
+ /*
+ * Set up TX descriptor ring, descriptors, and dma maps.
+ */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag. */
+ 4096, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ DESC_SIZE, 1, /* maxsize, nsegments */
+ DESC_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->desc_tag);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not create ring DMA tag.\n");
+ return (1);
+ }
+
+ error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
+ BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
+ &sc->desc_map);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not allocate descriptor ring.\n");
+ return (1);
+ }
+
+ error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
+ sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
+ &sc->desc_ring_paddr, 0);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not load descriptor ring map.\n");
+ return (1);
+ }
+
+ for (idx = 0; idx < DESC_COUNT; idx++) {
+ sc->desc_ring[idx].des0 = DES0_CH;
+ sc->desc_ring[idx].des1 = 0;
+ nidx = (idx + 1) % DESC_COUNT;
+ sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
+ (nidx * sizeof(struct idmac_desc));
+ }
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag. */
+ 4096, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ DESC_COUNT*MMC_SECTOR_SIZE, /* maxsize */
+ DESC_COUNT, /* nsegments */
+ MMC_SECTOR_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->buf_tag);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not create ring DMA tag.\n");
+ return (1);
+ }
+
+ error = bus_dmamap_create(sc->buf_tag, 0,
+ &sc->buf_map);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "could not create TX buffer DMA map.\n");
+ return (1);
+ }
+
+ return (0);
+}
+
+static void
+dwmmc_cmd_done(struct dwmmc_softc *sc)
+{
+ struct mmc_command *cmd;
+
+ cmd = sc->curcmd;
+ if (cmd == NULL)
+ return;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[3] = READ4(sc, SDMMC_RESP0);
+ cmd->resp[2] = READ4(sc, SDMMC_RESP1);
+ cmd->resp[1] = READ4(sc, SDMMC_RESP2);
+ cmd->resp[0] = READ4(sc, SDMMC_RESP3);
+ } else {
+ cmd->resp[3] = 0;
+ cmd->resp[2] = 0;
+ cmd->resp[1] = 0;
+ cmd->resp[0] = READ4(sc, SDMMC_RESP0);
+ }
+ }
+}
+
+static void
+dwmmc_tasklet(struct dwmmc_softc *sc)
+{
+ struct mmc_command *cmd;
+
+ cmd = sc->curcmd;
+ if (cmd == NULL)
+ return;
+
+ if (!sc->cmd_done)
+ return;
+
+ if (cmd->error != MMC_ERR_NONE || !cmd->data) {
+ dwmmc_next_operation(sc);
+ } else if (cmd->data && sc->dto_rcvd) {
+ if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
+ sc->use_auto_stop) {
+ if (sc->acd_rcvd)
+ dwmmc_next_operation(sc);
+ } else {
+ dwmmc_next_operation(sc);
+ }
+ }
+}
+
+static void
+dwmmc_intr(void *arg)
+{
+ struct mmc_command *cmd;
+ struct dwmmc_softc *sc;
+ uint32_t reg;
+
+ sc = arg;
+
+ DWMMC_LOCK(sc);
+
+ cmd = sc->curcmd;
+
+ /* First handle SDMMC controller interrupts */
+ reg = READ4(sc, SDMMC_MINTSTS);
+ if (reg) {
+ dprintf("%s 0x%08x\n", __func__, reg);
+
+ if (reg & DWMMC_CMD_ERR_FLAGS) {
+ WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
+ dprintf("cmd err 0x%08x cmd 0x%08x\n",
+ reg, cmd->opcode);
+ cmd->error = MMC_ERR_TIMEOUT;
+ }
+
+ if (reg & DWMMC_DATA_ERR_FLAGS) {
+ WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
+ dprintf("data err 0x%08x cmd 0x%08x\n",
+ reg, cmd->opcode);
+ cmd->error = MMC_ERR_FAILED;
+ dma_done(sc, cmd);
+ dma_stop(sc);
+ }
+
+ if (reg & SDMMC_INTMASK_CMD_DONE) {
+ dwmmc_cmd_done(sc);
+ sc->cmd_done = 1;
+ WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
+ }
+
+ if (reg & SDMMC_INTMASK_ACD) {
+ sc->acd_rcvd = 1;
+ WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
+ }
+
+ if (reg & SDMMC_INTMASK_DTO) {
+ sc->dto_rcvd = 1;
+ WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
+ }
+
+ if (reg & SDMMC_INTMASK_CD) {
+ /* XXX: Handle card detect */
+ WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
+ }
+ }
+
+ /* Now handle DMA interrupts */
+ reg = READ4(sc, SDMMC_IDSTS);
+ if (reg) {
+ dprintf("dma intr 0x%08x\n", reg);
+ if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
+ WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
+ SDMMC_IDINTEN_RI));
+ WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
+ dma_done(sc, cmd);
+ }
+ }
+
+ dwmmc_tasklet(sc);
+
+ DWMMC_UNLOCK(sc);
+}
+
+static int
+parse_fdt(struct dwmmc_softc *sc)
+{
+ pcell_t dts_value[3];
+ phandle_t node;
+ int len;
+
+ if ((node = ofw_bus_get_node(sc->dev)) == -1)
+ return (ENXIO);
+
+ /* fifo-depth */
+ if ((len = OF_getproplen(node, "fifo-depth")) <= 0)
+ return (ENXIO);
+ OF_getencprop(node, "fifo-depth", dts_value, len);
+ sc->fifo_depth = dts_value[0];
+
+ /* num-slots */
+ if ((len = OF_getproplen(node, "num-slots")) <= 0)
+ return (ENXIO);
+ OF_getencprop(node, "num-slots", dts_value, len);
+ sc->num_slots = dts_value[0];
+
+ /*
+	 * We need platform-specific code to determine
+	 * what clock rate is supplied to our device.
+ * For now rely on the value specified in FDT.
+ */
+ if ((len = OF_getproplen(node, "bus-frequency")) <= 0)
+ return (ENXIO);
+ OF_getencprop(node, "bus-frequency", dts_value, len);
+ sc->bus_hz = dts_value[0];
+
+ /*
+ * Platform-specific stuff
+ * XXX: Move to separate file
+ */
+
+ if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS)
+ return (0);
+
+ if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0)
+ return (ENXIO);
+ OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len);
+ sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
+ sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
+
+ if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0)
+ return (ENXIO);
+ OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len);
+ sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
+ (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));
+
+ if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0)
+ return (ENXIO);
+ OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len);
+ sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
+ (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));
+
+ return (0);
+}
+
+static int
+dwmmc_probe(device_t dev)
+{
+ uintptr_t hwtype;
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+ if (hwtype == HWTYPE_NONE)
+ return (ENXIO);
+
+ device_set_desc(dev, "Synopsys DesignWare Mobile "
+ "Storage Host Controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dwmmc_attach(device_t dev)
+{
+ struct dwmmc_softc *sc;
+ device_t child;
+ int error;
+ int slot;
+
+ sc = device_get_softc(dev);
+
+ sc->dev = dev;
+ sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+
+	/* Use Auto Stop; it saves hundreds of interrupts per second. */
+ sc->use_auto_stop = 1;
+
+ error = parse_fdt(sc);
+ if (error != 0) {
+ device_printf(dev, "Can't get FDT property.\n");
+ return (ENXIO);
+ }
+
+ DWMMC_LOCK_INIT(sc);
+
+ if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
+ device_printf(dev, "could not allocate resources\n");
+ return (ENXIO);
+ }
+
+ /* Memory interface */
+ sc->bst = rman_get_bustag(sc->res[0]);
+ sc->bsh = rman_get_bushandle(sc->res[0]);
+
+ /* Setup interrupt handler. */
+ error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, dwmmc_intr, sc, &sc->intr_cookie);
+ if (error != 0) {
+ device_printf(dev, "could not setup interrupt handler.\n");
+ return (ENXIO);
+ }
+
+ device_printf(dev, "Hardware version ID is %04x\n",
+ READ4(sc, SDMMC_VERID) & 0xffff);
+
+ WRITE4(sc, EMMCP_MPSBEGIN0, 0);
+ WRITE4(sc, EMMCP_SEND0, 0);
+ WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT |
+ MPSCTRL_SECURE_WRITE_BIT |
+ MPSCTRL_NON_SECURE_READ_BIT |
+ MPSCTRL_NON_SECURE_WRITE_BIT |
+ MPSCTRL_VALID));
+
+ /* XXX: we support operation for slot index 0 only */
+ slot = 0;
+ WRITE4(sc, SDMMC_PWREN, (1 << slot));
+
+ /* Reset all */
+ if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
+ SDMMC_CTRL_FIFO_RESET |
+ SDMMC_CTRL_DMA_RESET)))
+ return (ENXIO);
+
+ dwmmc_setup_bus(sc, sc->host.f_min);
+
+ if (dma_setup(sc))
+ return (ENXIO);
+
+ /* Install desc base */
+ WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
+
+ /* Enable DMA interrupts */
+ WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
+ WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
+ SDMMC_IDINTEN_RI |
+ SDMMC_IDINTEN_TI));
+
+	/* Clear and disable interrupts for a while */
+ WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
+ WRITE4(sc, SDMMC_INTMASK, 0);
+
+ /* Maximum timeout */
+ WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
+
+ /* Enable interrupts */
+ WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
+ WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
+ SDMMC_INTMASK_DTO |
+ SDMMC_INTMASK_ACD |
+ SDMMC_INTMASK_TXDR |
+ SDMMC_INTMASK_RXDR |
+ DWMMC_ERR_FLAGS |
+ SDMMC_INTMASK_CD));
+ WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
+
+ sc->host.f_min = 400000;
+ sc->host.f_max = 200000000;
+ sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
+ sc->host.caps = MMC_CAP_4_BIT_DATA;
+
+ child = device_add_child(dev, "mmc", 0);
+ return (bus_generic_attach(dev));
+}
+
+static int
+dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
+{
+ int tout;
+ int div;
+
+ if (freq == 0) {
+ WRITE4(sc, SDMMC_CLKENA, 0);
+ WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
+ SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
+
+ tout = 1000;
+ do {
+ if (tout-- < 0) {
+ device_printf(sc->dev, "Failed update clk\n");
+ return (1);
+ }
+ } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
+
+ return (0);
+ }
+
+ WRITE4(sc, SDMMC_CLKENA, 0);
+ WRITE4(sc, SDMMC_CLKSRC, 0);
+
+ div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
+
+ WRITE4(sc, SDMMC_CLKDIV, div);
+ WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
+ SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
+
+ tout = 1000;
+ do {
+ if (tout-- < 0) {
+ device_printf(sc->dev, "Failed to update clk");
+ return (1);
+ }
+ } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
+
+ WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
+ WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
+ SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
+
+ tout = 1000;
+ do {
+ if (tout-- < 0) {
+ device_printf(sc->dev, "Failed to enable clk\n");
+ return (1);
+ }
+ } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
+
+ return (0);
+}
+
+static int
+dwmmc_update_ios(device_t brdev, device_t reqdev)
+{
+ struct dwmmc_softc *sc;
+ struct mmc_ios *ios;
+
+ sc = device_get_softc(brdev);
+ ios = &sc->host.ios;
+
+ dprintf("Setting up clk %u bus_width %d\n",
+ ios->clock, ios->bus_width);
+
+ dwmmc_setup_bus(sc, ios->clock);
+
+ if (ios->bus_width == bus_width_8)
+ WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
+ else if (ios->bus_width == bus_width_4)
+ WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
+ else
+ WRITE4(sc, SDMMC_CTYPE, 0);
+
+ if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
+ /* XXX: take care about DDR or SDR use here */
+ WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
+ }
+
+ /*
+ * XXX: take care about DDR bit
+ *
+ * reg = READ4(sc, SDMMC_UHS_REG);
+ * reg |= (SDMMC_UHS_REG_DDR);
+ * WRITE4(sc, SDMMC_UHS_REG, reg);
+ */
+
+ return (0);
+}
+
+static int
+dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
+{
+ struct mmc_data *data;
+
+ data = cmd->data;
+
+ if (data->flags & MMC_DATA_WRITE)
+ bus_dmamap_sync(sc->buf_tag, sc->buf_map,
+ BUS_DMASYNC_POSTWRITE);
+ else
+ bus_dmamap_sync(sc->buf_tag, sc->buf_map,
+ BUS_DMASYNC_POSTREAD);
+
+ bus_dmamap_unload(sc->buf_tag, sc->buf_map);
+
+ return (0);
+}
+
+static int
+dma_stop(struct dwmmc_softc *sc)
+{
+ int reg;
+
+ reg = READ4(sc, SDMMC_CTRL);
+ reg &= ~(SDMMC_CTRL_USE_IDMAC);
+ reg |= (SDMMC_CTRL_DMA_RESET);
+ WRITE4(sc, SDMMC_CTRL, reg);
+
+ reg = READ4(sc, SDMMC_BMOD);
+ reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
+ reg |= (SDMMC_BMOD_SWR);
+ WRITE4(sc, SDMMC_BMOD, reg);
+
+ return (0);
+}
+
+static int
+dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
+{
+ struct mmc_data *data;
+ int len;
+ int err;
+ int reg;
+
+ data = cmd->data;
+ len = data->len;
+
+ reg = READ4(sc, SDMMC_INTMASK);
+ reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
+ WRITE4(sc, SDMMC_INTMASK, reg);
+
+ err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
+ data->data, data->len, dwmmc_ring_setup,
+ sc, BUS_DMA_NOWAIT);
+ if (err != 0)
+ panic("dmamap_load failed\n");
+
+ if (data->flags & MMC_DATA_WRITE)
+ bus_dmamap_sync(sc->buf_tag, sc->buf_map,
+ BUS_DMASYNC_PREWRITE);
+ else
+ bus_dmamap_sync(sc->buf_tag, sc->buf_map,
+ BUS_DMASYNC_PREREAD);
+
+ reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
+ reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
+ reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
+
+ WRITE4(sc, SDMMC_FIFOTH, reg);
+ wmb();
+
+ reg = READ4(sc, SDMMC_CTRL);
+ reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
+ WRITE4(sc, SDMMC_CTRL, reg);
+ wmb();
+
+ reg = READ4(sc, SDMMC_BMOD);
+ reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
+ WRITE4(sc, SDMMC_BMOD, reg);
+
+ /* Start */
+ WRITE4(sc, SDMMC_PLDMND, 1);
+
+ return (0);
+}
+
+static void
+dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
+{
+ struct mmc_data *data;
+ uint32_t blksz;
+ uint32_t cmdr;
+
+ sc->curcmd = cmd;
+ data = cmd->data;
+
+ /* XXX Upper layers don't always set this */
+ cmd->mrq = sc->req;
+
+ /* Begin setting up command register. */
+
+ cmdr = cmd->opcode;
+
+ dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
+
+ if (cmd->opcode == MMC_STOP_TRANSMISSION ||
+ cmd->opcode == MMC_GO_IDLE_STATE ||
+ cmd->opcode == MMC_GO_INACTIVE_STATE)
+ cmdr |= SDMMC_CMD_STOP_ABORT;
+ else if (cmd->opcode != MMC_SEND_STATUS && data)
+ cmdr |= SDMMC_CMD_WAIT_PRVDATA;
+
+ /* Set up response handling. */
+ if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
+ cmdr |= SDMMC_CMD_RESP_EXP;
+ if (cmd->flags & MMC_RSP_136)
+ cmdr |= SDMMC_CMD_RESP_LONG;
+ }
+
+ if (cmd->flags & MMC_RSP_CRC)
+ cmdr |= SDMMC_CMD_RESP_CRC;
+
+ /*
+ * XXX: Not all platforms want this.
+ */
+ cmdr |= SDMMC_CMD_USE_HOLD_REG;
+
+ if ((sc->flags & CARD_INIT_DONE) == 0) {
+ sc->flags |= (CARD_INIT_DONE);
+ cmdr |= SDMMC_CMD_SEND_INIT;
+ }
+
+ if (data) {
+ if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
+ sc->use_auto_stop)
+ cmdr |= SDMMC_CMD_SEND_ASTOP;
+
+ cmdr |= SDMMC_CMD_DATA_EXP;
+ if (data->flags & MMC_DATA_STREAM)
+ cmdr |= SDMMC_CMD_MODE_STREAM;
+ if (data->flags & MMC_DATA_WRITE)
+ cmdr |= SDMMC_CMD_DATA_WRITE;
+
+ WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
+ WRITE4(sc, SDMMC_BYTCNT, data->len);
+ blksz = (data->len < MMC_SECTOR_SIZE) ? \
+ data->len : MMC_SECTOR_SIZE;
+ WRITE4(sc, SDMMC_BLKSIZ, blksz);
+
+ dma_prepare(sc, cmd);
+ wmb();
+ }
+
+ dprintf("cmdr 0x%08x\n", cmdr);
+
+ WRITE4(sc, SDMMC_CMDARG, cmd->arg);
+ wmb();
+ WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
+};
+
+static void
+dwmmc_next_operation(struct dwmmc_softc *sc)
+{
+ struct mmc_request *req;
+
+ req = sc->req;
+ if (req == NULL)
+ return;
+
+ sc->acd_rcvd = 0;
+ sc->dto_rcvd = 0;
+ sc->cmd_done = 0;
+
+ /*
+	 * XXX: Wait while the card is still busy.
+ * We do need this to prevent data timeouts,
+ * mostly caused by multi-block write command
+ * followed by single-read.
+ */
+ while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
+ continue;
+
+ if (sc->flags & PENDING_CMD) {
+ sc->flags &= ~PENDING_CMD;
+ dwmmc_start_cmd(sc, req->cmd);
+ return;
+ } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
+ sc->flags &= ~PENDING_STOP;
+ dwmmc_start_cmd(sc, req->stop);
+ return;
+ }
+
+ sc->req = NULL;
+ sc->curcmd = NULL;
+ req->done(req);
+}
+
+static int
+dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
+{
+ struct dwmmc_softc *sc;
+
+ sc = device_get_softc(brdev);
+
+ dprintf("%s\n", __func__);
+
+ DWMMC_LOCK(sc);
+
+ if (sc->req != NULL) {
+ DWMMC_UNLOCK(sc);
+ return (EBUSY);
+ }
+
+ sc->req = req;
+ sc->flags |= PENDING_CMD;
+ if (sc->req->stop)
+ sc->flags |= PENDING_STOP;
+ dwmmc_next_operation(sc);
+
+ DWMMC_UNLOCK(sc);
+ return (0);
+}
+
+static int
+dwmmc_get_ro(device_t brdev, device_t reqdev)
+{
+
+ dprintf("%s\n", __func__);
+
+ return (0);
+}
+
+static int
+dwmmc_acquire_host(device_t brdev, device_t reqdev)
+{
+ struct dwmmc_softc *sc;
+
+ sc = device_get_softc(brdev);
+
+ DWMMC_LOCK(sc);
+ while (sc->bus_busy)
+ msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
+ sc->bus_busy++;
+ DWMMC_UNLOCK(sc);
+ return (0);
+}
+
+static int
+dwmmc_release_host(device_t brdev, device_t reqdev)
+{
+ struct dwmmc_softc *sc;
+
+ sc = device_get_softc(brdev);
+
+ DWMMC_LOCK(sc);
+ sc->bus_busy--;
+ wakeup(sc);
+ DWMMC_UNLOCK(sc);
+ return (0);
+}
+
+static int
+dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
+{
+ struct dwmmc_softc *sc;
+
+ sc = device_get_softc(bus);
+
+ switch (which) {
+ default:
+ return (EINVAL);
+ case MMCBR_IVAR_BUS_MODE:
+ *(int *)result = sc->host.ios.bus_mode;
+ break;
+ case MMCBR_IVAR_BUS_WIDTH:
+ *(int *)result = sc->host.ios.bus_width;
+ break;
+ case MMCBR_IVAR_CHIP_SELECT:
+ *(int *)result = sc->host.ios.chip_select;
+ break;
+ case MMCBR_IVAR_CLOCK:
+ *(int *)result = sc->host.ios.clock;
+ break;
+ case MMCBR_IVAR_F_MIN:
+ *(int *)result = sc->host.f_min;
+ break;
+ case MMCBR_IVAR_F_MAX:
+ *(int *)result = sc->host.f_max;
+ break;
+ case MMCBR_IVAR_HOST_OCR:
+ *(int *)result = sc->host.host_ocr;
+ break;
+ case MMCBR_IVAR_MODE:
+ *(int *)result = sc->host.mode;
+ break;
+ case MMCBR_IVAR_OCR:
+ *(int *)result = sc->host.ocr;
+ break;
+ case MMCBR_IVAR_POWER_MODE:
+ *(int *)result = sc->host.ios.power_mode;
+ break;
+ case MMCBR_IVAR_VDD:
+ *(int *)result = sc->host.ios.vdd;
+ break;
+ case MMCBR_IVAR_CAPS:
+ sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ *(int *)result = sc->host.caps;
+ break;
+ case MMCBR_IVAR_MAX_DATA:
+ *(int *)result = DESC_COUNT;
+ }
+ return (0);
+}
+
+static int
+dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
+{
+ struct dwmmc_softc *sc;
+
+ sc = device_get_softc(bus);
+
+ switch (which) {
+ default:
+ return (EINVAL);
+ case MMCBR_IVAR_BUS_MODE:
+ sc->host.ios.bus_mode = value;
+ break;
+ case MMCBR_IVAR_BUS_WIDTH:
+ sc->host.ios.bus_width = value;
+ break;
+ case MMCBR_IVAR_CHIP_SELECT:
+ sc->host.ios.chip_select = value;
+ break;
+ case MMCBR_IVAR_CLOCK:
+ sc->host.ios.clock = value;
+ break;
+ case MMCBR_IVAR_MODE:
+ sc->host.mode = value;
+ break;
+ case MMCBR_IVAR_OCR:
+ sc->host.ocr = value;
+ break;
+ case MMCBR_IVAR_POWER_MODE:
+ sc->host.ios.power_mode = value;
+ break;
+ case MMCBR_IVAR_VDD:
+ sc->host.ios.vdd = value;
+ break;
+ /* These are read-only */
+ case MMCBR_IVAR_CAPS:
+ case MMCBR_IVAR_HOST_OCR:
+ case MMCBR_IVAR_F_MIN:
+ case MMCBR_IVAR_F_MAX:
+ case MMCBR_IVAR_MAX_DATA:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static device_method_t dwmmc_methods[] = {
+ DEVMETHOD(device_probe, dwmmc_probe),
+ DEVMETHOD(device_attach, dwmmc_attach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_read_ivar, dwmmc_read_ivar),
+ DEVMETHOD(bus_write_ivar, dwmmc_write_ivar),
+
+ /* mmcbr_if */
+ DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios),
+ DEVMETHOD(mmcbr_request, dwmmc_request),
+ DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro),
+ DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host),
+ DEVMETHOD(mmcbr_release_host, dwmmc_release_host),
+
+ DEVMETHOD_END
+};
+
+static driver_t dwmmc_driver = {
+ "dwmmc",
+ dwmmc_methods,
+ sizeof(struct dwmmc_softc),
+};
+
+static devclass_t dwmmc_devclass;
+
+DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, 0, 0);
+
diff --git a/sys/dev/mmc/host/dwmmc.h b/sys/dev/mmc/host/dwmmc.h
new file mode 100644
index 0000000..8f763cd
--- /dev/null
+++ b/sys/dev/mmc/host/dwmmc.h
@@ -0,0 +1,150 @@
+/*-
+ * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define SDMMC_CTRL 0x0 /* Control Register */
+#define SDMMC_CTRL_USE_IDMAC (1 << 25) /* Use Internal DMAC */
+#define SDMMC_CTRL_DMA_ENABLE (1 << 5) /* */
+#define SDMMC_CTRL_INT_ENABLE (1 << 4) /* Enable interrupts */
+#define SDMMC_CTRL_DMA_RESET (1 << 2) /* Reset DMA */
+#define SDMMC_CTRL_FIFO_RESET (1 << 1) /* Reset FIFO */
+#define SDMMC_CTRL_RESET (1 << 0) /* Reset SD/MMC controller */
+#define SDMMC_PWREN 0x4 /* Power Enable Register */
+#define SDMMC_PWREN_PE (1 << 0) /* Power On */
+#define SDMMC_CLKDIV 0x8 /* Clock Divider Register */
+#define SDMMC_CLKSRC 0xC /* SD Clock Source Register */
+#define SDMMC_CLKENA 0x10 /* Clock Enable Register */
+#define SDMMC_CLKENA_LP (1 << 16) /* Low-power mode */
+#define SDMMC_CLKENA_CCLK_EN (1 << 0) /* SD/MMC Enable */
+#define SDMMC_TMOUT 0x14 /* Timeout Register */
+#define SDMMC_CTYPE 0x18 /* Card Type Register */
+#define SDMMC_CTYPE_8BIT (1 << 16)
+#define SDMMC_CTYPE_4BIT (1 << 0)
+#define SDMMC_BLKSIZ 0x1C /* Block Size Register */
+#define SDMMC_BYTCNT 0x20 /* Byte Count Register */
+#define SDMMC_INTMASK 0x24 /* Interrupt Mask Register */
+#define SDMMC_INTMASK_SDIO (1 << 16) /* SDIO Interrupt Enable */
+#define SDMMC_INTMASK_EBE (1 << 15) /* End-bit error */
+#define SDMMC_INTMASK_ACD (1 << 14) /* Auto command done */
+#define SDMMC_INTMASK_SBE (1 << 13) /* Start-bit error */
+#define SDMMC_INTMASK_HLE (1 << 12) /* Hardware locked write err */
+#define SDMMC_INTMASK_FRUN (1 << 11) /* FIFO underrun/overrun err */
+#define SDMMC_INTMASK_HTO (1 << 10) /* Data starvation by host timeout */
+#define SDMMC_INTMASK_DRT (1 << 9) /* Data read timeout */
+#define SDMMC_INTMASK_RTO (1 << 8) /* Response timeout */
+#define SDMMC_INTMASK_DCRC (1 << 7) /* Data CRC error */
+#define SDMMC_INTMASK_RCRC (1 << 6) /* Response CRC error */
+#define SDMMC_INTMASK_RXDR (1 << 5) /* Receive FIFO data request */
+#define SDMMC_INTMASK_TXDR (1 << 4) /* Transmit FIFO data request */
+#define SDMMC_INTMASK_DTO (1 << 3) /* Data transfer over */
+#define SDMMC_INTMASK_CMD_DONE (1 << 2) /* Command done */
+#define SDMMC_INTMASK_RE (1 << 1) /* Response error */
+#define SDMMC_INTMASK_CD (1 << 0) /* Card Detected */
+#define SDMMC_CMDARG 0x28 /* Command Argument Register */
+#define SDMMC_CMD 0x2C /* Command Register */
+#define SDMMC_CMD_START (1 << 31)
+#define SDMMC_CMD_USE_HOLD_REG (1 << 29)
+#define SDMMC_CMD_UPD_CLK_ONLY (1 << 21) /* Update clk only */
+#define SDMMC_CMD_SEND_INIT (1 << 15) /* Send initialization */
+#define SDMMC_CMD_STOP_ABORT (1 << 14) /* stop current data transfer */
+#define SDMMC_CMD_WAIT_PRVDATA (1 << 13) /* Wait for prev data transfer completion */
+#define SDMMC_CMD_SEND_ASTOP (1 << 12) /* Send stop command at end of data tx/rx */
+#define SDMMC_CMD_MODE_STREAM (1 << 11) /* Stream data transfer */
+#define SDMMC_CMD_DATA_WRITE (1 << 10) /* Write to card */
+#define SDMMC_CMD_DATA_EXP (1 << 9) /* Data transfer expected */
+#define SDMMC_CMD_RESP_CRC (1 << 8) /* Check Response CRC */
+#define SDMMC_CMD_RESP_LONG (1 << 7) /* Long response expected */
+#define SDMMC_CMD_RESP_EXP (1 << 6) /* Response expected */
+#define SDMMC_RESP0 0x30 /* Response Register 0 */
+#define SDMMC_RESP1 0x34 /* Response Register 1 */
+#define SDMMC_RESP2 0x38 /* Response Register 2 */
+#define SDMMC_RESP3 0x3C /* Response Register 3 */
+#define SDMMC_MINTSTS 0x40 /* Masked Interrupt Status Register */
+#define SDMMC_RINTSTS 0x44 /* Raw Interrupt Status Register */
+#define SDMMC_STATUS 0x48 /* Status Register */
+#define SDMMC_STATUS_DATA_BUSY (1 << 9) /* card_data[0] */
+#define SDMMC_FIFOTH 0x4C /* FIFO Threshold Watermark Register */
+#define SDMMC_FIFOTH_MSIZE_S 28 /* Burst size of multiple transaction */
+#define SDMMC_FIFOTH_RXWMARK_S 16 /* FIFO threshold watermark level */
+#define SDMMC_FIFOTH_TXWMARK_S 0 /* FIFO threshold watermark level */
+#define SDMMC_CDETECT 0x50 /* Card Detect Register */
+#define SDMMC_WRTPRT 0x54 /* Write Protect Register */
+#define SDMMC_TCBCNT 0x5C /* Transferred CIU Card Byte Count */
+#define SDMMC_TBBCNT 0x60 /* Transferred Host to BIU-FIFO Byte Count */
+#define SDMMC_DEBNCE 0x64 /* Debounce Count Register */
+#define SDMMC_USRID 0x68 /* User ID Register */
+#define SDMMC_VERID 0x6C /* Version ID Register */
+#define SDMMC_HCON 0x70 /* Hardware Configuration Register */
+#define SDMMC_UHS_REG 0x74 /* UHS-1 Register */
+#define SDMMC_UHS_REG_DDR (1 << 16) /* DDR mode */
+#define SDMMC_RST_N 0x78 /* Hardware Reset Register */
+#define SDMMC_BMOD 0x80 /* Bus Mode Register */
+#define SDMMC_BMOD_DE (1 << 7) /* IDMAC Enable */
+#define SDMMC_BMOD_FB (1 << 1) /* AHB Master Fixed Burst */
+#define SDMMC_BMOD_SWR (1 << 0) /* Reset DMA */
+#define SDMMC_PLDMND 0x84 /* Poll Demand Register */
+#define SDMMC_DBADDR 0x88 /* Descriptor List Base Address */
+#define SDMMC_IDSTS 0x8C /* Internal DMAC Status Register */
+#define SDMMC_IDINTEN 0x90 /* Internal DMAC Interrupt Enable */
+#define SDMMC_IDINTEN_AI (1 << 9) /* Abnormal Interrupt Summary */
+#define SDMMC_IDINTEN_NI (1 << 8) /* Normal Interrupt Summary */
+#define SDMMC_IDINTEN_CES (1 << 5) /* Card Error Summary */
+#define SDMMC_IDINTEN_DU (1 << 4) /* Descriptor Unavailable */
+#define SDMMC_IDINTEN_FBE (1 << 2) /* Fatal Bus Error */
+#define SDMMC_IDINTEN_RI (1 << 1) /* Receive Interrupt */
+#define SDMMC_IDINTEN_TI (1 << 0) /* Transmit Interrupt */
+#define SDMMC_IDINTEN_MASK (SDMMC_IDINTEN_AI | SDMMC_IDINTEN_NI | SDMMC_IDINTEN_CES | \
+ SDMMC_IDINTEN_DU | SDMMC_IDINTEN_FBE | SDMMC_IDINTEN_RI | \
+ SDMMC_IDINTEN_TI)
+#define SDMMC_DSCADDR 0x94 /* Current Host Descriptor Address */
+#define SDMMC_BUFADDR 0x98 /* Current Buffer Descriptor Address */
+#define SDMMC_CARDTHRCTL 0x100 /* Card Threshold Control Register */
+#define SDMMC_BACK_END_POWER_R 0x104 /* Back End Power Register */
+#define SDMMC_DATA 0x200 /* Data FIFO Access */
+
+/* eMMC */
+#define EMMCP_MPSBEGIN0 0x1200 /* */
+#define EMMCP_SEND0 0x1204 /* */
+#define EMMCP_CTRL0 0x120C /* */
+#define MPSCTRL_SECURE_READ_BIT (1 << 7)
+#define MPSCTRL_SECURE_WRITE_BIT (1 << 6)
+#define MPSCTRL_NON_SECURE_READ_BIT (1 << 5)
+#define MPSCTRL_NON_SECURE_WRITE_BIT (1 << 4)
+#define MPSCTRL_USE_FUSE_KEY (1 << 3)
+#define MPSCTRL_ECB_MODE (1 << 2)
+#define MPSCTRL_ENCRYPTION (1 << 1)
+#define MPSCTRL_VALID (1 << 0)
+
+/* Platform-specific defines */
+#define SDMMC_CLKSEL 0x9C
+#define SDMMC_CLKSEL_SAMPLE_SHIFT 0
+#define SDMMC_CLKSEL_DRIVE_SHIFT 16
+#define SDMMC_CLKSEL_DIVIDER_SHIFT 24
diff --git a/sys/dev/mmc/mmc.c b/sys/dev/mmc/mmc.c
index 8b6e1a8..a7762ba 100644
--- a/sys/dev/mmc/mmc.c
+++ b/sys/dev/mmc/mmc.c
@@ -1773,4 +1773,5 @@ DRIVER_MODULE(mmc, sdhci_imx, mmc_driver, mmc_devclass, NULL, NULL);
DRIVER_MODULE(mmc, sdhci_pci, mmc_driver, mmc_devclass, NULL, NULL);
DRIVER_MODULE(mmc, sdhci_ti, mmc_driver, mmc_devclass, NULL, NULL);
DRIVER_MODULE(mmc, ti_mmchs, mmc_driver, mmc_devclass, NULL, NULL);
+DRIVER_MODULE(mmc, dwmmc, mmc_driver, mmc_devclass, NULL, NULL);
diff --git a/sys/dev/mpr/mpr_sas.c b/sys/dev/mpr/mpr_sas.c
index 97e815c..7ddae71 100644
--- a/sys/dev/mpr/mpr_sas.c
+++ b/sys/dev/mpr/mpr_sas.c
@@ -1043,6 +1043,9 @@ mprsas_action(struct cam_sim *sim, union ccb *ccb)
case 0x0a:
sas->bitrate = 600000;
break;
+ case 0x0b:
+ sas->bitrate = 1200000;
+ break;
default:
sas->valid = 0;
}
diff --git a/sys/dev/mpr/mpr_table.c b/sys/dev/mpr/mpr_table.c
index b1e12b3..6e7797e 100644
--- a/sys/dev/mpr/mpr_table.c
+++ b/sys/dev/mpr/mpr_table.c
@@ -118,6 +118,7 @@ struct mpr_table_lookup mpr_linkrate_names[] = {
{"1.5Gbps", 0x08},
{"3.0Gbps", 0x09},
{"6.0Gbps", 0x0a},
+ {"12.0Gbps", 0x0b},
{NULL, 0},
{"LinkRate Unknown", 0x00}
};
diff --git a/sys/dev/mrsas/mrsas.c b/sys/dev/mrsas/mrsas.c
index b740128..ba15848 100644
--- a/sys/dev/mrsas/mrsas.c
+++ b/sys/dev/mrsas/mrsas.c
@@ -1,43 +1,38 @@
/*
- * Copyright (c) 2014, LSI Corp.
- * All rights reserved.
- * Author: Marian Choy
+ * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
* Support: freebsdraid@lsi.com
*
* Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. Neither the name of the <ORGANIZATION> nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer. 2. Redistributions
+ * in binary form must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution. 3. Neither the name of the
+ * <ORGANIZATION> nor the names of its contributors may be used to endorse or
+ * promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing
* official policies,either expressed or implied, of the FreeBSD Project.
*
- * Send feedback to: <megaraidfbsd@lsi.com>
- * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
- * ATTN: MegaRaid FreeBSD
+ * Send feedback to: <megaraidfbsd@lsi.com> Mail to: LSI Corporation, 1621
+ * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
*
*/
@@ -54,18 +49,23 @@ __FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
+#include <sys/smp.h>
-/*
- * Function prototypes
+/*
+ * Function prototypes
*/
-static d_open_t mrsas_open;
-static d_close_t mrsas_close;
-static d_read_t mrsas_read;
-static d_write_t mrsas_write;
-static d_ioctl_t mrsas_ioctl;
+static d_open_t mrsas_open;
+static d_close_t mrsas_close;
+static d_read_t mrsas_read;
+static d_write_t mrsas_write;
+static d_ioctl_t mrsas_ioctl;
+static d_poll_t mrsas_poll;
+static struct mrsas_mgmt_info mrsas_mgmt_info;
static struct mrsas_ident *mrsas_find_ident(device_t);
+static int mrsas_setup_msix(struct mrsas_softc *sc);
+static int mrsas_allocate_msix(struct mrsas_softc *sc);
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
static void mrsas_flush_cache(struct mrsas_softc *sc);
static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
@@ -79,456 +79,476 @@ static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
-static int mrsas_complete_cmd(struct mrsas_softc *sc);
+static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
static int mrsas_clear_intr(struct mrsas_softc *sc);
-static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
- struct mrsas_ctrl_info *ctrl_info);
-static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
- struct mrsas_mfi_cmd *cmd_to_abort);
+static int
+mrsas_get_ctrl_info(struct mrsas_softc *sc,
+ struct mrsas_ctrl_info *ctrl_info);
+static int
+mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd_to_abort);
u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
-u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
- struct mrsas_mfi_cmd *mfi_cmd);
-int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
-int mrsas_init_adapter(struct mrsas_softc *sc);
-int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
-int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
-int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
-int mrsas_ioc_init(struct mrsas_softc *sc);
-int mrsas_bus_scan(struct mrsas_softc *sc);
-int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
-int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
-int mrsas_reset_ctrl(struct mrsas_softc *sc);
-int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
-int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
- struct mrsas_mfi_cmd *cmd);
-int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
- int size);
-void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
-void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
-void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
-void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
-void mrsas_disable_intr(struct mrsas_softc *sc);
-void mrsas_enable_intr(struct mrsas_softc *sc);
-void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
-void mrsas_free_mem(struct mrsas_softc *sc);
-void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
-void mrsas_isr(void *arg);
-void mrsas_teardown_intr(struct mrsas_softc *sc);
-void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
-void mrsas_kill_hba (struct mrsas_softc *sc);
-void mrsas_aen_handler(struct mrsas_softc *sc);
-void mrsas_write_reg(struct mrsas_softc *sc, int offset,
- u_int32_t value);
-void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
- u_int32_t req_desc_hi);
-void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
-void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
- struct mrsas_mfi_cmd *cmd, u_int8_t status);
-void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
- u_int8_t extStatus);
-struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
-MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
- struct mrsas_mfi_cmd *cmd);
+u_int8_t
+mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *mfi_cmd);
+int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
+int mrsas_init_adapter(struct mrsas_softc *sc);
+int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
+int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
+int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
+int mrsas_ioc_init(struct mrsas_softc *sc);
+int mrsas_bus_scan(struct mrsas_softc *sc);
+int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+int mrsas_reset_ctrl(struct mrsas_softc *sc);
+int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
+int
+mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd);
+int
+mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
+ int size);
+void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
+void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_disable_intr(struct mrsas_softc *sc);
+void mrsas_enable_intr(struct mrsas_softc *sc);
+void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
+void mrsas_free_mem(struct mrsas_softc *sc);
+void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
+void mrsas_isr(void *arg);
+void mrsas_teardown_intr(struct mrsas_softc *sc);
+void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
+void mrsas_kill_hba(struct mrsas_softc *sc);
+void mrsas_aen_handler(struct mrsas_softc *sc);
+void
+mrsas_write_reg(struct mrsas_softc *sc, int offset,
+ u_int32_t value);
+void
+mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
+ u_int32_t req_desc_hi);
+void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
+void
+mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd, u_int8_t status);
+void
+mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
+ u_int8_t extStatus);
+struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
+
+MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
+ (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_cam_attach(struct mrsas_softc *sc);
extern void mrsas_cam_detach(struct mrsas_softc *sc);
extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
-extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
+extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
-extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
+extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
-extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
-extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
+extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
extern void mrsas_xpt_release(struct mrsas_softc *sc);
-extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
- u_int16_t index);
+extern MRSAS_REQUEST_DESCRIPTOR_UNION *
+mrsas_get_request_desc(struct mrsas_softc *sc,
+ u_int16_t index);
extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
-SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
+SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
-/**
+/*
* PCI device struct and table
*
*/
typedef struct mrsas_ident {
- uint16_t vendor;
- uint16_t device;
- uint16_t subvendor;
- uint16_t subdevice;
- const char *desc;
-} MRSAS_CTLR_ID;
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ const char *desc;
+} MRSAS_CTLR_ID;
MRSAS_CTLR_ID device_table[] = {
- {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
- {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
- {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
- {0, 0, 0, 0, NULL}
+ {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
+ {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
+ {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
+ {0, 0, 0, 0, NULL}
};
-/**
- * Character device entry points
+/*
+ * Character device entry points
*
*/
static struct cdevsw mrsas_cdevsw = {
- .d_version = D_VERSION,
- .d_open = mrsas_open,
- .d_close = mrsas_close,
- .d_read = mrsas_read,
- .d_write = mrsas_write,
- .d_ioctl = mrsas_ioctl,
- .d_name = "mrsas",
+ .d_version = D_VERSION,
+ .d_open = mrsas_open,
+ .d_close = mrsas_close,
+ .d_read = mrsas_read,
+ .d_write = mrsas_write,
+ .d_ioctl = mrsas_ioctl,
+ .d_poll = mrsas_poll,
+ .d_name = "mrsas",
};
MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
-/**
- * In the cdevsw routines, we find our softc by using the si_drv1 member
- * of struct cdev. We set this variable to point to our softc in our
- * attach routine when we create the /dev entry.
+/*
+ * In the cdevsw routines, we find our softc by using the si_drv1 member of
+ * struct cdev. We set this variable to point to our softc in our attach
+ * routine when we create the /dev entry.
*/
int
mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
{
- struct mrsas_softc *sc;
+ struct mrsas_softc *sc;
- sc = dev->si_drv1;
- return (0);
+ sc = dev->si_drv1;
+ return (0);
}
int
mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
{
- struct mrsas_softc *sc;
+ struct mrsas_softc *sc;
- sc = dev->si_drv1;
- return (0);
+ sc = dev->si_drv1;
+ return (0);
}
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{
- struct mrsas_softc *sc;
+ struct mrsas_softc *sc;
- sc = dev->si_drv1;
- return (0);
+ sc = dev->si_drv1;
+ return (0);
}
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{
- struct mrsas_softc *sc;
+ struct mrsas_softc *sc;
- sc = dev->si_drv1;
- return (0);
+ sc = dev->si_drv1;
+ return (0);
}
-/**
- * Register Read/Write Functions
+/*
+ * Register Read/Write Functions
*
*/
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
- u_int32_t value)
+ u_int32_t value)
{
- bus_space_tag_t bus_tag = sc->bus_tag;
- bus_space_handle_t bus_handle = sc->bus_handle;
+ bus_space_tag_t bus_tag = sc->bus_tag;
+ bus_space_handle_t bus_handle = sc->bus_handle;
- bus_space_write_4(bus_tag, bus_handle, offset, value);
+ bus_space_write_4(bus_tag, bus_handle, offset, value);
}
u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
- bus_space_tag_t bus_tag = sc->bus_tag;
- bus_space_handle_t bus_handle = sc->bus_handle;
+ bus_space_tag_t bus_tag = sc->bus_tag;
+ bus_space_handle_t bus_handle = sc->bus_handle;
- return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
+ return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}
-/**
- * Interrupt Disable/Enable/Clear Functions
+/*
+ * Interrupt Disable/Enable/Clear Functions
*
*/
-void mrsas_disable_intr(struct mrsas_softc *sc)
+void
+mrsas_disable_intr(struct mrsas_softc *sc)
{
- u_int32_t mask = 0xFFFFFFFF;
- u_int32_t status;
+ u_int32_t mask = 0xFFFFFFFF;
+ u_int32_t status;
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
- /* Dummy read to force pci flush */
- status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
+ /* Dummy read to force pci flush */
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
-void mrsas_enable_intr(struct mrsas_softc *sc)
+void
+mrsas_enable_intr(struct mrsas_softc *sc)
{
- u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
- u_int32_t status;
+ u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
+ u_int32_t status;
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
- status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
- status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
-static int mrsas_clear_intr(struct mrsas_softc *sc)
+static int
+mrsas_clear_intr(struct mrsas_softc *sc)
{
- u_int32_t status, fw_status, fw_state;
+ u_int32_t status, fw_status, fw_state;
- /* Read received interrupt */
- status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
+ /* Read received interrupt */
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
- /* If FW state change interrupt is received, write to it again to clear */
- if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
- fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- outbound_scratch_pad));
- fw_state = fw_status & MFI_STATE_MASK;
- if (fw_state == MFI_STATE_FAULT) {
- device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
- if(sc->ocr_thread_active)
- wakeup(&sc->ocr_chan);
- }
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
- mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
- return(1);
- }
-
- /* Not our interrupt, so just return */
- if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
- return(0);
+ /*
+ * If FW state change interrupt is received, write to it again to
+ * clear
+ */
+ if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
+ fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad));
+ fw_state = fw_status & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
+ if (sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
+ }
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
+ mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
+ return (1);
+ }
+ /* Not our interrupt, so just return */
+ if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
+ return (0);
- /* We got a reply interrupt */
- return(1);
+ /* We got a reply interrupt */
+ return (1);
}
-/**
- * PCI Support Functions
+/*
+ * PCI Support Functions
*
*/
-static struct mrsas_ident * mrsas_find_ident(device_t dev)
+static struct mrsas_ident *
+mrsas_find_ident(device_t dev)
{
- struct mrsas_ident *pci_device;
+ struct mrsas_ident *pci_device;
- for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
- {
- if ((pci_device->vendor == pci_get_vendor(dev)) &&
- (pci_device->device == pci_get_device(dev)) &&
- ((pci_device->subvendor == pci_get_subvendor(dev)) ||
- (pci_device->subvendor == 0xffff)) &&
- ((pci_device->subdevice == pci_get_subdevice(dev)) ||
- (pci_device->subdevice == 0xffff)))
- return (pci_device);
- }
- return (NULL);
+ for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
+ if ((pci_device->vendor == pci_get_vendor(dev)) &&
+ (pci_device->device == pci_get_device(dev)) &&
+ ((pci_device->subvendor == pci_get_subvendor(dev)) ||
+ (pci_device->subvendor == 0xffff)) &&
+ ((pci_device->subdevice == pci_get_subdevice(dev)) ||
+ (pci_device->subdevice == 0xffff)))
+ return (pci_device);
+ }
+ return (NULL);
}
-static int mrsas_probe(device_t dev)
+static int
+mrsas_probe(device_t dev)
{
- static u_int8_t first_ctrl = 1;
- struct mrsas_ident *id;
+ static u_int8_t first_ctrl = 1;
+ struct mrsas_ident *id;
- if ((id = mrsas_find_ident(dev)) != NULL) {
- if (first_ctrl) {
- printf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION);
- first_ctrl = 0;
- }
- device_set_desc(dev, id->desc);
- /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
- return (-30);
- }
- return (ENXIO);
+ if ((id = mrsas_find_ident(dev)) != NULL) {
+ if (first_ctrl) {
+ printf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n",
+ MRSAS_VERSION);
+ first_ctrl = 0;
+ }
+ device_set_desc(dev, id->desc);
+ /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
+ return (-30);
+ }
+ return (ENXIO);
}
-/**
- * mrsas_setup_sysctl: setup sysctl values for mrsas
- * input: Adapter instance soft state
+/*
+ * mrsas_setup_sysctl: setup sysctl values for mrsas
+ * input: Adapter instance soft state
*
* Setup sysctl entries for mrsas driver.
*/
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
- struct sysctl_ctx_list *sysctl_ctx = NULL;
- struct sysctl_oid *sysctl_tree = NULL;
- char tmpstr[80], tmpstr2[80];
-
- /*
- * Setup the sysctl variable so the user can change the debug level
- * on the fly.
- */
- snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
- device_get_unit(sc->mrsas_dev));
- snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
-
- sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
- if (sysctl_ctx != NULL)
- sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
-
- if (sysctl_tree == NULL) {
- sysctl_ctx_init(&sc->sysctl_ctx);
- sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
- SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
- CTLFLAG_RD, 0, tmpstr);
- if (sc->sysctl_tree == NULL)
- return;
- sysctl_ctx = &sc->sysctl_ctx;
- sysctl_tree = sc->sysctl_tree;
- }
- SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
- OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
- "Disable the use of OCR");
-
- SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
- OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
- strlen(MRSAS_VERSION), "driver version");
-
- SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
- OID_AUTO, "reset_count", CTLFLAG_RD,
- &sc->reset_count, 0, "number of ocr from start of the day");
-
- SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
- OID_AUTO, "fw_outstanding", CTLFLAG_RD,
- &sc->fw_outstanding, 0, "FW outstanding commands");
+ struct sysctl_ctx_list *sysctl_ctx = NULL;
+ struct sysctl_oid *sysctl_tree = NULL;
+ char tmpstr[80], tmpstr2[80];
+
+ /*
+ * Setup the sysctl variable so the user can change the debug level
+ * on the fly.
+ */
+ snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
+ device_get_unit(sc->mrsas_dev));
+ snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
+
+ sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
+ if (sysctl_ctx != NULL)
+ sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
+
+ if (sysctl_tree == NULL) {
+ sysctl_ctx_init(&sc->sysctl_ctx);
+ sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
+ SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
+ CTLFLAG_RD, 0, tmpstr);
+ if (sc->sysctl_tree == NULL)
+ return;
+ sysctl_ctx = &sc->sysctl_ctx;
+ sysctl_tree = sc->sysctl_tree;
+ }
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
+ "Disable the use of OCR");
+
+ SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
+ strlen(MRSAS_VERSION), "driver version");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
- OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
- &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
+ OID_AUTO, "reset_count", CTLFLAG_RD,
+ &sc->reset_count, 0, "number of ocr from start of the day");
- SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
- OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
- "Driver debug level");
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "fw_outstanding", CTLFLAG_RD,
+ &sc->fw_outstanding, 0, "FW outstanding commands");
- SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
- OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
- 0, "Driver IO timeout value in mili-second.");
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
+ &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
+ "Driver debug level");
- SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
- OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
- &sc->mrsas_fw_fault_check_delay,
- 0, "FW fault check thread delay in seconds. <default is 1 sec>");
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
+ 0, "Driver IO timeout value in mili-second.");
- SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
- OID_AUTO, "reset_in_progress", CTLFLAG_RD,
- &sc->reset_in_progress, 0, "ocr in progress status");
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
+ &sc->mrsas_fw_fault_check_delay,
+ 0, "FW fault check thread delay in seconds. <default is 1 sec>");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "reset_in_progress", CTLFLAG_RD,
+ &sc->reset_in_progress, 0, "ocr in progress status");
}
-/**
- * mrsas_get_tunables: get tunable parameters.
- * input: Adapter instance soft state
+/*
+ * mrsas_get_tunables: get tunable parameters.
+ * input: Adapter instance soft state
*
* Get tunable parameters. This will help to debug driver at boot time.
*/
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
- char tmpstr[80];
+ char tmpstr[80];
- /* XXX default to some debugging for now */
- sc->mrsas_debug = MRSAS_FAULT;
- sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
- sc->mrsas_fw_fault_check_delay = 1;
- sc->reset_count = 0;
- sc->reset_in_progress = 0;
+ /* XXX default to some debugging for now */
+ sc->mrsas_debug = MRSAS_FAULT;
+ sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
+ sc->mrsas_fw_fault_check_delay = 1;
+ sc->reset_count = 0;
+ sc->reset_in_progress = 0;
- /*
- * Grab the global variables.
- */
- TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
+ /*
+ * Grab the global variables.
+ */
+ TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
- /* Grab the unit-instance variables */
- snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
- device_get_unit(sc->mrsas_dev));
- TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
+ /* Grab the unit-instance variables */
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
+ device_get_unit(sc->mrsas_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
-/**
- * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
- * Used to get sequence number at driver load time.
- * input: Adapter soft state
+/*
+ * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
+ * Used to get sequence number at driver load time.
+ * input: Adapter soft state
*
* Allocates DMAable memory for the event log info internal command.
*/
-int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
-{
- int el_info_size;
-
- /* Allocate get event log info command */
- el_info_size = sizeof(struct mrsas_evt_log_info);
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 1, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- el_info_size, // maxsize
- 1, // msegments
- el_info_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->el_info_tag)) {
- device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
- BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
- return (ENOMEM);
- }
- if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
- sc->el_info_mem, el_info_size, mrsas_addr_cb,
- &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
- return (ENOMEM);
- }
-
- memset(sc->el_info_mem, 0, el_info_size);
- return (0);
-}
-
-/**
- * mrsas_free_evt_info_cmd: Free memory for Event log info command
- * input: Adapter soft state
+int
+mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
+{
+ int el_info_size;
+
+ /* Allocate get event log info command */
+ el_info_size = sizeof(struct mrsas_evt_log_info);
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ el_info_size,
+ 1,
+ el_info_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->el_info_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
+ BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
+ sc->el_info_mem, el_info_size, mrsas_addr_cb,
+ &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
+ return (ENOMEM);
+ }
+ memset(sc->el_info_mem, 0, el_info_size);
+ return (0);
+}
+
+/*
+ * mrsas_free_evt_info_cmd: Free memory for Event log info command
+ * input: Adapter soft state
*
* Deallocates memory for the event log info internal command.
*/
-void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
+void
+mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
- if (sc->el_info_phys_addr)
- bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
- if (sc->el_info_mem != NULL)
- bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
- if (sc->el_info_tag != NULL)
- bus_dma_tag_destroy(sc->el_info_tag);
+ if (sc->el_info_phys_addr)
+ bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
+ if (sc->el_info_mem != NULL)
+ bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
+ if (sc->el_info_tag != NULL)
+ bus_dma_tag_destroy(sc->el_info_tag);
}
-/**
+/*
* mrsas_get_seq_num: Get latest event sequence number
* @sc: Adapter soft state
* @eli: Firmware event log sequence number information.
- * Firmware maintains a log of all events in a non-volatile area.
- * Driver get the sequence number using DCMD
- * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
+ *
+ * Firmware maintains a log of all events in a non-volatile area.
+ * Driver get the sequence number using DCMD
+ * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
*/
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
- struct mrsas_evt_log_info *eli)
+ struct mrsas_evt_log_info *eli)
{
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
- cmd = mrsas_get_mfi_cmd(sc);
+ cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
return -ENOMEM;
}
-
dcmd = &cmd->frame->dcmd;
if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
@@ -536,7 +556,6 @@ mrsas_get_seq_num(struct mrsas_softc *sc,
mrsas_release_mfi_cmd(cmd);
return -ENOMEM;
}
-
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
@@ -553,8 +572,8 @@ mrsas_get_seq_num(struct mrsas_softc *sc,
mrsas_issue_blocked_cmd(sc, cmd);
/*
- * Copy the data back into callers buffer
- */
+ * Copy the data back into callers buffer
+ */
memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
mrsas_free_evt_log_info_cmd(sc);
mrsas_release_mfi_cmd(cmd);
@@ -563,18 +582,19 @@ mrsas_get_seq_num(struct mrsas_softc *sc,
}
-/**
+/*
* mrsas_register_aen: Register for asynchronous event notification
- * @sc: Adapter soft state
- * @seq_num: Starting sequence number
- * @class_locale: Class of the event
- * This function subscribes for events beyond the @seq_num
- * and type @class_locale.
- *
- * */
+ * @sc: Adapter soft state
+ * @seq_num: Starting sequence number
+ * @class_locale: Class of the event
+ *
+ * This function subscribes for events beyond the @seq_num
+ * and type @class_locale.
+ *
+ */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
- u_int32_t class_locale_word)
+ u_int32_t class_locale_word)
{
int ret_val;
struct mrsas_mfi_cmd *cmd;
@@ -582,17 +602,16 @@ mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
union mrsas_evt_class_locale curr_aen;
union mrsas_evt_class_locale prev_aen;
-/*
- * If there an AEN pending already (aen_cmd), check if the
- * class_locale of that pending AEN is inclusive of the new
- * AEN request we currently have. If it is, then we don't have
- * to do anything. In other words, whichever events the current
- * AEN request is subscribing to, have already been subscribed
- * to.
- * If the old_cmd is _not_ inclusive, then we have to abort
- * that command, form a class_locale that is superset of both
- * old and current and re-issue to the FW
- * */
+ /*
+ * If there an AEN pending already (aen_cmd), check if the
+ * class_locale of that pending AEN is inclusive of the new AEN
+ * request we currently have. If it is, then we don't have to do
+ * anything. In other words, whichever events the current AEN request
+ * is subscribing to, have already been subscribed to. If the old_cmd
+ * is _not_ inclusive, then we have to abort that command, form a
+ * class_locale that is superset of both old and current and re-issue
+ * to the FW
+ */
curr_aen.word = class_locale_word;
@@ -600,21 +619,21 @@ mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
-/*
- * A class whose enum value is smaller is inclusive of all
- * higher values. If a PROGRESS (= -1) was previously
- * registered, then a new registration requests for higher
- * classes need not be sent to FW. They are automatically
- * included.
- * Locale numbers don't have such hierarchy. They are bitmap values
- */
+ /*
+ * A class whose enum value is smaller is inclusive of all
+ * higher values. If a PROGRESS (= -1) was previously
+ * registered, then a new registration requests for higher
+ * classes need not be sent to FW. They are automatically
+ * included. Locale numbers don't have such hierarchy. They
+ * are bitmap values
+ */
if ((prev_aen.members.class <= curr_aen.members.class) &&
- !((prev_aen.members.locale & curr_aen.members.locale) ^
- curr_aen.members.locale)) {
+ !((prev_aen.members.locale & curr_aen.members.locale) ^
+ curr_aen.members.locale)) {
/*
- * Previously issued event registration includes
- * current request. Nothing to do.
- */
+ * Previously issued event registration includes
+ * current request. Nothing to do.
+ */
return 0;
} else {
curr_aen.members.locale |= prev_aen.members.locale;
@@ -624,17 +643,16 @@ mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
sc->aen_cmd->abort_aen = 1;
ret_val = mrsas_issue_blocked_abort_cmd(sc,
- sc->aen_cmd);
+ sc->aen_cmd);
if (ret_val) {
printf("mrsas: Failed to abort "
- "previous AEN command\n");
+ "previous AEN command\n");
return ret_val;
}
}
}
-
- cmd = mrsas_get_mfi_cmd(sc);
+ cmd = mrsas_get_mfi_cmd(sc);
if (!cmd)
return -ENOMEM;
@@ -643,9 +661,9 @@ mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
-/*
- * Prepare DCMD for aen registration
- */
+ /*
+ * Prepare DCMD for aen registration
+ */
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
@@ -657,2895 +675,3236 @@ mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
dcmd->mbox.w[0] = seq_num;
- sc->last_seq_num = seq_num;
+ sc->last_seq_num = seq_num;
dcmd->mbox.w[1] = curr_aen.word;
- dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
+ dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
if (sc->aen_cmd != NULL) {
mrsas_release_mfi_cmd(cmd);
return 0;
}
-
/*
- * Store reference to the cmd used to register for AEN. When an
- * application wants us to register for AEN, we have to abort this
- * cmd and re-register with a new EVENT LOCALE supplied by that app
- */
+ * Store reference to the cmd used to register for AEN. When an
+ * application wants us to register for AEN, we have to abort this
+ * cmd and re-register with a new EVENT LOCALE supplied by that app
+ */
sc->aen_cmd = cmd;
/*
- Issue the aen registration frame
- */
- if (mrsas_issue_dcmd(sc, cmd)){
- device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
- return(1);
- }
-
+ * Issue the aen registration frame
+ */
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
+ return (1);
+ }
return 0;
}
-/**
- * mrsas_start_aen - Subscribes to AEN during driver load time
- * @instance: Adapter soft state
+
+/*
+ * mrsas_start_aen: Subscribes to AEN during driver load time
+ * @instance: Adapter soft state
*/
-static int mrsas_start_aen(struct mrsas_softc *sc)
+static int
+mrsas_start_aen(struct mrsas_softc *sc)
{
struct mrsas_evt_log_info eli;
union mrsas_evt_class_locale class_locale;
- /* Get the latest sequence number from FW*/
-
+ /* Get the latest sequence number from FW */
+
memset(&eli, 0, sizeof(eli));
if (mrsas_get_seq_num(sc, &eli))
return -1;
- /* Register AEN with FW for latest sequence number plus 1*/
+ /* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
return mrsas_register_aen(sc, eli.newest_seq_num + 1,
- class_locale.word);
-}
-
-/**
- * mrsas_attach: PCI entry point
- * input: device struct pointer
- *
- * Performs setup of PCI and registers, initializes mutexes and
- * linked lists, registers interrupts and CAM, and initializes
- * the adapter/controller to its proper state.
- */
-static int mrsas_attach(device_t dev)
-{
- struct mrsas_softc *sc = device_get_softc(dev);
- uint32_t cmd, bar, error;
-
- /* Look up our softc and initialize its fields. */
- sc->mrsas_dev = dev;
- sc->device_id = pci_get_device(dev);
-
- mrsas_get_tunables(sc);
-
- /*
- * Set up PCI and registers
- */
- cmd = pci_read_config(dev, PCIR_COMMAND, 2);
- if ( (cmd & PCIM_CMD_PORTEN) == 0) {
- return (ENXIO);
- }
- /* Force the busmaster enable bit on. */
- cmd |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, cmd, 2);
-
- //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
- bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
-
- sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
- if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
- &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
- == NULL) {
- device_printf(dev, "Cannot allocate PCI registers\n");
- goto attach_fail;
- }
- sc->bus_tag = rman_get_bustag(sc->reg_res);
- sc->bus_handle = rman_get_bushandle(sc->reg_res);
-
- /* Intialize mutexes */
- mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
- mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
- mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
- mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
- mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
- mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
- mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
- mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
-
- /* Intialize linked list */
- TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
- TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
-
- atomic_set(&sc->fw_outstanding,0);
+ class_locale.word);
- sc->io_cmds_highwater = 0;
+}
+
+/*
+ * mrsas_setup_msix: Allocate MSI-x vectors
+ * @sc: adapter soft state
+ */
+static int
+mrsas_setup_msix(struct mrsas_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < sc->msix_vectors; i++) {
+ sc->irq_context[i].sc = sc;
+ sc->irq_context[i].MSIxIndex = i;
+ sc->irq_id[i] = i + 1;
+ sc->mrsas_irq[i] = bus_alloc_resource_any
+ (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
+ ,RF_ACTIVE);
+ if (sc->mrsas_irq[i] == NULL) {
+ device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
+ goto irq_alloc_failed;
+ }
+ if (bus_setup_intr(sc->mrsas_dev,
+ sc->mrsas_irq[i],
+ INTR_MPSAFE | INTR_TYPE_CAM,
+ NULL, mrsas_isr, &sc->irq_context[i],
+ &sc->intr_handle[i])) {
+ device_printf(sc->mrsas_dev,
+ "Cannot set up MSI-x interrupt handler\n");
+ goto irq_alloc_failed;
+ }
+ }
+ return SUCCESS;
- /* Create a /dev entry for this device. */
- sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
- GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
- device_get_unit(dev));
- if (sc->mrsas_cdev)
- sc->mrsas_cdev->si_drv1 = sc;
+irq_alloc_failed:
+ mrsas_teardown_intr(sc);
+ return (FAIL);
+}
- sc->adprecovery = MRSAS_HBA_OPERATIONAL;
- sc->UnevenSpanSupport = 0;
+/*
+ * mrsas_allocate_msix: Setup MSI-x vectors
+ * @sc: adapter soft state
+ */
+static int
+mrsas_allocate_msix(struct mrsas_softc *sc)
+{
+ if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
+ device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
+ " of vectors\n", sc->msix_vectors);
+ } else {
+ device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
+ goto irq_alloc_failed;
+ }
+ return SUCCESS;
- /* Initialize Firmware */
- if (mrsas_init_fw(sc) != SUCCESS) {
- goto attach_fail_fw;
- }
+irq_alloc_failed:
+ mrsas_teardown_intr(sc);
+ return (FAIL);
+}
- /* Register SCSI mid-layer */
- if ((mrsas_cam_attach(sc) != SUCCESS)) {
- goto attach_fail_cam;
- }
+/*
+ * mrsas_attach: PCI entry point
+ * input: pointer to device struct
+ *
+ * Performs setup of PCI and registers, initializes mutexes and linked lists,
+ * registers interrupts and CAM, and initializes the adapter/controller to
+ * its proper state.
+ */
+static int
+mrsas_attach(device_t dev)
+{
+ struct mrsas_softc *sc = device_get_softc(dev);
+ uint32_t cmd, bar, error;
- /* Register IRQs */
- if (mrsas_setup_irq(sc) != SUCCESS) {
- goto attach_fail_irq;
- }
+ /* Look up our softc and initialize its fields. */
+ sc->mrsas_dev = dev;
+ sc->device_id = pci_get_device(dev);
- /* Enable Interrupts */
- mrsas_enable_intr(sc);
+ mrsas_get_tunables(sc);
- error = mrsas_kproc_create(mrsas_ocr_thread, sc,
- &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
- device_get_unit(sc->mrsas_dev));
- if (error) {
- printf("Error %d starting rescan thread\n", error);
- goto attach_fail_irq;
- }
+ /*
+ * Set up PCI and registers
+ */
+ cmd = pci_read_config(dev, PCIR_COMMAND, 2);
+ if ((cmd & PCIM_CMD_PORTEN) == 0) {
+ return (ENXIO);
+ }
+ /* Force the busmaster enable bit on. */
+ cmd |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, cmd, 2);
+
+ bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
+
+ sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
+ if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
+ &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
+ == NULL) {
+ device_printf(dev, "Cannot allocate PCI registers\n");
+ goto attach_fail;
+ }
+ sc->bus_tag = rman_get_bustag(sc->reg_res);
+ sc->bus_handle = rman_get_bushandle(sc->reg_res);
+
+	/* Initialize mutexes */
+ mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
+ mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
+ mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
+ mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
+ mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
+ mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
+ mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
+ mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
+
+	/*
+	 * Initialize a counting semaphore to limit the number of
+	 * concurrent IOCTLs.
+	 */
+ sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION);
- mrsas_setup_sysctl(sc);
-
- /* Initiate AEN (Asynchronous Event Notification)*/
+	/* Initialize linked lists */
+ TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
+ TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
+
+ atomic_set(&sc->fw_outstanding, 0);
+
+ sc->io_cmds_highwater = 0;
+
+ /* Create a /dev entry for this device. */
+ sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
+ GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
+ device_get_unit(dev));
+ if (device_get_unit(dev) == 0)
+ make_dev_alias(sc->mrsas_cdev, "megaraid_sas_ioctl_node");
+ if (sc->mrsas_cdev)
+ sc->mrsas_cdev->si_drv1 = sc;
+
+ sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+ sc->UnevenSpanSupport = 0;
+
+ sc->msix_enable = 0;
+
+ /* Initialize Firmware */
+ if (mrsas_init_fw(sc) != SUCCESS) {
+ goto attach_fail_fw;
+ }
+ /* Register SCSI mid-layer */
+ if ((mrsas_cam_attach(sc) != SUCCESS)) {
+ goto attach_fail_cam;
+ }
+ /* Register IRQs */
+ if (mrsas_setup_irq(sc) != SUCCESS) {
+ goto attach_fail_irq;
+ }
+ /* Enable Interrupts */
+ mrsas_enable_intr(sc);
+
+ error = mrsas_kproc_create(mrsas_ocr_thread, sc,
+ &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
+ device_get_unit(sc->mrsas_dev));
+ if (error) {
+ printf("Error %d starting rescan thread\n", error);
+ goto attach_fail_irq;
+ }
+ mrsas_setup_sysctl(sc);
+
+ /* Initiate AEN (Asynchronous Event Notification) */
if (mrsas_start_aen(sc)) {
printf("Error: start aen failed\n");
goto fail_start_aen;
}
+ /*
+ * Add this controller to mrsas_mgmt_info structure so that it can be
+ * exported to management applications
+ */
+ if (device_get_unit(dev) == 0)
+ memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
- return (0);
+ mrsas_mgmt_info.count++;
+ mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
+ mrsas_mgmt_info.max_index++;
+
+ return (0);
fail_start_aen:
attach_fail_irq:
- mrsas_teardown_intr(sc);
+ mrsas_teardown_intr(sc);
attach_fail_cam:
- mrsas_cam_detach(sc);
+ mrsas_cam_detach(sc);
attach_fail_fw:
-//attach_fail_raidmap:
- mrsas_free_mem(sc);
- mtx_destroy(&sc->sim_lock);
- mtx_destroy(&sc->aen_lock);
- mtx_destroy(&sc->pci_lock);
- mtx_destroy(&sc->io_lock);
- mtx_destroy(&sc->ioctl_lock);
- mtx_destroy(&sc->mpt_cmd_pool_lock);
- mtx_destroy(&sc->mfi_cmd_pool_lock);
- mtx_destroy(&sc->raidmap_lock);
+ /* if MSIX vector is allocated and FW Init FAILED then release MSIX */
+ if (sc->msix_enable == 1)
+ pci_release_msi(sc->mrsas_dev);
+ mrsas_free_mem(sc);
+ mtx_destroy(&sc->sim_lock);
+ mtx_destroy(&sc->aen_lock);
+ mtx_destroy(&sc->pci_lock);
+ mtx_destroy(&sc->io_lock);
+ mtx_destroy(&sc->ioctl_lock);
+ mtx_destroy(&sc->mpt_cmd_pool_lock);
+ mtx_destroy(&sc->mfi_cmd_pool_lock);
+ mtx_destroy(&sc->raidmap_lock);
+ /* Destroy the counting semaphore created for Ioctl */
+ sema_destroy(&sc->ioctl_count_sema);
attach_fail:
- destroy_dev(sc->mrsas_cdev);
- if (sc->reg_res){
- bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
- sc->reg_res_id, sc->reg_res);
- }
- return (ENXIO);
-}
-
-/**
- * mrsas_detach: De-allocates and teardown resources
- * input: device struct pointer
- *
- * This function is the entry point for device disconnect and detach. It
- * performs memory de-allocations, shutdown of the controller and various
+ destroy_dev(sc->mrsas_cdev);
+ if (sc->reg_res) {
+ bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
+ sc->reg_res_id, sc->reg_res);
+ }
+ return (ENXIO);
+}
+
+/*
+ * mrsas_detach: De-allocates and teardown resources
+ * input: pointer to device struct
+ *
+ * This function is the entry point for device disconnect and detach.
+ * It performs memory de-allocations, shutdown of the controller and various
* teardown and destroy resource functions.
*/
-static int mrsas_detach(device_t dev)
-{
- struct mrsas_softc *sc;
- int i = 0;
-
- sc = device_get_softc(dev);
- sc->remove_in_progress = 1;
- if(sc->ocr_thread_active)
- wakeup(&sc->ocr_chan);
- while(sc->reset_in_progress){
- i++;
- if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
- mrsas_dprint(sc, MRSAS_INFO,
- "[%2d]waiting for ocr to be finished\n",i);
- }
- pause("mr_shutdown", hz);
- }
- i = 0;
- while(sc->ocr_thread_active){
- i++;
- if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
- mrsas_dprint(sc, MRSAS_INFO,
- "[%2d]waiting for "
- "mrsas_ocr thread to quit ocr %d\n",i,
- sc->ocr_thread_active);
- }
- pause("mr_shutdown", hz);
- }
- mrsas_flush_cache(sc);
- mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
- mrsas_disable_intr(sc);
- mrsas_cam_detach(sc);
- mrsas_teardown_intr(sc);
- mrsas_free_mem(sc);
- mtx_destroy(&sc->sim_lock);
- mtx_destroy(&sc->aen_lock);
- mtx_destroy(&sc->pci_lock);
- mtx_destroy(&sc->io_lock);
- mtx_destroy(&sc->ioctl_lock);
- mtx_destroy(&sc->mpt_cmd_pool_lock);
- mtx_destroy(&sc->mfi_cmd_pool_lock);
- mtx_destroy(&sc->raidmap_lock);
- if (sc->reg_res){
- bus_release_resource(sc->mrsas_dev,
- SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
- }
- destroy_dev(sc->mrsas_cdev);
- if (sc->sysctl_tree != NULL)
- sysctl_ctx_free(&sc->sysctl_ctx);
- return (0);
-}
-
-/**
- * mrsas_free_mem: Frees allocated memory
- * input: Adapter instance soft state
- *
+static int
+mrsas_detach(device_t dev)
+{
+ struct mrsas_softc *sc;
+ int i = 0;
+
+ sc = device_get_softc(dev);
+ sc->remove_in_progress = 1;
+
+ /* Destroy the character device so no other IOCTL will be handled */
+ destroy_dev(sc->mrsas_cdev);
+
+	/*
+	 * Take the instance off the instance array. Note that we will not
+	 * decrement max_index; we let this array be a sparse array.
+	 */
+ for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
+ if (mrsas_mgmt_info.sc_ptr[i] == sc) {
+ mrsas_mgmt_info.count--;
+ mrsas_mgmt_info.sc_ptr[i] = NULL;
+ break;
+ }
+ }
+
+ if (sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
+ while (sc->reset_in_progress) {
+ i++;
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "[%2d]waiting for ocr to be finished\n", i);
+ }
+ pause("mr_shutdown", hz);
+ }
+ i = 0;
+ while (sc->ocr_thread_active) {
+ i++;
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "[%2d]waiting for "
+ "mrsas_ocr thread to quit ocr %d\n", i,
+ sc->ocr_thread_active);
+ }
+ pause("mr_shutdown", hz);
+ }
+ mrsas_flush_cache(sc);
+ mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
+ mrsas_disable_intr(sc);
+ mrsas_cam_detach(sc);
+ mrsas_teardown_intr(sc);
+ mrsas_free_mem(sc);
+ mtx_destroy(&sc->sim_lock);
+ mtx_destroy(&sc->aen_lock);
+ mtx_destroy(&sc->pci_lock);
+ mtx_destroy(&sc->io_lock);
+ mtx_destroy(&sc->ioctl_lock);
+ mtx_destroy(&sc->mpt_cmd_pool_lock);
+ mtx_destroy(&sc->mfi_cmd_pool_lock);
+ mtx_destroy(&sc->raidmap_lock);
+
+ /* Wait for all the semaphores to be released */
+ while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
+ pause("mr_shutdown", hz);
+
+ /* Destroy the counting semaphore created for Ioctl */
+ sema_destroy(&sc->ioctl_count_sema);
+
+ if (sc->reg_res) {
+ bus_release_resource(sc->mrsas_dev,
+ SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
+ }
+ if (sc->sysctl_tree != NULL)
+ sysctl_ctx_free(&sc->sysctl_ctx);
+
+ return (0);
+}
+
+/*
+ * mrsas_free_mem: Frees allocated memory
+ * input: Adapter instance soft state
+ *
* This function is called from mrsas_detach() to free previously allocated
- * memory.
+ * memory.
*/
-void mrsas_free_mem(struct mrsas_softc *sc)
+void
+mrsas_free_mem(struct mrsas_softc *sc)
{
- int i;
- u_int32_t max_cmd;
- struct mrsas_mfi_cmd *mfi_cmd;
- struct mrsas_mpt_cmd *mpt_cmd;
-
+ int i;
+ u_int32_t max_cmd;
+ struct mrsas_mfi_cmd *mfi_cmd;
+ struct mrsas_mpt_cmd *mpt_cmd;
+
+ /*
+ * Free RAID map memory
+ */
+ for (i = 0; i < 2; i++) {
+ if (sc->raidmap_phys_addr[i])
+ bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
+ if (sc->raidmap_mem[i] != NULL)
+ bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
+ if (sc->raidmap_tag[i] != NULL)
+ bus_dma_tag_destroy(sc->raidmap_tag[i]);
+
+ if (sc->ld_drv_map[i] != NULL)
+ free(sc->ld_drv_map[i], M_MRSAS);
+ }
+
+	/*
+	 * Free version buffer memory
+	 */
+ if (sc->verbuf_phys_addr)
+ bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
+ if (sc->verbuf_mem != NULL)
+ bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
+ if (sc->verbuf_tag != NULL)
+ bus_dma_tag_destroy(sc->verbuf_tag);
+
+
+ /*
+ * Free sense buffer memory
+ */
+ if (sc->sense_phys_addr)
+ bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
+ if (sc->sense_mem != NULL)
+ bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
+ if (sc->sense_tag != NULL)
+ bus_dma_tag_destroy(sc->sense_tag);
+
+ /*
+ * Free chain frame memory
+ */
+ if (sc->chain_frame_phys_addr)
+ bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
+ if (sc->chain_frame_mem != NULL)
+ bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
+ if (sc->chain_frame_tag != NULL)
+ bus_dma_tag_destroy(sc->chain_frame_tag);
+
+ /*
+ * Free IO Request memory
+ */
+ if (sc->io_request_phys_addr)
+ bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
+ if (sc->io_request_mem != NULL)
+ bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
+ if (sc->io_request_tag != NULL)
+ bus_dma_tag_destroy(sc->io_request_tag);
+
+ /*
+ * Free Reply Descriptor memory
+ */
+ if (sc->reply_desc_phys_addr)
+ bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
+ if (sc->reply_desc_mem != NULL)
+ bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
+ if (sc->reply_desc_tag != NULL)
+ bus_dma_tag_destroy(sc->reply_desc_tag);
+
+ /*
+ * Free event detail memory
+ */
+ if (sc->evt_detail_phys_addr)
+ bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
+ if (sc->evt_detail_mem != NULL)
+ bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
+ if (sc->evt_detail_tag != NULL)
+ bus_dma_tag_destroy(sc->evt_detail_tag);
+
/*
- * Free RAID map memory
- */
- for (i=0; i < 2; i++)
- {
- if (sc->raidmap_phys_addr[i])
- bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
- if (sc->raidmap_mem[i] != NULL)
- bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
- if (sc->raidmap_tag[i] != NULL)
- bus_dma_tag_destroy(sc->raidmap_tag[i]);
- }
-
- /*
- * Free version buffer memroy
- */
- if (sc->verbuf_phys_addr)
- bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
- if (sc->verbuf_mem != NULL)
- bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
- if (sc->verbuf_tag != NULL)
- bus_dma_tag_destroy(sc->verbuf_tag);
-
-
- /*
- * Free sense buffer memory
- */
- if (sc->sense_phys_addr)
- bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
- if (sc->sense_mem != NULL)
- bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
- if (sc->sense_tag != NULL)
- bus_dma_tag_destroy(sc->sense_tag);
-
- /*
- * Free chain frame memory
- */
- if (sc->chain_frame_phys_addr)
- bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
- if (sc->chain_frame_mem != NULL)
- bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
- if (sc->chain_frame_tag != NULL)
- bus_dma_tag_destroy(sc->chain_frame_tag);
-
- /*
- * Free IO Request memory
- */
- if (sc->io_request_phys_addr)
- bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
- if (sc->io_request_mem != NULL)
- bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
- if (sc->io_request_tag != NULL)
- bus_dma_tag_destroy(sc->io_request_tag);
-
- /*
- * Free Reply Descriptor memory
- */
- if (sc->reply_desc_phys_addr)
- bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
- if (sc->reply_desc_mem != NULL)
- bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
- if (sc->reply_desc_tag != NULL)
- bus_dma_tag_destroy(sc->reply_desc_tag);
-
- /*
- * Free event detail memory
- */
- if (sc->evt_detail_phys_addr)
- bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
- if (sc->evt_detail_mem != NULL)
- bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
- if (sc->evt_detail_tag != NULL)
- bus_dma_tag_destroy(sc->evt_detail_tag);
-
- /*
- * Free MFI frames
- */
+ * Free MFI frames
+ */
if (sc->mfi_cmd_list) {
- for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
- mfi_cmd = sc->mfi_cmd_list[i];
- mrsas_free_frame(sc, mfi_cmd);
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+ mfi_cmd = sc->mfi_cmd_list[i];
+ mrsas_free_frame(sc, mfi_cmd);
}
- }
- if (sc->mficmd_frame_tag != NULL)
- bus_dma_tag_destroy(sc->mficmd_frame_tag);
-
- /*
- * Free MPT internal command list
- */
- max_cmd = sc->max_fw_cmds;
+ }
+ if (sc->mficmd_frame_tag != NULL)
+ bus_dma_tag_destroy(sc->mficmd_frame_tag);
+
+ /*
+ * Free MPT internal command list
+ */
+ max_cmd = sc->max_fw_cmds;
if (sc->mpt_cmd_list) {
- for (i = 0; i < max_cmd; i++) {
- mpt_cmd = sc->mpt_cmd_list[i];
- bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
- free(sc->mpt_cmd_list[i], M_MRSAS);
- }
- free(sc->mpt_cmd_list, M_MRSAS);
- sc->mpt_cmd_list = NULL;
- }
-
- /*
- * Free MFI internal command list
- */
-
- if (sc->mfi_cmd_list) {
- for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
- free(sc->mfi_cmd_list[i], M_MRSAS);
- }
- free(sc->mfi_cmd_list, M_MRSAS);
- sc->mfi_cmd_list = NULL;
+ for (i = 0; i < max_cmd; i++) {
+ mpt_cmd = sc->mpt_cmd_list[i];
+ bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
+ free(sc->mpt_cmd_list[i], M_MRSAS);
+ }
+ free(sc->mpt_cmd_list, M_MRSAS);
+ sc->mpt_cmd_list = NULL;
}
+ /*
+ * Free MFI internal command list
+ */
- /*
- * Free request descriptor memory
- */
- free(sc->req_desc, M_MRSAS);
- sc->req_desc = NULL;
+ if (sc->mfi_cmd_list) {
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+ free(sc->mfi_cmd_list[i], M_MRSAS);
+ }
+ free(sc->mfi_cmd_list, M_MRSAS);
+ sc->mfi_cmd_list = NULL;
+ }
+ /*
+ * Free request descriptor memory
+ */
+ free(sc->req_desc, M_MRSAS);
+ sc->req_desc = NULL;
- /*
- * Destroy parent tag
- */
- if (sc->mrsas_parent_tag != NULL)
- bus_dma_tag_destroy(sc->mrsas_parent_tag);
+ /*
+ * Destroy parent tag
+ */
+ if (sc->mrsas_parent_tag != NULL)
+ bus_dma_tag_destroy(sc->mrsas_parent_tag);
}
-/**
- * mrsas_teardown_intr: Teardown interrupt
- * input: Adapter instance soft state
+/*
+ * mrsas_teardown_intr: Teardown interrupt
+ * input: Adapter instance soft state
*
- * This function is called from mrsas_detach() to teardown and release
- * bus interrupt resourse.
 * This function is called from mrsas_detach() to tear down and release bus
 * interrupt resource.
*/
-void mrsas_teardown_intr(struct mrsas_softc *sc)
+void
+mrsas_teardown_intr(struct mrsas_softc *sc)
{
- if (sc->intr_handle)
- bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
- if (sc->mrsas_irq != NULL)
- bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
- sc->intr_handle = NULL;
+ int i;
+
+ if (!sc->msix_enable) {
+ if (sc->intr_handle[0])
+ bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
+ if (sc->mrsas_irq[0] != NULL)
+ bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
+ sc->irq_id[0], sc->mrsas_irq[0]);
+ sc->intr_handle[0] = NULL;
+ } else {
+ for (i = 0; i < sc->msix_vectors; i++) {
+ if (sc->intr_handle[i])
+ bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
+ sc->intr_handle[i]);
+
+ if (sc->mrsas_irq[i] != NULL)
+ bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
+ sc->irq_id[i], sc->mrsas_irq[i]);
+
+ sc->intr_handle[i] = NULL;
+ }
+ pci_release_msi(sc->mrsas_dev);
+ }
+
}
-/**
- * mrsas_suspend: Suspend entry point
- * input: Device struct pointer
- *
- * This function is the entry point for system suspend from the OS.
+/*
+ * mrsas_suspend: Suspend entry point
+ * input: Device struct pointer
+ *
+ * This function is the entry point for system suspend from the OS.
*/
-static int mrsas_suspend(device_t dev)
+static int
+mrsas_suspend(device_t dev)
{
- struct mrsas_softc *sc;
+ struct mrsas_softc *sc;
- sc = device_get_softc(dev);
- return (0);
+ sc = device_get_softc(dev);
+ return (0);
}
-/**
- * mrsas_resume: Resume entry point
- * input: Device struct pointer
- *
- * This function is the entry point for system resume from the OS.
+/*
+ * mrsas_resume: Resume entry point
+ * input: Device struct pointer
+ *
+ * This function is the entry point for system resume from the OS.
*/
-static int mrsas_resume(device_t dev)
+static int
+mrsas_resume(device_t dev)
{
- struct mrsas_softc *sc;
+ struct mrsas_softc *sc;
- sc = device_get_softc(dev);
- return (0);
+ sc = device_get_softc(dev);
+ return (0);
}
-/**
- * mrsas_ioctl: IOCtl commands entry point.
- *
- * This function is the entry point for IOCtls from the OS. It calls the
+/*
+ * mrsas_ioctl: IOCtl commands entry point.
+ *
+ * This function is the entry point for IOCtls from the OS. It calls the
* appropriate function for processing depending on the command received.
*/
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
- struct mrsas_softc *sc;
- int ret = 0, i = 0;
-
- sc = (struct mrsas_softc *)(dev->si_drv1);
-
- if (sc->remove_in_progress) {
- mrsas_dprint(sc, MRSAS_INFO,
- "Driver remove or shutdown called.\n");
- return ENOENT;
- }
-
- mtx_lock_spin(&sc->ioctl_lock);
- if (!sc->reset_in_progress) {
- mtx_unlock_spin(&sc->ioctl_lock);
- goto do_ioctl;
- }
-
- /* Release ioclt_lock, and wait for OCR
- * to be finished */
- mtx_unlock_spin(&sc->ioctl_lock);
- while(sc->reset_in_progress){
- i++;
- if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
- mrsas_dprint(sc, MRSAS_INFO,
- "[%2d]waiting for "
- "OCR to be finished %d\n",i,
- sc->ocr_thread_active);
- }
- pause("mr_ioctl", hz);
- }
+ struct mrsas_softc *sc;
+ int ret = 0, i = 0;
+
+ struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
+
+ /* get the Host number & the softc from data sent by the Application */
+ sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
+
+ if ((mrsas_mgmt_info.max_index == user_ioc->host_no) || (sc == NULL)) {
+ printf("Please check the controller number\n");
+ if (sc == NULL)
+ printf("There is NO such Host no. %d\n", user_ioc->host_no);
+
+ return ENOENT;
+ }
+ if (sc->remove_in_progress) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "Driver remove or shutdown called.\n");
+ return ENOENT;
+ }
+ mtx_lock_spin(&sc->ioctl_lock);
+ if (!sc->reset_in_progress) {
+ mtx_unlock_spin(&sc->ioctl_lock);
+ goto do_ioctl;
+ }
+ mtx_unlock_spin(&sc->ioctl_lock);
+ while (sc->reset_in_progress) {
+ i++;
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "[%2d]waiting for "
+ "OCR to be finished %d\n", i,
+ sc->ocr_thread_active);
+ }
+ pause("mr_ioctl", hz);
+ }
do_ioctl:
- switch (cmd) {
- case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
- ret = mrsas_passthru(sc, (void *)arg);
- break;
- case MRSAS_IOC_SCAN_BUS:
- ret = mrsas_bus_scan(sc);
- break;
- }
-
- return (ret);
-}
-
-/**
- * mrsas_setup_irq: Set up interrupt.
- * input: Adapter instance soft state
- *
+ switch (cmd) {
+ case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
+#ifdef COMPAT_FREEBSD32
+ case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
+#endif
+ /*
+ * Decrement the Ioctl counting Semaphore before getting an
+ * mfi command
+ */
+ sema_wait(&sc->ioctl_count_sema);
+
+ ret = mrsas_passthru(sc, (void *)arg, cmd);
+
+ /* Increment the Ioctl counting semaphore value */
+ sema_post(&sc->ioctl_count_sema);
+
+ break;
+ case MRSAS_IOC_SCAN_BUS:
+ ret = mrsas_bus_scan(sc);
+ break;
+ default:
+ mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
+ ret = ENOENT;
+ }
+
+ return (ret);
+}
+
+/*
+ * mrsas_poll: poll entry point for mrsas driver fd
+ *
+ * This function is the entry point for poll from the OS. It waits for some AEN
+ * events to be triggered from the controller and notifies back.
+ */
+static int
+mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
+{
+ struct mrsas_softc *sc;
+ int revents = 0;
+
+ sc = dev->si_drv1;
+
+ if (poll_events & (POLLIN | POLLRDNORM)) {
+ if (sc->mrsas_aen_triggered) {
+ revents |= poll_events & (POLLIN | POLLRDNORM);
+ }
+ }
+ if (revents == 0) {
+ if (poll_events & (POLLIN | POLLRDNORM)) {
+ sc->mrsas_poll_waiting = 1;
+ selrecord(td, &sc->mrsas_select);
+ }
+ }
+ return revents;
+}
+
+/*
+ * mrsas_setup_irq: Set up interrupt
+ * input: Adapter instance soft state
+ *
* This function sets up interrupts as a bus resource, with flags indicating
- * resource permitting contemporaneous sharing and for resource to activate
+ * resource permitting contemporaneous sharing and for resource to activate
* atomically.
*/
-static int mrsas_setup_irq(struct mrsas_softc *sc)
+static int
+mrsas_setup_irq(struct mrsas_softc *sc)
+{
+ if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
+ device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
+
+ else {
+ device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
+ sc->irq_context[0].sc = sc;
+ sc->irq_context[0].MSIxIndex = 0;
+ sc->irq_id[0] = 0;
+ sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
+ SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
+ if (sc->mrsas_irq[0] == NULL) {
+ device_printf(sc->mrsas_dev, "Cannot allocate legcay"
+ "interrupt\n");
+ return (FAIL);
+ }
+ if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
+ INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
+ &sc->irq_context[0], &sc->intr_handle[0])) {
+ device_printf(sc->mrsas_dev, "Cannot set up legacy"
+ "interrupt\n");
+ return (FAIL);
+ }
+ }
+ return (0);
+}
+
+/*
+ * mrsas_isr: ISR entry point
+ * input: argument pointer
+ *
+ * This function is the interrupt service routine entry point. There are two
+ * types of interrupts, state change interrupt and response interrupt. If an
+ * interrupt is not ours, we just return.
+ */
+void
+mrsas_isr(void *arg)
{
- sc->irq_id = 0;
- sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
- &sc->irq_id, RF_SHAREABLE | RF_ACTIVE);
- if (sc->mrsas_irq == NULL){
- device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
- return (FAIL);
- }
- if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE|INTR_TYPE_CAM,
- NULL, mrsas_isr, sc, &sc->intr_handle)) {
- device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
- return (FAIL);
- }
-
- return (0);
-}
-
-/*
- * mrsas_isr: ISR entry point
- * input: argument pointer
- *
- * This function is the interrupt service routine entry point. There
- * are two types of interrupts, state change interrupt and response
- * interrupt. If an interrupt is not ours, we just return.
- */
-void mrsas_isr(void *arg)
-{
- struct mrsas_softc *sc = (struct mrsas_softc *)arg;
- int status;
-
- /* Clear FW state change interrupt */
- status = mrsas_clear_intr(sc);
-
- /* Not our interrupt */
- if (!status)
- return;
-
- /* If we are resetting, bail */
- if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
- printf(" Entered into ISR when OCR is going active. \n");
- mrsas_clear_intr(sc);
- return;
- }
- /* Process for reply request and clear response interrupt */
- if (mrsas_complete_cmd(sc) != SUCCESS)
- mrsas_clear_intr(sc);
-
- return;
-}
-
-/*
- * mrsas_complete_cmd: Process reply request
- * input: Adapter instance soft state
- *
- * This function is called from mrsas_isr() to process reply request and
- * clear response interrupt. Processing of the reply request entails
- * walking through the reply descriptor array for the command request
- * pended from Firmware. We look at the Function field to determine
- * the command type and perform the appropriate action. Before we
- * return, we clear the response interrupt.
- */
-static int mrsas_complete_cmd(struct mrsas_softc *sc)
-{
- Mpi2ReplyDescriptorsUnion_t *desc;
- MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
- MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
- struct mrsas_mpt_cmd *cmd_mpt;
- struct mrsas_mfi_cmd *cmd_mfi;
- u_int8_t arm, reply_descript_type;
- u_int16_t smid, num_completed;
- u_int8_t status, extStatus;
- union desc_value desc_val;
- PLD_LOAD_BALANCE_INFO lbinfo;
- u_int32_t device_id;
- int threshold_reply_count = 0;
-
-
- /* If we have a hardware error, not need to continue */
- if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
- return (DONE);
-
- desc = sc->reply_desc_mem;
- desc += sc->last_reply_idx;
-
- reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
-
- desc_val.word = desc->Words;
- num_completed = 0;
-
- reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
-
- /* Find our reply descriptor for the command and process */
- while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
- {
- smid = reply_desc->SMID;
- cmd_mpt = sc->mpt_cmd_list[smid -1];
- scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;
-
- status = scsi_io_req->RaidContext.status;
- extStatus = scsi_io_req->RaidContext.exStatus;
-
- switch (scsi_io_req->Function)
- {
- case MPI2_FUNCTION_SCSI_IO_REQUEST : /*Fast Path IO.*/
- device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
- lbinfo = &sc->load_balance_info[device_id];
- if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
- arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
- atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
- cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
- }
- //Fall thru and complete IO
- case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
- mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
- mrsas_cmd_done(sc, cmd_mpt);
- scsi_io_req->RaidContext.status = 0;
- scsi_io_req->RaidContext.exStatus = 0;
- atomic_dec(&sc->fw_outstanding);
- break;
- case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
- cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
- mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
- cmd_mpt->flags = 0;
- mrsas_release_mpt_cmd(cmd_mpt);
- break;
- }
-
- sc->last_reply_idx++;
- if (sc->last_reply_idx >= sc->reply_q_depth)
- sc->last_reply_idx = 0;
-
- desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
- num_completed++;
- threshold_reply_count++;
-
- /* Get the next reply descriptor */
- if (!sc->last_reply_idx)
- desc = sc->reply_desc_mem;
- else
- desc++;
-
- reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
- desc_val.word = desc->Words;
-
- reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
-
- if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
- break;
-
- /*
- * Write to reply post index after completing threshold reply count
- * and still there are more replies in reply queue pending to be
- * completed.
- */
- if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
- sc->last_reply_idx);
- threshold_reply_count = 0;
- }
- }
-
- /* No match, just return */
- if (num_completed == 0)
- return (DONE);
-
- /* Clear response interrupt */
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);
-
- return(0);
-}
-
-/*
- * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
- * input: Adapter instance soft state
+ struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
+ struct mrsas_softc *sc = irq_context->sc;
+ int status = 0;
+
+ if (!sc->msix_vectors) {
+ status = mrsas_clear_intr(sc);
+ if (!status)
+ return;
+ }
+ /* If we are resetting, bail */
+ if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
+ printf(" Entered into ISR when OCR is going active. \n");
+ mrsas_clear_intr(sc);
+ return;
+ }
+	/* Process the reply request and clear the response interrupt */
+ if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
+ mrsas_clear_intr(sc);
+
+ return;
+}
+
+/*
+ * mrsas_complete_cmd: Process reply request
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_isr() to process reply request and clear
+ * response interrupt. Processing of the reply request entails walking
+ * through the reply descriptor array for the command request pended from
+ * Firmware. We look at the Function field to determine the command type and
+ * perform the appropriate action. Before we return, we clear the response
+ * interrupt.
+ */
+static int
+mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
+{
+ Mpi2ReplyDescriptorsUnion_t *desc;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
+ MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
+ struct mrsas_mpt_cmd *cmd_mpt;
+ struct mrsas_mfi_cmd *cmd_mfi;
+ u_int8_t arm, reply_descript_type;
+ u_int16_t smid, num_completed;
+ u_int8_t status, extStatus;
+ union desc_value desc_val;
+ PLD_LOAD_BALANCE_INFO lbinfo;
+ u_int32_t device_id;
+ int threshold_reply_count = 0;
+
+
+	/* If we have a hardware error, there is no need to continue */
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
+ return (DONE);
+
+ desc = sc->reply_desc_mem;
+ desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
+ + sc->last_reply_idx[MSIxIndex];
+
+ reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
+
+ desc_val.word = desc->Words;
+ num_completed = 0;
+
+ reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ /* Find our reply descriptor for the command and process */
+ while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
+ smid = reply_desc->SMID;
+ cmd_mpt = sc->mpt_cmd_list[smid - 1];
+ scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
+
+ status = scsi_io_req->RaidContext.status;
+ extStatus = scsi_io_req->RaidContext.exStatus;
+
+ switch (scsi_io_req->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
+ device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
+ lbinfo = &sc->load_balance_info[device_id];
+ if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
+ arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
+ atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
+ cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
+ }
+ /* Fall thru and complete IO */
+ case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
+ mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
+ mrsas_cmd_done(sc, cmd_mpt);
+ scsi_io_req->RaidContext.status = 0;
+ scsi_io_req->RaidContext.exStatus = 0;
+ atomic_dec(&sc->fw_outstanding);
+ break;
+ case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
+ cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
+ mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
+ cmd_mpt->flags = 0;
+ mrsas_release_mpt_cmd(cmd_mpt);
+ break;
+ }
+
+ sc->last_reply_idx[MSIxIndex]++;
+ if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
+ sc->last_reply_idx[MSIxIndex] = 0;
+
+ desc->Words = ~((uint64_t)0x00); /* set it back to all
+ * 0xFFFFFFFFs */
+ num_completed++;
+ threshold_reply_count++;
+
+ /* Get the next reply descriptor */
+ if (!sc->last_reply_idx[MSIxIndex]) {
+ desc = sc->reply_desc_mem;
+ desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
+ } else
+ desc++;
+
+ reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
+ desc_val.word = desc->Words;
+
+ reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ break;
+
+		/*
+		 * After completing the threshold reply count, write to the
+		 * reply post index while there are still more replies in the
+		 * reply queue pending to be completed.
+		 */
+ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+ if (sc->msix_enable) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY))
+ mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
+ ((MSIxIndex & 0x7) << 24) |
+ sc->last_reply_idx[MSIxIndex]);
+ else
+ mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
+ sc->last_reply_idx[MSIxIndex]);
+ } else
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set,
+ reply_post_host_index), sc->last_reply_idx[0]);
+
+ threshold_reply_count = 0;
+ }
+ }
+
+ /* No match, just return */
+ if (num_completed == 0)
+ return (DONE);
+
+ /* Clear response interrupt */
+ if (sc->msix_enable) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY)) {
+ mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
+ ((MSIxIndex & 0x7) << 24) |
+ sc->last_reply_idx[MSIxIndex]);
+ } else
+ mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
+ sc->last_reply_idx[MSIxIndex]);
+ } else
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set,
+ reply_post_host_index), sc->last_reply_idx[0]);
+
+ return (0);
+}
+
+/*
+ * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
+ * input: Adapter instance soft state
*
* This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
- * It checks the command status and maps the appropriate CAM status for the CCB.
- */
-void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
-{
- struct mrsas_softc *sc = cmd->sc;
- u_int8_t *sense_data;
-
- switch (status) {
- case MFI_STAT_OK:
- cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
- break;
- case MFI_STAT_SCSI_IO_FAILED:
- case MFI_STAT_SCSI_DONE_WITH_ERROR:
- cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
- sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
- if (sense_data) {
- /* For now just copy 18 bytes back */
- memcpy(sense_data, cmd->sense, 18);
- cmd->ccb_ptr->csio.sense_len = 18;
- cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
- }
- break;
- case MFI_STAT_LD_OFFLINE:
- case MFI_STAT_DEVICE_NOT_FOUND:
- if (cmd->ccb_ptr->ccb_h.target_lun)
- cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
- else
- cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
- break;
- case MFI_STAT_CONFIG_SEQ_MISMATCH:
- /*send status to CAM layer to retry sending command without
- * decrementing retry counter*/
- cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
- break;
- default:
- device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
- cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
- cmd->ccb_ptr->csio.scsi_status = status;
- }
- return;
-}
-
-/*
- * mrsas_alloc_mem: Allocate DMAable memory.
- * input: Adapter instance soft state
- *
- * This function creates the parent DMA tag and allocates DMAable memory.
- * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
- * into Kernel virtual address. Callback argument is physical memory address.
- */
-static int mrsas_alloc_mem(struct mrsas_softc *sc)
-{
- u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
- chain_frame_size, evt_detail_size;
-
- /*
- * Allocate parent DMA tag
- */
- if (bus_dma_tag_create(NULL, /* parent */
- 1, /* alignment */
- 0, /* boundary */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MRSAS_MAX_IO_SIZE,/* maxsize */
- MRSAS_MAX_SGL, /* nsegments */
- MRSAS_MAX_IO_SIZE,/* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->mrsas_parent_tag /* tag */
- )) {
- device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
- return(ENOMEM);
- }
-
- /*
- * Allocate for version buffer
- */
- verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
- if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
- 1, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- verbuf_size, // maxsize
- 1, // msegments
- verbuf_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->verbuf_tag)) {
- device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
- BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
- return (ENOMEM);
- }
- bzero(sc->verbuf_mem, verbuf_size);
- if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
- verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
- device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
- return(ENOMEM);
- }
-
- /*
- * Allocate IO Request Frames
- */
- io_req_size = sc->io_frames_alloc_sz;
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 16, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- io_req_size, // maxsize
- 1, // msegments
- io_req_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->io_request_tag)) {
- device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
- BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
- return (ENOMEM);
- }
- bzero(sc->io_request_mem, io_req_size);
- if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
- sc->io_request_mem, io_req_size, mrsas_addr_cb,
- &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
- return (ENOMEM);
- }
-
- /*
- * Allocate Chain Frames
- */
- chain_frame_size = sc->chain_frames_alloc_sz;
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 4, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- chain_frame_size, // maxsize
- 1, // msegments
- chain_frame_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->chain_frame_tag)) {
- device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
- BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
- return (ENOMEM);
- }
- bzero(sc->chain_frame_mem, chain_frame_size);
- if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
- sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
- &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
- return (ENOMEM);
- }
-
- /*
- * Allocate Reply Descriptor Array
- */
- reply_desc_size = sc->reply_alloc_sz;
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 16, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- reply_desc_size, // maxsize
- 1, // msegments
- reply_desc_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->reply_desc_tag)) {
- device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
- BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
- return (ENOMEM);
- }
- if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
- sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
- &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
- return (ENOMEM);
- }
-
- /*
- * Allocate Sense Buffer Array. Keep in lower 4GB
- */
- sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
- if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
- 64, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- sense_size, // maxsize
- 1, // nsegments
- sense_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->sense_tag)) {
- device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
- BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
- return (ENOMEM);
- }
- if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
- sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
- BUS_DMA_NOWAIT)){
- device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
- return (ENOMEM);
- }
-
- /*
- * Allocate for Event detail structure
- */
- evt_detail_size = sizeof(struct mrsas_evt_detail);
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 1, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- evt_detail_size, // maxsize
- 1, // msegments
- evt_detail_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->evt_detail_tag)) {
- device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
- BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
- return (ENOMEM);
- }
- bzero(sc->evt_detail_mem, evt_detail_size);
- if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
- sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
- &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
- return (ENOMEM);
- }
-
-
- /*
- * Create a dma tag for data buffers; size will be the maximum
- * possible I/O size (280kB).
- */
- if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
- 1, // alignment
- 0, // boundary
- BUS_SPACE_MAXADDR, // lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- MRSAS_MAX_IO_SIZE, // maxsize
- MRSAS_MAX_SGL, // nsegments
- MRSAS_MAX_IO_SIZE, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- busdma_lock_mutex, // lockfunc
- &sc->io_lock, // lockfuncarg
- &sc->data_tag)) {
- device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
- return(ENOMEM);
- }
-
- return(0);
-}
-
-/*
- * mrsas_addr_cb: Callback function of bus_dmamap_load()
- * input: callback argument,
- * machine dependent type that describes DMA segments,
- * number of segments,
- * error code.
- *
- * This function is for the driver to receive mapping information resultant
- * of the bus_dmamap_load(). The information is actually not being used,
- * but the address is saved anyway.
+ * It checks the command status and maps the appropriate CAM status for the
+ * CCB.
+ */
+void
+mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
+{
+ struct mrsas_softc *sc = cmd->sc;
+ u_int8_t *sense_data;
+
+ switch (status) {
+ case MFI_STAT_OK:
+ cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+ cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+ sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
+ if (sense_data) {
+ /* For now just copy 18 bytes back */
+ memcpy(sense_data, cmd->sense, 18);
+ cmd->ccb_ptr->csio.sense_len = 18;
+ cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
+ }
+ break;
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ if (cmd->ccb_ptr->ccb_h.target_lun)
+ cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
+ else
+ cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
+ break;
+ case MFI_STAT_CONFIG_SEQ_MISMATCH:
+ cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
+ break;
+ default:
+ device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
+ cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
+ cmd->ccb_ptr->csio.scsi_status = status;
+ }
+ return;
+}
+
+/*
+ * mrsas_alloc_mem: Allocate DMAable memory
+ * input: Adapter instance soft state
+ *
+ * This function creates the parent DMA tag and allocates DMAable memory. DMA
+ * tag describes constraints of DMA mapping. Memory allocated is mapped into
+ * Kernel virtual address. Callback argument is physical memory address.
+ */
+static int
+mrsas_alloc_mem(struct mrsas_softc *sc)
+{
+ u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
+ evt_detail_size, count;
+
+ /*
+ * Allocate parent DMA tag
+ */
+ if (bus_dma_tag_create(NULL, /* parent */
+ 1, /* alignment */
+ 0, /* boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MRSAS_MAX_IO_SIZE, /* maxsize */
+ MRSAS_MAX_SGL, /* nsegments */
+ MRSAS_MAX_IO_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->mrsas_parent_tag /* tag */
+ )) {
+ device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
+ return (ENOMEM);
+ }
+ /*
+ * Allocate for version buffer
+ */
+ verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ verbuf_size,
+ 1,
+ verbuf_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->verbuf_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
+ BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->verbuf_mem, verbuf_size);
+ if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
+ verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
+ BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
+ return (ENOMEM);
+ }
+ /*
+ * Allocate IO Request Frames
+ */
+ io_req_size = sc->io_frames_alloc_sz;
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 16, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ io_req_size,
+ 1,
+ io_req_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->io_request_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
+ BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->io_request_mem, io_req_size);
+ if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
+ sc->io_request_mem, io_req_size, mrsas_addr_cb,
+ &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
+ return (ENOMEM);
+ }
+ /*
+ * Allocate Chain Frames
+ */
+ chain_frame_size = sc->chain_frames_alloc_sz;
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 4, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ chain_frame_size,
+ 1,
+ chain_frame_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->chain_frame_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
+ BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->chain_frame_mem, chain_frame_size);
+ if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
+ sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
+ &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
+ return (ENOMEM);
+ }
+ count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
+ /*
+ * Allocate Reply Descriptor Array
+ */
+ reply_desc_size = sc->reply_alloc_sz * count;
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 16, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ reply_desc_size,
+ 1,
+ reply_desc_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->reply_desc_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
+ BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
+ sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
+ &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
+ return (ENOMEM);
+ }
+ /*
+ * Allocate Sense Buffer Array. Keep in lower 4GB
+ */
+ sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 64, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ sense_size,
+ 1,
+ sense_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->sense_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
+ BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
+ sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
+ BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
+ return (ENOMEM);
+ }
+ /*
+ * Allocate for Event detail structure
+ */
+ evt_detail_size = sizeof(struct mrsas_evt_detail);
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ evt_detail_size,
+ 1,
+ evt_detail_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->evt_detail_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
+ BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->evt_detail_mem, evt_detail_size);
+ if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
+ sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
+ &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
+ return (ENOMEM);
+ }
+ /*
+ * Create a dma tag for data buffers; size will be the maximum
+ * possible I/O size (280kB).
+ */
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1,
+ 0,
+ BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ MRSAS_MAX_IO_SIZE,
+ MRSAS_MAX_SGL,
+ MRSAS_MAX_IO_SIZE,
+ BUS_DMA_ALLOCNOW,
+ busdma_lock_mutex,
+ &sc->io_lock,
+ &sc->data_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
+ return (ENOMEM);
+ }
+ return (0);
+}
+
+/*
+ * mrsas_addr_cb: Callback function of bus_dmamap_load()
+ * input: callback argument, machine dependent type
+ * that describes DMA segments, number of segments, error code
+ *
+ * This function is for the driver to receive mapping information resultant of
+ * the bus_dmamap_load(). The information is actually not being used, but the
+ * address is saved anyway.
*/
void
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
- bus_addr_t *addr;
-
- addr = arg;
- *addr = segs[0].ds_addr;
+ bus_addr_t *addr;
+
+ addr = arg;
+ *addr = segs[0].ds_addr;
}
/*
- * mrsas_setup_raidmap: Set up RAID map.
- * input: Adapter instance soft state
+ * mrsas_setup_raidmap: Set up RAID map.
+ * input: Adapter instance soft state
*
* Allocate DMA memory for the RAID maps and perform setup.
*/
-static int mrsas_setup_raidmap(struct mrsas_softc *sc)
-{
- sc->map_sz = sizeof(MR_FW_RAID_MAP) +
- (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
-
- for (int i=0; i < 2; i++)
- {
- if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
- 4, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- sc->map_sz, // maxsize
- 1, // nsegments
- sc->map_sz, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->raidmap_tag[i])) {
- device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
- BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
- device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
- return (ENOMEM);
- }
- if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
- sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
- BUS_DMA_NOWAIT)){
- device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
- return (ENOMEM);
- }
- if (!sc->raidmap_mem[i]) {
- device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
- return (ENOMEM);
- }
- }
-
- if (!mrsas_get_map_info(sc))
- mrsas_sync_map_info(sc);
-
- return (0);
-}
-
-/**
- * mrsas_init_fw: Initialize Firmware
- * input: Adapter soft state
- *
- * Calls transition_to_ready() to make sure Firmware is in operational
- * state and calls mrsas_init_adapter() to send IOC_INIT command to
- * Firmware. It issues internal commands to get the controller info
- * after the IOC_INIT command response is received by Firmware.
- * Note: code relating to get_pdlist, get_ld_list and max_sectors
- * are currently not being used, it is left here as placeholder.
- */
-static int mrsas_init_fw(struct mrsas_softc *sc)
-{
- u_int32_t max_sectors_1;
- u_int32_t max_sectors_2;
- u_int32_t tmp_sectors;
- struct mrsas_ctrl_info *ctrl_info;
-
- int ret, ocr = 0;
-
-
- /* Make sure Firmware is ready */
- ret = mrsas_transition_to_ready(sc, ocr);
- if (ret != SUCCESS) {
- return(ret);
- }
-
- /* Get operational params, sge flags, send init cmd to ctlr */
- if (mrsas_init_adapter(sc) != SUCCESS){
- device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
- return(1);
- }
-
- /* Allocate internal commands for pass-thru */
- if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
- device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
- return(1);
- }
-
- if (mrsas_setup_raidmap(sc) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
- return(1);
- }
-
- /* For pass-thru, get PD/LD list and controller info */
- memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
- mrsas_get_pd_list(sc);
-
- memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
- mrsas_get_ld_list(sc);
-
- //memset(sc->log_to_span, 0, MRSAS_MAX_LD * sizeof(LD_SPAN_INFO));
-
- ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
-
- /*
- * Compute the max allowed sectors per IO: The controller info has two
- * limits on max sectors. Driver should use the minimum of these two.
- *
- * 1 << stripe_sz_ops.min = max sectors per strip
- *
- * Note that older firmwares ( < FW ver 30) didn't report information
- * to calculate max_sectors_1. So the number ended up as zero always.
- */
- tmp_sectors = 0;
- if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
- max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
- ctrl_info->max_strips_per_io;
- max_sectors_2 = ctrl_info->max_request_size;
- tmp_sectors = min(max_sectors_1 , max_sectors_2);
- sc->disableOnlineCtrlReset =
- ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
- sc->UnevenSpanSupport =
- ctrl_info->adapterOperations2.supportUnevenSpans;
- if(sc->UnevenSpanSupport) {
- device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
- sc->UnevenSpanSupport);
- if (MR_ValidateMapInfo(sc))
- sc->fast_path_io = 1;
- else
- sc->fast_path_io = 0;
-
- }
- }
- sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
-
- if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
- sc->max_sectors_per_req = tmp_sectors;
-
- if (ctrl_info)
- free(ctrl_info, M_MRSAS);
-
- return(0);
-}
-
-/**
- * mrsas_init_adapter: Initializes the adapter/controller
- * input: Adapter soft state
- *
- * Prepares for the issuing of the IOC Init cmd to FW for initializing the
- * ROC/controller. The FW register is read to determined the number of
+static int
+mrsas_setup_raidmap(struct mrsas_softc *sc)
+{
+ int i;
+
+ sc->drv_supported_vd_count =
+ MRSAS_MAX_LD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;
+ sc->drv_supported_pd_count =
+ MRSAS_MAX_PD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;
+
+ if (sc->max256vdSupport) {
+ sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
+ sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ } else {
+ sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ }
+
+#if VD_EXT_DEBUG
+ device_printf(sc->mrsas_dev, "FW supports: max256vdSupport = %s\n",
+ sc->max256vdSupport ? "YES" : "NO");
+ device_printf(sc->mrsas_dev, "FW supports %dVDs %dPDs\n"
+ "DRIVER supports %dVDs %dPDs \n",
+ sc->fw_supported_vd_count, sc->fw_supported_pd_count,
+ sc->drv_supported_vd_count, sc->drv_supported_pd_count);
+#endif
+
+ sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
+ (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
+ sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
+ sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
+ (sizeof(MR_LD_SPAN_MAP) * (sc->drv_supported_vd_count - 1));
+
+ for (i = 0; i < 2; i++) {
+ sc->ld_drv_map[i] =
+ (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
+ /* Do Error handling */
+ if (!sc->ld_drv_map[i]) {
+ device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
+
+ if (i == 1)
+ free(sc->ld_drv_map[0], M_MRSAS);
+ /* ABORT driver initialization */
+ goto ABORT;
+ }
+ }
+
+ sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
+
+ if (sc->max256vdSupport)
+ sc->current_map_sz = sc->new_map_sz;
+ else
+ sc->current_map_sz = sc->old_map_sz;
+
+
+ for (int i = 0; i < 2; i++) {
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 4, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ sc->max_map_sz,
+ 1,
+ sc->max_map_sz,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->raidmap_tag[i])) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate raid map tag.\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->raidmap_tag[i],
+ (void **)&sc->raidmap_mem[i],
+ BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate raidmap memory.\n");
+ return (ENOMEM);
+ }
+ bzero(sc->raidmap_mem[i], sc->max_map_sz);
+
+ if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
+ sc->raidmap_mem[i], sc->max_map_sz,
+ mrsas_addr_cb, &sc->raidmap_phys_addr[i],
+ BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
+ return (ENOMEM);
+ }
+ if (!sc->raidmap_mem[i]) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate memory for raid map.\n");
+ return (ENOMEM);
+ }
+ }
+
+ if (!mrsas_get_map_info(sc))
+ mrsas_sync_map_info(sc);
+
+ return (0);
+
+ABORT:
+ return (1);
+}
+
+/*
+ * mrsas_init_fw: Initialize Firmware
+ * input: Adapter soft state
+ *
+ * Calls transition_to_ready() to make sure Firmware is in operational state and
+ * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
+ * issues internal commands to get the controller info after the IOC_INIT
+ * command response is received by Firmware. Note: code relating to
+ * get_pdlist, get_ld_list and max_sectors are currently not being used, it
+ * is left here as placeholder.
+ */
+static int
+mrsas_init_fw(struct mrsas_softc *sc)
+{
+
+ int ret, loop, ocr = 0;
+ u_int32_t max_sectors_1;
+ u_int32_t max_sectors_2;
+ u_int32_t tmp_sectors;
+ struct mrsas_ctrl_info *ctrl_info;
+ u_int32_t scratch_pad_2;
+ int msix_enable = 0;
+ int fw_msix_count = 0;
+
+ /* Make sure Firmware is ready */
+ ret = mrsas_transition_to_ready(sc, ocr);
+ if (ret != SUCCESS) {
+ return (ret);
+ }
+ /* MSI-x index 0- reply post host index register */
+ sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
+
+ if (msix_enable) {
+ scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad_2));
+
+ /* Check max MSI-X vectors */
+ if (sc->device_id == MRSAS_TBOLT) {
+ sc->msix_vectors = (scratch_pad_2
+ & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
+ fw_msix_count = sc->msix_vectors;
+ } else {
+ /* Invader/Fury supports 96 MSI-X vectors */
+ sc->msix_vectors = ((scratch_pad_2
+ & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
+ >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+ fw_msix_count = sc->msix_vectors;
+
+ for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
+ loop++) {
+ sc->msix_reg_offset[loop] =
+ MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
+ (loop * 0x10);
+ }
+ }
+
+ /* Don't bother allocating more MSI-X vectors than cpus */
+ sc->msix_vectors = min(sc->msix_vectors,
+ mp_ncpus);
+
+ /* Allocate MSI-x vectors */
+ if (mrsas_allocate_msix(sc) == SUCCESS)
+ sc->msix_enable = 1;
+ else
+ sc->msix_enable = 0;
+
+ device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
+ "Online CPU %d Current MSIX <%d>\n",
+ fw_msix_count, mp_ncpus, sc->msix_vectors);
+ }
+ if (mrsas_init_adapter(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
+ return (1);
+ }
+ /* Allocate internal commands for pass-thru */
+ if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
+ return (1);
+ }
+ /*
+ * Get the controller info from FW, so that the MAX VD support
+ * availability can be decided.
+ */
+ ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
+ if (!ctrl_info)
+ device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
+
+ if (mrsas_get_ctrl_info(sc, ctrl_info)) {
+ device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
+ }
+ sc->max256vdSupport =
+ (u_int8_t)ctrl_info->adapterOperations3.supportMaxExtLDs;
+
+ if (ctrl_info->max_lds > 64) {
+ sc->max256vdSupport = 1;
+ }
+ if (mrsas_setup_raidmap(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
+ return (1);
+ }
+ /* For pass-thru, get PD/LD list and controller info */
+ memset(sc->pd_list, 0,
+ MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+ mrsas_get_pd_list(sc);
+
+ memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
+ mrsas_get_ld_list(sc);
+
+ /*
+ * Compute the max allowed sectors per IO: The controller info has
+ * two limits on max sectors. Driver should use the minimum of these
+ * two.
+ *
+ * 1 << stripe_sz_ops.min = max sectors per strip
+ *
+ * Note that older firmwares ( < FW ver 30) didn't report information to
+ * calculate max_sectors_1. So the number ended up as zero always.
+ */
+ tmp_sectors = 0;
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ ctrl_info->max_strips_per_io;
+ max_sectors_2 = ctrl_info->max_request_size;
+ tmp_sectors = min(max_sectors_1, max_sectors_2);
+ sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
+
+ if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
+ sc->max_sectors_per_req = tmp_sectors;
+
+ sc->disableOnlineCtrlReset =
+ ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ sc->UnevenSpanSupport =
+ ctrl_info->adapterOperations2.supportUnevenSpans;
+ if (sc->UnevenSpanSupport) {
+ device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
+ sc->UnevenSpanSupport);
+
+ if (MR_ValidateMapInfo(sc))
+ sc->fast_path_io = 1;
+ else
+ sc->fast_path_io = 0;
+ }
+ if (ctrl_info)
+ free(ctrl_info, M_MRSAS);
+
+ return (0);
+}
+
+/*
+ * mrsas_init_adapter: Initializes the adapter/controller
+ * input: Adapter soft state
+ *
+ * Prepares for the issuing of the IOC Init cmd to FW for initializing the
+ * ROC/controller. The FW register is read to determined the number of
* commands that is supported. All memory allocations for IO is based on
- * max_cmd. Appropriate calculations are performed in this function.
+ * max_cmd. Appropriate calculations are performed in this function.
*/
-int mrsas_init_adapter(struct mrsas_softc *sc)
+int
+mrsas_init_adapter(struct mrsas_softc *sc)
{
- uint32_t status;
- u_int32_t max_cmd;
- int ret;
+ uint32_t status;
+ u_int32_t max_cmd;
+ int ret;
+ int i = 0;
+
+ /* Read FW status register */
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
- /* Read FW status register */
- status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
+ /* Get operational params from status register */
+ sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
- /* Get operational params from status register */
- sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
+ /* Decrement the max supported by 1, to correlate with FW */
+ sc->max_fw_cmds = sc->max_fw_cmds - 1;
+ max_cmd = sc->max_fw_cmds;
- /* Decrement the max supported by 1, to correlate with FW */
- sc->max_fw_cmds = sc->max_fw_cmds-1;
- max_cmd = sc->max_fw_cmds;
+ /* Determine allocation size of command frames */
+ sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16);
+ sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
+ sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
+ sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
+ sc->chain_frames_alloc_sz = 1024 * max_cmd;
+ sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
- /* Determine allocation size of command frames */
- sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
- sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
- sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
- sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
- sc->chain_frames_alloc_sz = 1024 * max_cmd;
- sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
- offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
+ sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
+ sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
- sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
- sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
+ /* Used for pass thru MFI frame (DCMD) */
+ sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
- /* Used for pass thru MFI frame (DCMD) */
- sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
+ sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ sizeof(MPI2_SGE_IO_UNION)) / 16;
- sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
- sizeof(MPI2_SGE_IO_UNION))/16;
+ int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
- sc->last_reply_idx = 0;
+ for (i = 0; i < count; i++)
+ sc->last_reply_idx[i] = 0;
- ret = mrsas_alloc_mem(sc);
- if (ret != SUCCESS)
- return(ret);
+ ret = mrsas_alloc_mem(sc);
+ if (ret != SUCCESS)
+ return (ret);
- ret = mrsas_alloc_mpt_cmds(sc);
- if (ret != SUCCESS)
- return(ret);
+ ret = mrsas_alloc_mpt_cmds(sc);
+ if (ret != SUCCESS)
+ return (ret);
- ret = mrsas_ioc_init(sc);
- if (ret != SUCCESS)
- return(ret);
-
-
- return(0);
+ ret = mrsas_ioc_init(sc);
+ if (ret != SUCCESS)
+ return (ret);
+
+ return (0);
}
-/**
- * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
- * input: Adapter soft state
+/*
+ * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
+ * input: Adapter soft state
*
* Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
*/
-int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
-{
- int ioc_init_size;
-
- /* Allocate IOC INIT command */
- ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 1, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- ioc_init_size, // maxsize
- 1, // msegments
- ioc_init_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->ioc_init_tag)) {
- device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
- BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
- return (ENOMEM);
- }
- bzero(sc->ioc_init_mem, ioc_init_size);
- if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
- sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
- &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
- return (ENOMEM);
- }
-
- return (0);
-}
-
-/**
- * mrsas_free_ioc_cmd: Allocates memory for IOC Init command
- * input: Adapter soft state
+int
+mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
+{
+ int ioc_init_size;
+
+ /* Allocate IOC INIT command */
+ ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ ioc_init_size,
+ 1,
+ ioc_init_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->ioc_init_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
+ BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
+ return (ENOMEM);
+ }
+ bzero(sc->ioc_init_mem, ioc_init_size);
+ if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
+ sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
+ &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
+ return (ENOMEM);
+ }
+ return (0);
+}
+
+/*
+ * mrsas_free_ioc_cmd: Allocates memory for IOC Init command
+ * input: Adapter soft state
*
* Deallocates memory of the IOC Init cmd.
*/
-void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
+void
+mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
- if (sc->ioc_init_phys_mem)
- bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
- if (sc->ioc_init_mem != NULL)
- bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
- if (sc->ioc_init_tag != NULL)
- bus_dma_tag_destroy(sc->ioc_init_tag);
+ if (sc->ioc_init_phys_mem)
+ bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
+ if (sc->ioc_init_mem != NULL)
+ bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
+ if (sc->ioc_init_tag != NULL)
+ bus_dma_tag_destroy(sc->ioc_init_tag);
}
-/**
- * mrsas_ioc_init: Sends IOC Init command to FW
- * input: Adapter soft state
+/*
+ * mrsas_ioc_init: Sends IOC Init command to FW
+ * input: Adapter soft state
*
* Issues the IOC Init cmd to FW to initialize the ROC/controller.
*/
-int mrsas_ioc_init(struct mrsas_softc *sc)
-{
- struct mrsas_init_frame *init_frame;
- pMpi2IOCInitRequest_t IOCInitMsg;
- MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
- u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
- bus_addr_t phys_addr;
- int i, retcode = 0;
-
- /* Allocate memory for the IOC INIT command */
- if (mrsas_alloc_ioc_cmd(sc)) {
- device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
- return(1);
- }
-
- IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
- IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
- IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
- IOCInitMsg->MsgVersion = MPI2_VERSION;
- IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
- IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
- IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
- IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
- IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
-
- init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
- init_frame->cmd = MFI_CMD_INIT;
- init_frame->cmd_status = 0xFF;
- init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
-
- if (sc->verbuf_mem) {
- snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
- MRSAS_VERSION);
- init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
- init_frame->driver_ver_hi = 0;
- }
-
- phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
- init_frame->queue_info_new_phys_addr_lo = phys_addr;
- init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
-
- req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
- req_desc.MFAIo.RequestFlags =
- (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-
- mrsas_disable_intr(sc);
- mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
- //device_printf(sc->mrsas_dev, "Issuing IOC INIT command to FW.\n");del?
- mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
-
- /*
- * Poll response timer to wait for Firmware response. While this
- * timer with the DELAY call could block CPU, the time interval for
- * this is only 1 millisecond.
- */
- if (init_frame->cmd_status == 0xFF) {
- for (i=0; i < (max_wait * 1000); i++){
- if (init_frame->cmd_status == 0xFF)
- DELAY(1000);
- else
- break;
- }
- }
-
- if (init_frame->cmd_status == 0)
- mrsas_dprint(sc, MRSAS_OCR,
- "IOC INIT response received from FW.\n");
- //device_printf(sc->mrsas_dev, "IOC INIT response received from FW.\n");del?
- else
- {
- if (init_frame->cmd_status == 0xFF)
- device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
- else
- device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
- retcode = 1;
- }
-
- mrsas_free_ioc_cmd(sc);
- return (retcode);
-}
-
-/**
- * mrsas_alloc_mpt_cmds: Allocates the command packets
- * input: Adapter instance soft state
+int
+mrsas_ioc_init(struct mrsas_softc *sc)
+{
+ struct mrsas_init_frame *init_frame;
+ pMpi2IOCInitRequest_t IOCInitMsg;
+ MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
+ u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
+ bus_addr_t phys_addr;
+ int i, retcode = 0;
+
+ /* Allocate memory for the IOC INIT command */
+ if (mrsas_alloc_ioc_cmd(sc)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
+ return (1);
+ }
+ IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
+ IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
+ IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ IOCInitMsg->MsgVersion = MPI2_VERSION;
+ IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
+ IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
+ IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
+ IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
+ IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
+ IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
+
+ init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+ init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ /* driver support Extended MSIX */
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY)) {
+ init_frame->driver_operations.
+ mfi_capabilities.support_additional_msix = 1;
+ }
+ if (sc->verbuf_mem) {
+ snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
+ MRSAS_VERSION);
+ init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
+ init_frame->driver_ver_hi = 0;
+ }
+ init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
+ phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
+ init_frame->queue_info_new_phys_addr_lo = phys_addr;
+ init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
+
+ req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
+ req_desc.MFAIo.RequestFlags =
+ (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ mrsas_disable_intr(sc);
+ mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
+ mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
+
+ /*
+ * Poll response timer to wait for Firmware response. While this
+ * timer with the DELAY call could block CPU, the time interval for
+ * this is only 1 millisecond.
+ */
+ if (init_frame->cmd_status == 0xFF) {
+ for (i = 0; i < (max_wait * 1000); i++) {
+ if (init_frame->cmd_status == 0xFF)
+ DELAY(1000);
+ else
+ break;
+ }
+ }
+ if (init_frame->cmd_status == 0)
+ mrsas_dprint(sc, MRSAS_OCR,
+ "IOC INIT response received from FW.\n");
+ else {
+ if (init_frame->cmd_status == 0xFF)
+ device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
+ else
+ device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
+ retcode = 1;
+ }
+
+ mrsas_free_ioc_cmd(sc);
+ return (retcode);
+}
+
+/*
+ * mrsas_alloc_mpt_cmds: Allocates the command packets
+ * input: Adapter instance soft state
*
* This function allocates the internal commands for IOs. Each command that is
- * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
- * An array is allocated with mrsas_mpt_cmd context. The free commands are
+ * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
+ * array is allocated with mrsas_mpt_cmd context. The free commands are
* maintained in a linked list (cmd pool). SMID value range is from 1 to
* max_fw_cmds.
*/
-int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
-{
- int i, j;
- u_int32_t max_cmd;
- struct mrsas_mpt_cmd *cmd;
- pMpi2ReplyDescriptorsUnion_t reply_desc;
- u_int32_t offset, chain_offset, sense_offset;
- bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
- u_int8_t *io_req_base, *chain_frame_base, *sense_base;
-
- max_cmd = sc->max_fw_cmds;
-
- sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
- if (!sc->req_desc) {
- device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
- return(ENOMEM);
- }
- memset(sc->req_desc, 0, sc->request_alloc_sz);
-
- /*
- * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
- * dynamic array first and then allocate individual commands.
- */
- sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
- if (!sc->mpt_cmd_list) {
- device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
- return(ENOMEM);
- }
- memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
- for (i = 0; i < max_cmd; i++) {
- sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
- M_MRSAS, M_NOWAIT);
- if (!sc->mpt_cmd_list[i]) {
- for (j = 0; j < i; j++)
- free(sc->mpt_cmd_list[j],M_MRSAS);
- free(sc->mpt_cmd_list, M_MRSAS);
- sc->mpt_cmd_list = NULL;
- return(ENOMEM);
- }
- }
-
- io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
- io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
- chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
- chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
- sense_base = (u_int8_t*)sc->sense_mem;
- sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
- for (i = 0; i < max_cmd; i++) {
- cmd = sc->mpt_cmd_list[i];
- offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
- chain_offset = 1024 * i;
- sense_offset = MRSAS_SENSE_LEN * i;
- memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
- cmd->index = i + 1;
- cmd->ccb_ptr = NULL;
- callout_init(&cmd->cm_callout, 0);
- cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
- cmd->sc = sc;
- cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
- memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
- cmd->io_request_phys_addr = io_req_base_phys + offset;
- cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
- cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
- cmd->sense = sense_base + sense_offset;
- cmd->sense_phys_addr = sense_base_phys + sense_offset;
- if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
- return(FAIL);
- }
- TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
- }
-
- /* Initialize reply descriptor array to 0xFFFFFFFF */
- reply_desc = sc->reply_desc_mem;
- for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
- reply_desc->Words = MRSAS_ULONG_MAX;
- }
- return(0);
-}
-
-/**
- * mrsas_fire_cmd: Sends command to FW
- * input: Adapter soft state
- * request descriptor address low
- * request descriptor address high
- *
- * This functions fires the command to Firmware by writing to the
- * inbound_low_queue_port and inbound_high_queue_port.
- */
-void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
- u_int32_t req_desc_hi)
-{
- mtx_lock(&sc->pci_lock);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
- req_desc_lo);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
- req_desc_hi);
- mtx_unlock(&sc->pci_lock);
-}
-
-/**
- * mrsas_transition_to_ready: Move FW to Ready state
- * input: Adapter instance soft state
- *
- * During the initialization, FW passes can potentially be in any one of
- * several possible states. If the FW in operational, waiting-for-handshake
- * states, driver must take steps to bring it to ready state. Otherwise, it
- * has to wait for the ready state.
- */
-int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
-{
- int i;
- u_int8_t max_wait;
- u_int32_t val, fw_state;
- u_int32_t cur_state;
- u_int32_t abs_state, curr_abs_state;
-
- val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
- fw_state = val & MFI_STATE_MASK;
- max_wait = MRSAS_RESET_WAIT_TIME;
-
- if (fw_state != MFI_STATE_READY)
- device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
-
- while (fw_state != MFI_STATE_READY) {
- abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
- switch (fw_state) {
- case MFI_STATE_FAULT:
- device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
- if (ocr) {
- cur_state = MFI_STATE_FAULT;
- break;
- }
- else
- return -ENODEV;
- case MFI_STATE_WAIT_HANDSHAKE:
- /* Set the CLR bit in inbound doorbell */
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
- MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
- cur_state = MFI_STATE_WAIT_HANDSHAKE;
- break;
- case MFI_STATE_BOOT_MESSAGE_PENDING:
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
- MFI_INIT_HOTPLUG);
- cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
- break;
- case MFI_STATE_OPERATIONAL:
- /* Bring it to READY state; assuming max wait 10 secs */
- mrsas_disable_intr(sc);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
- for (i=0; i < max_wait * 1000; i++) {
- if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
- DELAY(1000);
- else
- break;
- }
- cur_state = MFI_STATE_OPERATIONAL;
- break;
- case MFI_STATE_UNDEFINED:
- /* This state should not last for more than 2 seconds */
- cur_state = MFI_STATE_UNDEFINED;
- break;
- case MFI_STATE_BB_INIT:
- cur_state = MFI_STATE_BB_INIT;
- break;
- case MFI_STATE_FW_INIT:
- cur_state = MFI_STATE_FW_INIT;
- break;
- case MFI_STATE_FW_INIT_2:
- cur_state = MFI_STATE_FW_INIT_2;
- break;
- case MFI_STATE_DEVICE_SCAN:
- cur_state = MFI_STATE_DEVICE_SCAN;
- break;
- case MFI_STATE_FLUSH_CACHE:
- cur_state = MFI_STATE_FLUSH_CACHE;
- break;
- default:
- device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
- return -ENODEV;
+int
+mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
+{
+ int i, j;
+ u_int32_t max_cmd, count;
+ struct mrsas_mpt_cmd *cmd;
+ pMpi2ReplyDescriptorsUnion_t reply_desc;
+ u_int32_t offset, chain_offset, sense_offset;
+ bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
+ u_int8_t *io_req_base, *chain_frame_base, *sense_base;
+
+ max_cmd = sc->max_fw_cmds;
+
+ sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
+ if (!sc->req_desc) {
+ device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
+ return (ENOMEM);
}
+ memset(sc->req_desc, 0, sc->request_alloc_sz);
/*
- * The cur_state should not last for more than max_wait secs
+ * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
*/
- for (i = 0; i < (max_wait * 1000); i++) {
- fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- outbound_scratch_pad))& MFI_STATE_MASK);
- curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- outbound_scratch_pad));
- if (abs_state == curr_abs_state)
- DELAY(1000);
- else
- break;
+ sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
+ if (!sc->mpt_cmd_list) {
+ device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
+ return (ENOMEM);
+ }
+ memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
+ for (i = 0; i < max_cmd; i++) {
+ sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
+ M_MRSAS, M_NOWAIT);
+ if (!sc->mpt_cmd_list[i]) {
+ for (j = 0; j < i; j++)
+ free(sc->mpt_cmd_list[j], M_MRSAS);
+ free(sc->mpt_cmd_list, M_MRSAS);
+ sc->mpt_cmd_list = NULL;
+ return (ENOMEM);
+ }
}
- /*
- * Return error if fw_state hasn't changed after max_wait
- */
- if (curr_abs_state == abs_state) {
- device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
- "in %d secs\n", fw_state, max_wait);
- return -ENODEV;
+ io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
+ chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
+ sense_base = (u_int8_t *)sc->sense_mem;
+ sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
+ for (i = 0; i < max_cmd; i++) {
+ cmd = sc->mpt_cmd_list[i];
+ offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
+ chain_offset = 1024 * i;
+ sense_offset = MRSAS_SENSE_LEN * i;
+ memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
+ cmd->index = i + 1;
+ cmd->ccb_ptr = NULL;
+ callout_init(&cmd->cm_callout, 0);
+ cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
+ cmd->sc = sc;
+ cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
+ memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
+ cmd->io_request_phys_addr = io_req_base_phys + offset;
+ cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
+ cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
+ cmd->sense = sense_base + sense_offset;
+ cmd->sense_phys_addr = sense_base_phys + sense_offset;
+ if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
+ return (FAIL);
+ }
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
}
- }
- mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
- //device_printf(sc->mrsas_dev, "FW now in Ready state\n");del?
- return 0;
+
+ /* Initialize reply descriptor array to 0xFFFFFFFF */
+ reply_desc = sc->reply_desc_mem;
+ count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
+ for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
+ reply_desc->Words = MRSAS_ULONG_MAX;
+ }
+ return (0);
}
-/**
- * mrsas_get_mfi_cmd: Get a cmd from free command pool
- * input: Adapter soft state
+/*
+ * mrsas_fire_cmd: Sends command to FW
+ * input: Adapter softstate
+ * request descriptor address low
+ * request descriptor address high
+ *
+ * This functions fires the command to Firmware by writing to the
+ * inbound_low_queue_port and inbound_high_queue_port.
+ */
+void
+mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
+ u_int32_t req_desc_hi)
+{
+ mtx_lock(&sc->pci_lock);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
+ req_desc_lo);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
+ req_desc_hi);
+ mtx_unlock(&sc->pci_lock);
+}
+
+/*
+ * mrsas_transition_to_ready: Move FW to Ready state input:
+ * Adapter instance soft state
+ *
+ * During the initialization, FW passes can potentially be in any one of several
+ * possible states. If the FW in operational, waiting-for-handshake states,
+ * driver must take steps to bring it to ready state. Otherwise, it has to
+ * wait for the ready state.
+ */
+int
+mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
+{
+ int i;
+ u_int8_t max_wait;
+ u_int32_t val, fw_state;
+ u_int32_t cur_state;
+ u_int32_t abs_state, curr_abs_state;
+
+ val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
+ fw_state = val & MFI_STATE_MASK;
+ max_wait = MRSAS_RESET_WAIT_TIME;
+
+ if (fw_state != MFI_STATE_READY)
+ device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
+
+ while (fw_state != MFI_STATE_READY) {
+ abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
+ switch (fw_state) {
+ case MFI_STATE_FAULT:
+ device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
+ if (ocr) {
+ cur_state = MFI_STATE_FAULT;
+ break;
+ } else
+ return -ENODEV;
+ case MFI_STATE_WAIT_HANDSHAKE:
+ /* Set the CLR bit in inbound doorbell */
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
+ MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
+ cur_state = MFI_STATE_WAIT_HANDSHAKE;
+ break;
+ case MFI_STATE_BOOT_MESSAGE_PENDING:
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
+ MFI_INIT_HOTPLUG);
+ cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+ break;
+ case MFI_STATE_OPERATIONAL:
+ /*
+ * Bring it to READY state; assuming max wait 10
+ * secs
+ */
+ mrsas_disable_intr(sc);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
+ for (i = 0; i < max_wait * 1000; i++) {
+ if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
+ DELAY(1000);
+ else
+ break;
+ }
+ cur_state = MFI_STATE_OPERATIONAL;
+ break;
+ case MFI_STATE_UNDEFINED:
+ /*
+ * This state should not last for more than 2
+ * seconds
+ */
+ cur_state = MFI_STATE_UNDEFINED;
+ break;
+ case MFI_STATE_BB_INIT:
+ cur_state = MFI_STATE_BB_INIT;
+ break;
+ case MFI_STATE_FW_INIT:
+ cur_state = MFI_STATE_FW_INIT;
+ break;
+ case MFI_STATE_FW_INIT_2:
+ cur_state = MFI_STATE_FW_INIT_2;
+ break;
+ case MFI_STATE_DEVICE_SCAN:
+ cur_state = MFI_STATE_DEVICE_SCAN;
+ break;
+ case MFI_STATE_FLUSH_CACHE:
+ cur_state = MFI_STATE_FLUSH_CACHE;
+ break;
+ default:
+ device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
+ return -ENODEV;
+ }
+
+ /*
+ * The cur_state should not last for more than max_wait secs
+ */
+ for (i = 0; i < (max_wait * 1000); i++) {
+ fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK);
+ curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad));
+ if (abs_state == curr_abs_state)
+ DELAY(1000);
+ else
+ break;
+ }
+
+ /*
+ * Return error if fw_state hasn't changed after max_wait
+ */
+ if (curr_abs_state == abs_state) {
+ device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
+ "in %d secs\n", fw_state, max_wait);
+ return -ENODEV;
+ }
+ }
+ mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
+ return 0;
+}
+
+/*
+ * mrsas_get_mfi_cmd: Get a cmd from free command pool
+ * input: Adapter soft state
*
* This function removes an MFI command from the command list.
*/
-struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
+struct mrsas_mfi_cmd *
+mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
- struct mrsas_mfi_cmd *cmd = NULL;
-
- mtx_lock(&sc->mfi_cmd_pool_lock);
- if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
- cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
- TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
- }
- mtx_unlock(&sc->mfi_cmd_pool_lock);
-
- return cmd;
+ struct mrsas_mfi_cmd *cmd = NULL;
+
+ mtx_lock(&sc->mfi_cmd_pool_lock);
+ if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
+ cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
+ TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
+ }
+ mtx_unlock(&sc->mfi_cmd_pool_lock);
+
+ return cmd;
}
-/**
- * mrsas_ocr_thread Thread to handle OCR/Kill Adapter.
- * input: Adapter Context.
+/*
+ * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
+ * input: Adapter Context.
*
- * This function will check FW status register and flag
- * do_timeout_reset flag. It will do OCR/Kill adapter if
- * FW is in fault state or IO timed out has trigger reset.
+ * This function will check FW status register and flag do_timeout_reset flag.
+ * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
+ * trigger reset.
*/
static void
mrsas_ocr_thread(void *arg)
{
- struct mrsas_softc *sc;
- u_int32_t fw_status, fw_state;
-
- sc = (struct mrsas_softc *)arg;
-
- mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
-
- sc->ocr_thread_active = 1;
- mtx_lock(&sc->sim_lock);
- for (;;) {
- /* Sleep for 1 second and check the queue status*/
- msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
- "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
- if (sc->remove_in_progress) {
- mrsas_dprint(sc, MRSAS_OCR,
- "Exit due to shutdown from %s\n", __func__);
- break;
- }
- fw_status = mrsas_read_reg(sc,
- offsetof(mrsas_reg_set, outbound_scratch_pad));
- fw_state = fw_status & MFI_STATE_MASK;
- if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
- device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
- sc->do_timedout_reset?"IO Timeout":
- "FW fault detected");
- mtx_lock_spin(&sc->ioctl_lock);
- sc->reset_in_progress = 1;
- sc->reset_count++;
- mtx_unlock_spin(&sc->ioctl_lock);
- mrsas_xpt_freeze(sc);
- mrsas_reset_ctrl(sc);
- mrsas_xpt_release(sc);
- sc->reset_in_progress = 0;
- sc->do_timedout_reset = 0;
- }
- }
- mtx_unlock(&sc->sim_lock);
- sc->ocr_thread_active = 0;
- mrsas_kproc_exit(0);
-}
-
-/**
- * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
- * input: Adapter Context.
- *
- * This function will clear reply descriptor so that post OCR
- * driver and FW will lost old history.
- */
-void mrsas_reset_reply_desc(struct mrsas_softc *sc)
-{
- int i;
- pMpi2ReplyDescriptorsUnion_t reply_desc;
-
- sc->last_reply_idx = 0;
- reply_desc = sc->reply_desc_mem;
- for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
- reply_desc->Words = MRSAS_ULONG_MAX;
- }
-}
-
-/**
- * mrsas_reset_ctrl Core function to OCR/Kill adapter.
- * input: Adapter Context.
- *
- * This function will run from thread context so that it can sleep.
- * 1. Do not handle OCR if FW is in HW critical error.
- * 2. Wait for outstanding command to complete for 180 seconds.
- * 3. If #2 does not find any outstanding command Controller is in working
- * state, so skip OCR.
- * Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset.
- * 4. Start of the OCR, return all SCSI command back to CAM layer which has
- * ccb_ptr.
- * 5. Post OCR, Re-fire Managment command and move Controller to Operation
- * state.
- */
-int mrsas_reset_ctrl(struct mrsas_softc *sc)
-{
- int retval = SUCCESS, i, j, retry = 0;
- u_int32_t host_diag, abs_state, status_reg, reset_adapter;
- union ccb *ccb;
- struct mrsas_mfi_cmd *mfi_cmd;
- struct mrsas_mpt_cmd *mpt_cmd;
- MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
-
- if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
- device_printf(sc->mrsas_dev,
- "mrsas: Hardware critical error, returning FAIL.\n");
- return FAIL;
- }
-
- set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
- sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
- mrsas_disable_intr(sc);
- DELAY(1000 * 1000);
-
- /* First try waiting for commands to complete */
- if (mrsas_wait_for_outstanding(sc)) {
- mrsas_dprint(sc, MRSAS_OCR,
- "resetting adapter from %s.\n",
- __func__);
- /* Now return commands back to the CAM layer */
- for (i = 0 ; i < sc->max_fw_cmds; i++) {
- mpt_cmd = sc->mpt_cmd_list[i];
- if (mpt_cmd->ccb_ptr) {
- ccb = (union ccb *)(mpt_cmd->ccb_ptr);
- ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
- mrsas_cmd_done(sc, mpt_cmd);
- atomic_dec(&sc->fw_outstanding);
- }
- }
-
- status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- outbound_scratch_pad));
- abs_state = status_reg & MFI_STATE_MASK;
- reset_adapter = status_reg & MFI_RESET_ADAPTER;
- if (sc->disableOnlineCtrlReset ||
- (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
- /* Reset not supported, kill adapter */
- mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
- mrsas_kill_hba(sc);
- sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
- retval = FAIL;
- goto out;
- }
-
- /* Now try to reset the chip */
- for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
- MPI2_WRSEQ_FLUSH_KEY_VALUE);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
- MPI2_WRSEQ_1ST_KEY_VALUE);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
- MPI2_WRSEQ_2ND_KEY_VALUE);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
- MPI2_WRSEQ_3RD_KEY_VALUE);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
- MPI2_WRSEQ_4TH_KEY_VALUE);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
- MPI2_WRSEQ_5TH_KEY_VALUE);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
- MPI2_WRSEQ_6TH_KEY_VALUE);
-
- /* Check that the diag write enable (DRWE) bit is on */
- host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- fusion_host_diag));
- retry = 0;
- while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
- DELAY(100 * 1000);
- host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- fusion_host_diag));
- if (retry++ == 100) {
- mrsas_dprint(sc, MRSAS_OCR,
- "Host diag unlock failed!\n");
- break;
- }
- }
- if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
- continue;
-
- /* Send chip reset command */
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
- host_diag | HOST_DIAG_RESET_ADAPTER);
- DELAY(3000 * 1000);
-
- /* Make sure reset adapter bit is cleared */
- host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- fusion_host_diag));
- retry = 0;
- while (host_diag & HOST_DIAG_RESET_ADAPTER) {
- DELAY(100 * 1000);
- host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- fusion_host_diag));
- if (retry++ == 1000) {
- mrsas_dprint(sc, MRSAS_OCR,
- "Diag reset adapter never cleared!\n");
- break;
- }
- }
- if (host_diag & HOST_DIAG_RESET_ADAPTER)
- continue;
-
- abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- outbound_scratch_pad)) & MFI_STATE_MASK;
- retry = 0;
-
- while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
- DELAY(100 * 1000);
- abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- outbound_scratch_pad)) & MFI_STATE_MASK;
- }
- if (abs_state <= MFI_STATE_FW_INIT) {
- mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
- " state = 0x%x\n", abs_state);
- continue;
- }
-
- /* Wait for FW to become ready */
- if (mrsas_transition_to_ready(sc, 1)) {
- mrsas_dprint(sc, MRSAS_OCR,
- "mrsas: Failed to transition controller to ready.\n");
- continue;
- }
-
- mrsas_reset_reply_desc(sc);
- if (mrsas_ioc_init(sc)) {
- mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
- continue;
- }
-
- clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
- mrsas_enable_intr(sc);
- sc->adprecovery = MRSAS_HBA_OPERATIONAL;
-
- /* Re-fire management commands */
- for (j = 0 ; j < sc->max_fw_cmds; j++) {
- mpt_cmd = sc->mpt_cmd_list[j];
- if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
- mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
- if (mfi_cmd->frame->dcmd.opcode ==
- MR_DCMD_LD_MAP_GET_INFO) {
- mrsas_release_mfi_cmd(mfi_cmd);
- mrsas_release_mpt_cmd(mpt_cmd);
- } else {
- req_desc = mrsas_get_request_desc(sc,
- mfi_cmd->cmd_id.context.smid - 1);
- mrsas_dprint(sc, MRSAS_OCR,
- "Re-fire command DCMD opcode 0x%x index %d\n ",
- mfi_cmd->frame->dcmd.opcode, j);
- if (!req_desc)
- device_printf(sc->mrsas_dev,
- "Cannot build MPT cmd.\n");
- else
- mrsas_fire_cmd(sc, req_desc->addr.u.low,
- req_desc->addr.u.high);
- }
- }
- }
-
- /* Reset load balance info */
- memset(sc->load_balance_info, 0,
- sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);
-
- if (!mrsas_get_map_info(sc))
- mrsas_sync_map_info(sc);
-
- /* Adapter reset completed successfully */
- device_printf(sc->mrsas_dev, "Reset successful\n");
- retval = SUCCESS;
- goto out;
- }
- /* Reset failed, kill the adapter */
- device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
- mrsas_kill_hba(sc);
- retval = FAIL;
- } else {
- clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
- mrsas_enable_intr(sc);
- sc->adprecovery = MRSAS_HBA_OPERATIONAL;
- }
+ struct mrsas_softc *sc;
+ u_int32_t fw_status, fw_state;
+
+ sc = (struct mrsas_softc *)arg;
+
+ mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
+
+ sc->ocr_thread_active = 1;
+ mtx_lock(&sc->sim_lock);
+ for (;;) {
+ /* Sleep for 1 second and check the queue status */
+ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
+ "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
+ if (sc->remove_in_progress) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Exit due to shutdown from %s\n", __func__);
+ break;
+ }
+ fw_status = mrsas_read_reg(sc,
+ offsetof(mrsas_reg_set, outbound_scratch_pad));
+ fw_state = fw_status & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
+ device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
+ sc->do_timedout_reset ? "IO Timeout" :
+ "FW fault detected");
+ mtx_lock_spin(&sc->ioctl_lock);
+ sc->reset_in_progress = 1;
+ sc->reset_count++;
+ mtx_unlock_spin(&sc->ioctl_lock);
+ mrsas_xpt_freeze(sc);
+ mrsas_reset_ctrl(sc);
+ mrsas_xpt_release(sc);
+ sc->reset_in_progress = 0;
+ sc->do_timedout_reset = 0;
+ }
+ }
+ mtx_unlock(&sc->sim_lock);
+ sc->ocr_thread_active = 0;
+ mrsas_kproc_exit(0);
+}
+
+/*
+ * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
+ * input: Adapter Context.
+ *
+ * This function will clear reply descriptor so that post OCR driver and FW will
+ * lose old history.
+ */
+void
+mrsas_reset_reply_desc(struct mrsas_softc *sc)
+{
+ int i, count;
+ pMpi2ReplyDescriptorsUnion_t reply_desc;
+
+ count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
+ for (i = 0; i < count; i++)
+ sc->last_reply_idx[i] = 0;
+
+ reply_desc = sc->reply_desc_mem;
+ for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
+ reply_desc->Words = MRSAS_ULONG_MAX;
+ }
+}
+
+/*
+ * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
+ * input: Adapter Context.
+ *
+ * This function will run from thread context so that it can sleep. 1. Do not
+ * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
+ * to complete for 180 seconds. 3. If #2 does not find any outstanding
+ * command Controller is in working state, so skip OCR. Otherwise, do
+ * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
+ * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
+ * OCR, Re-fire Management command and move Controller to Operation state.
+ */
+int
+mrsas_reset_ctrl(struct mrsas_softc *sc)
+{
+ int retval = SUCCESS, i, j, retry = 0;
+ u_int32_t host_diag, abs_state, status_reg, reset_adapter;
+ union ccb *ccb;
+ struct mrsas_mfi_cmd *mfi_cmd;
+ struct mrsas_mpt_cmd *mpt_cmd;
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
+ device_printf(sc->mrsas_dev,
+ "mrsas: Hardware critical error, returning FAIL.\n");
+ return FAIL;
+ }
+ set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
+ mrsas_disable_intr(sc);
+ DELAY(1000 * 1000);
+
+ /* First try waiting for commands to complete */
+ if (mrsas_wait_for_outstanding(sc)) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "resetting adapter from %s.\n",
+ __func__);
+ /* Now return commands back to the CAM layer */
+ for (i = 0; i < sc->max_fw_cmds; i++) {
+ mpt_cmd = sc->mpt_cmd_list[i];
+ if (mpt_cmd->ccb_ptr) {
+ ccb = (union ccb *)(mpt_cmd->ccb_ptr);
+ ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
+ mrsas_cmd_done(sc, mpt_cmd);
+ atomic_dec(&sc->fw_outstanding);
+ }
+ }
+
+ status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad));
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (sc->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
+ /* Reset not supported, kill adapter */
+ mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
+ mrsas_kill_hba(sc);
+ sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
+ retval = FAIL;
+ goto out;
+ }
+ /* Now try to reset the chip */
+ for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_FLUSH_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_1ST_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_2ND_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_3RD_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_4TH_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_5TH_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_6TH_KEY_VALUE);
+
+ /* Check that the diag write enable (DRWE) bit is on */
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ retry = 0;
+ while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
+ DELAY(100 * 1000);
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ if (retry++ == 100) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Host diag unlock failed!\n");
+ break;
+ }
+ }
+ if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
+ continue;
+
+ /* Send chip reset command */
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
+ host_diag | HOST_DIAG_RESET_ADAPTER);
+ DELAY(3000 * 1000);
+
+ /* Make sure reset adapter bit is cleared */
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ retry = 0;
+ while (host_diag & HOST_DIAG_RESET_ADAPTER) {
+ DELAY(100 * 1000);
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ if (retry++ == 1000) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Diag reset adapter never cleared!\n");
+ break;
+ }
+ }
+ if (host_diag & HOST_DIAG_RESET_ADAPTER)
+ continue;
+
+ abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK;
+ retry = 0;
+
+ while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
+ DELAY(100 * 1000);
+ abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK;
+ }
+ if (abs_state <= MFI_STATE_FW_INIT) {
+ mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
+ " state = 0x%x\n", abs_state);
+ continue;
+ }
+ /* Wait for FW to become ready */
+ if (mrsas_transition_to_ready(sc, 1)) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "mrsas: Failed to transition controller to ready.\n");
+ continue;
+ }
+ mrsas_reset_reply_desc(sc);
+ if (mrsas_ioc_init(sc)) {
+ mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
+ continue;
+ }
+ clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ mrsas_enable_intr(sc);
+ sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+
+ /* Re-fire management commands */
+ for (j = 0; j < sc->max_fw_cmds; j++) {
+ mpt_cmd = sc->mpt_cmd_list[j];
+ if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
+ mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
+ if (mfi_cmd->frame->dcmd.opcode ==
+ MR_DCMD_LD_MAP_GET_INFO) {
+ mrsas_release_mfi_cmd(mfi_cmd);
+ mrsas_release_mpt_cmd(mpt_cmd);
+ } else {
+ req_desc = mrsas_get_request_desc(sc,
+ mfi_cmd->cmd_id.context.smid - 1);
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Re-fire command DCMD opcode 0x%x index %d\n ",
+ mfi_cmd->frame->dcmd.opcode, j);
+ if (!req_desc)
+ device_printf(sc->mrsas_dev,
+ "Cannot build MPT cmd.\n");
+ else
+ mrsas_fire_cmd(sc, req_desc->addr.u.low,
+ req_desc->addr.u.high);
+ }
+ }
+ }
+
+ /* Reset load balance info */
+ memset(sc->load_balance_info, 0,
+ sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
+
+ if (!mrsas_get_map_info(sc))
+ mrsas_sync_map_info(sc);
+
+ /* Adapter reset completed successfully */
+ device_printf(sc->mrsas_dev, "Reset successful\n");
+ retval = SUCCESS;
+ goto out;
+ }
+ /* Reset failed, kill the adapter */
+ device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
+ mrsas_kill_hba(sc);
+ retval = FAIL;
+ } else {
+ clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ mrsas_enable_intr(sc);
+ sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+ }
out:
- clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
- mrsas_dprint(sc, MRSAS_OCR,
- "Reset Exit with %d.\n", retval);
- return retval;
+ clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Reset Exit with %d.\n", retval);
+ return retval;
}
-/**
- * mrsas_kill_hba Kill HBA when OCR is not supported.
- * input: Adapter Context.
+/*
+ * mrsas_kill_hba: Kill HBA when OCR is not supported
+ * input: Adapter Context.
*
* This function will kill HBA when OCR is not supported.
*/
-void mrsas_kill_hba (struct mrsas_softc *sc)
-{
- mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
- MFI_STOP_ADP);
- /* Flush */
- mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
-}
-
-/**
- * mrsas_wait_for_outstanding Wait for outstanding commands
- * input: Adapter Context.
- *
- * This function will wait for 180 seconds for outstanding
- * commands to be completed.
- */
-int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
-{
- int i, outstanding, retval = 0;
- u_int32_t fw_state;
-
- for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
- if (sc->remove_in_progress) {
- mrsas_dprint(sc, MRSAS_OCR,
- "Driver remove or shutdown called.\n");
- retval = 1;
- goto out;
- }
- /* Check if firmware is in fault state */
- fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- outbound_scratch_pad)) & MFI_STATE_MASK;
- if (fw_state == MFI_STATE_FAULT) {
- mrsas_dprint(sc, MRSAS_OCR,
- "Found FW in FAULT state, will reset adapter.\n");
- retval = 1;
- goto out;
- }
- outstanding = atomic_read(&sc->fw_outstanding);
- if (!outstanding)
- goto out;
-
- if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
- mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
- "commands to complete\n",i,outstanding);
- mrsas_complete_cmd(sc);
- }
- DELAY(1000 * 1000);
- }
-
- if (atomic_read(&sc->fw_outstanding)) {
- mrsas_dprint(sc, MRSAS_OCR,
- " pending commands remain after waiting,"
- " will reset adapter.\n");
- retval = 1;
- }
+void
+mrsas_kill_hba(struct mrsas_softc *sc)
+{
+ mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
+ MFI_STOP_ADP);
+ /* Flush */
+ mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
+}
+
+/*
+ * mrsas_wait_for_outstanding: Wait for outstanding commands
+ * input: Adapter Context.
+ *
+ * This function will wait for 180 seconds for outstanding commands to be
+ * completed.
+ */
+int
+mrsas_wait_for_outstanding(struct mrsas_softc *sc)
+{
+ int i, outstanding, retval = 0;
+ u_int32_t fw_state, count, MSIxIndex;
+
+
+ for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
+ if (sc->remove_in_progress) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Driver remove or shutdown called.\n");
+ retval = 1;
+ goto out;
+ }
+ /* Check if firmware is in fault state */
+ fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Found FW in FAULT state, will reset adapter.\n");
+ retval = 1;
+ goto out;
+ }
+ outstanding = atomic_read(&sc->fw_outstanding);
+ if (!outstanding)
+ goto out;
+
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
+ "commands to complete\n", i, outstanding);
+ count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
+ for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
+ mrsas_complete_cmd(sc, MSIxIndex);
+ }
+ DELAY(1000 * 1000);
+ }
+
+ if (atomic_read(&sc->fw_outstanding)) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ " pending commands remain after waiting,"
+ " will reset adapter.\n");
+ retval = 1;
+ }
out:
- return retval;
+ return retval;
}
-/**
- * mrsas_release_mfi_cmd: Return a cmd to free command pool
- * input: Command packet for return to free cmd pool
+/*
+ * mrsas_release_mfi_cmd: Return a cmd to free command pool
+ * input: Command packet for return to free cmd pool
*
* This function returns the MFI command to the command list.
*/
-void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
+void
+mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
{
- struct mrsas_softc *sc = cmd->sc;
+ struct mrsas_softc *sc = cmd->sc;
- mtx_lock(&sc->mfi_cmd_pool_lock);
- cmd->ccb_ptr = NULL;
+ mtx_lock(&sc->mfi_cmd_pool_lock);
+ cmd->ccb_ptr = NULL;
cmd->cmd_id.frame_count = 0;
- TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
- mtx_unlock(&sc->mfi_cmd_pool_lock);
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
+ mtx_unlock(&sc->mfi_cmd_pool_lock);
- return;
+ return;
}
-/**
- * mrsas_get_controller_info - Returns FW's controller structure
- * input: Adapter soft state
- * Controller information structure
+/*
+ * mrsas_get_controller_info: Returns FW's controller structure
+ * input: Adapter soft state
+ * Controller information structure
*
- * Issues an internal command (DCMD) to get the FW's controller structure.
- * This information is mainly used to find out the maximum IO transfer per
- * command supported by the FW.
+ * Issues an internal command (DCMD) to get the FW's controller structure. This
+ * information is mainly used to find out the maximum IO transfer per command
+ * supported by the FW.
*/
-static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
- struct mrsas_ctrl_info *ctrl_info)
+static int
+mrsas_get_ctrl_info(struct mrsas_softc *sc,
+ struct mrsas_ctrl_info *ctrl_info)
{
- int retcode = 0;
- struct mrsas_mfi_cmd *cmd;
- struct mrsas_dcmd_frame *dcmd;
+ int retcode = 0;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
- cmd = mrsas_get_mfi_cmd(sc);
+ cmd = mrsas_get_mfi_cmd(sc);
- if (!cmd) {
- device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
- return -ENOMEM;
- }
- dcmd = &cmd->frame->dcmd;
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+ dcmd = &cmd->frame->dcmd;
- if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
- mrsas_release_mfi_cmd(cmd);
- return -ENOMEM;
- }
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
+ mrsas_release_mfi_cmd(cmd);
+ return -ENOMEM;
+ }
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
- dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
- dcmd->timeout = 0;
- dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
- dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
- dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
+ dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
+ dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
- if (!mrsas_issue_polled(sc, cmd))
- memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
- else
- retcode = 1;
+ if (!mrsas_issue_polled(sc, cmd))
+ memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
+ else
+ retcode = 1;
- mrsas_free_ctlr_info_cmd(sc);
- mrsas_release_mfi_cmd(cmd);
- return(retcode);
+ mrsas_free_ctlr_info_cmd(sc);
+ mrsas_release_mfi_cmd(cmd);
+ return (retcode);
}
-/**
- * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
- * input: Adapter soft state
+/*
+ * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
+ * input: Adapter soft state
*
* Allocates DMAable memory for the controller info internal command.
*/
-int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
-{
- int ctlr_info_size;
-
- /* Allocate get controller info command */
- ctlr_info_size = sizeof(struct mrsas_ctrl_info);
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 1, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- ctlr_info_size, // maxsize
- 1, // msegments
- ctlr_info_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->ctlr_info_tag)) {
- device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
- BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
- return (ENOMEM);
- }
- if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
- sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
- &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
- return (ENOMEM);
- }
-
- memset(sc->ctlr_info_mem, 0, ctlr_info_size);
- return (0);
-}
-
-/**
- * mrsas_free_ctlr_info_cmd: Free memory for controller info command
- * input: Adapter soft state
+int
+mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
+{
+ int ctlr_info_size;
+
+ /* Allocate get controller info command */
+ ctlr_info_size = sizeof(struct mrsas_ctrl_info);
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ ctlr_info_size,
+ 1,
+ ctlr_info_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->ctlr_info_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
+ BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
+ sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
+ &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
+ return (ENOMEM);
+ }
+ memset(sc->ctlr_info_mem, 0, ctlr_info_size);
+ return (0);
+}
+
+/*
+ * mrsas_free_ctlr_info_cmd: Free memory for controller info command
+ * input: Adapter soft state
*
* Deallocates memory of the get controller info cmd.
*/
-void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
-{
- if (sc->ctlr_info_phys_addr)
- bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
- if (sc->ctlr_info_mem != NULL)
- bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
- if (sc->ctlr_info_tag != NULL)
- bus_dma_tag_destroy(sc->ctlr_info_tag);
-}
-
-/**
- * mrsas_issue_polled: Issues a polling command
- * inputs: Adapter soft state
- * Command packet to be issued
- *
- * This function is for posting of internal commands to Firmware. MFI
- * requires the cmd_status to be set to 0xFF before posting. The maximun
- * wait time of the poll response timer is 180 seconds.
- */
-int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
-{
- struct mrsas_header *frame_hdr = &cmd->frame->hdr;
- u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
- int i, retcode = 0;
-
- frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
-
- /* Issue the frame using inbound queue port */
- if (mrsas_issue_dcmd(sc, cmd)) {
- device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
- return(1);
- }
-
- /*
- * Poll response timer to wait for Firmware response. While this
- * timer with the DELAY call could block CPU, the time interval for
- * this is only 1 millisecond.
- */
- if (frame_hdr->cmd_status == 0xFF) {
- for (i=0; i < (max_wait * 1000); i++){
- if (frame_hdr->cmd_status == 0xFF)
- DELAY(1000);
- else
- break;
- }
- }
- if (frame_hdr->cmd_status != 0)
- {
- if (frame_hdr->cmd_status == 0xFF)
- device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
- else
- device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
- retcode = 1;
- }
- return(retcode);
-}
-
-/**
- * mrsas_issue_dcmd - Issues a MFI Pass thru cmd
- * input: Adapter soft state
- * mfi cmd pointer
+void
+mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
+{
+ if (sc->ctlr_info_phys_addr)
+ bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
+ if (sc->ctlr_info_mem != NULL)
+ bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
+ if (sc->ctlr_info_tag != NULL)
+ bus_dma_tag_destroy(sc->ctlr_info_tag);
+}
+
+/*
+ * mrsas_issue_polled: Issues a polling command
+ * inputs: Adapter soft state
+ * Command packet to be issued
+ *
+ * This function is for posting of internal commands to Firmware. MFI requires
+ * the cmd_status to be set to 0xFF before posting. The maximum wait time of
+ * the poll response timer is 180 seconds.
+ */
+int
+mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ struct mrsas_header *frame_hdr = &cmd->frame->hdr;
+ u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
+ int i, retcode = 0;
+
+ frame_hdr->cmd_status = 0xFF;
+ frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ /* Issue the frame using inbound queue port */
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
+ return (1);
+ }
+ /*
+ * Poll response timer to wait for Firmware response. While this
+ * timer with the DELAY call could block CPU, the time interval for
+ * this is only 1 millisecond.
+ */
+ if (frame_hdr->cmd_status == 0xFF) {
+ for (i = 0; i < (max_wait * 1000); i++) {
+ if (frame_hdr->cmd_status == 0xFF)
+ DELAY(1000);
+ else
+ break;
+ }
+ }
+ if (frame_hdr->cmd_status != 0) {
+ if (frame_hdr->cmd_status == 0xFF)
+ device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
+ else
+ device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
+ retcode = 1;
+ }
+ return (retcode);
+}
+
+/*
+ * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
+ * input: Adapter soft state mfi cmd pointer
*
* This function is called by mrsas_issued_blocked_cmd() and
- * mrsas_issued_polled(), to build the MPT command and then fire the
- * command to Firmware.
+ * mrsas_issued_polled(), to build the MPT command and then fire the command
+ * to Firmware.
*/
int
mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
- MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
-
- req_desc = mrsas_build_mpt_cmd(sc, cmd);
- if (!req_desc) {
- device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
- return(1);
- }
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
- mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
+ req_desc = mrsas_build_mpt_cmd(sc, cmd);
+ if (!req_desc) {
+ device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
+ return (1);
+ }
+ mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
- return(0);
+ return (0);
}
-/**
- * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
- * input: Adapter soft state
- * mfi cmd to build
+/*
+ * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
+ * input: Adapter soft state mfi cmd to build
*
- * This function is called by mrsas_issue_cmd() to build the MPT-MFI
- * passthru command and prepares the MPT command to send to Firmware.
+ * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
+ * command and prepares the MPT command to send to Firmware.
*/
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
- MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
- u_int16_t index;
-
- if (mrsas_build_mptmfi_passthru(sc, cmd)) {
- device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
- return NULL;
- }
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u_int16_t index;
- index = cmd->cmd_id.context.smid;
+ if (mrsas_build_mptmfi_passthru(sc, cmd)) {
+ device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
+ return NULL;
+ }
+ index = cmd->cmd_id.context.smid;
- req_desc = mrsas_get_request_desc(sc, index-1);
- if(!req_desc)
- return NULL;
+ req_desc = mrsas_get_request_desc(sc, index - 1);
+ if (!req_desc)
+ return NULL;
- req_desc->addr.Words = 0;
- req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ req_desc->addr.Words = 0;
+ req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- req_desc->SCSIIO.SMID = index;
+ req_desc->SCSIIO.SMID = index;
- return(req_desc);
+ return (req_desc);
}
-/**
- * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
- * input: Adapter soft state
- * mfi cmd pointer
+/*
+ * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
+ * input: Adapter soft state mfi cmd pointer
*
- * The MPT command and the io_request are setup as a passthru command.
- * The SGE chain address is set to frame_phys_addr of the MFI command.
+ * The MPT command and the io_request are setup as a passthru command. The SGE
+ * chain address is set to frame_phys_addr of the MFI command.
*/
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
- MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
- PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
- struct mrsas_mpt_cmd *mpt_cmd;
- struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
+ MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
+ PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
+ struct mrsas_mpt_cmd *mpt_cmd;
+ struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
- mpt_cmd = mrsas_get_mpt_cmd(sc);
- if (!mpt_cmd)
- return(1);
+ mpt_cmd = mrsas_get_mpt_cmd(sc);
+ if (!mpt_cmd)
+ return (1);
- /* Save the smid. To be used for returning the cmd */
- mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
+ /* Save the smid. To be used for returning the cmd */
+ mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
- mpt_cmd->sync_cmd_idx = mfi_cmd->index;
+ mpt_cmd->sync_cmd_idx = mfi_cmd->index;
- /*
- * For cmds where the flag is set, store the flag and check
- * on completion. For cmds with this flag, don't call
- * mrsas_complete_cmd.
- */
+ /*
+ * For cmds where the flag is set, store the flag and check on
+ * completion. For cmds with this flag, don't call
+ * mrsas_complete_cmd.
+ */
- if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
- mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+ if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
+ mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
- io_req = mpt_cmd->io_request;
+ io_req = mpt_cmd->io_request;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
- pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
- sgl_ptr_end += sc->max_sge_in_main_msg - 1;
- sgl_ptr_end->Flags = 0;
- }
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
- mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
+ sgl_ptr_end += sc->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+ mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
- io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
- io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
- io_req->ChainOffset = sc->chain_offset_mfi_pthru;
+ io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
+ io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
+ io_req->ChainOffset = sc->chain_offset_mfi_pthru;
- mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
+ mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
- mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
- MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+ mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
- mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
+ mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
- return(0);
+ return (0);
}
-/**
- * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
- * input: Adapter soft state
- * Command to be issued
+/*
+ * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
+ * input: Adapter soft state Command to be issued
*
- * This function waits on an event for the command to be returned
- * from the ISR. Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
- * Used for issuing internal and ioctl commands.
+ * This function waits on an event for the command to be returned from the ISR.
+ * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
+ * internal and ioctl commands.
*/
-int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+int
+mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
- u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
- unsigned long total_time = 0;
- int retcode = 0;
-
- /* Initialize cmd_status */
- cmd->cmd_status = ECONNREFUSED;
-
- /* Build MPT-MFI command for issue to FW */
- if (mrsas_issue_dcmd(sc, cmd)){
- device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
- return(1);
- }
-
- sc->chan = (void*)&cmd;
+ u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
+ unsigned long total_time = 0;
+ int retcode = 0;
- /* The following is for debug only... */
- //device_printf(sc->mrsas_dev,"DCMD issued to FW, about to sleep-wait...\n");
- //device_printf(sc->mrsas_dev,"sc->chan = %p\n", sc->chan);
+ /* Initialize cmd_status */
+ cmd->cmd_status = ECONNREFUSED;
- while (1) {
- if (cmd->cmd_status == ECONNREFUSED){
- tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
- }
- else
- break;
- total_time++;
- if (total_time >= max_wait) {
- device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
- retcode = 1;
- break;
- }
- }
- return(retcode);
+ /* Build MPT-MFI command for issue to FW */
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
+ return (1);
+ }
+ sc->chan = (void *)&cmd;
+
+ while (1) {
+ if (cmd->cmd_status == ECONNREFUSED) {
+ tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
+ } else
+ break;
+ total_time++;
+ if (total_time >= max_wait) {
+ device_printf(sc->mrsas_dev,
+ "Internal command timed out after %d seconds.\n", max_wait);
+ retcode = 1;
+ break;
+ }
+ }
+ return (retcode);
}
-/**
- * mrsas_complete_mptmfi_passthru - Completes a command
- * input: sc: Adapter soft state
- * cmd: Command to be completed
- * status: cmd completion status
+/*
+ * mrsas_complete_mptmfi_passthru: Completes a command
+ * input: @sc: Adapter soft state
+ * @cmd: Command to be completed
+ * @status: cmd completion status
*
- * This function is called from mrsas_complete_cmd() after an interrupt
- * is received from Firmware, and io_request->Function is
+ * This function is called from mrsas_complete_cmd() after an interrupt is
+ * received from Firmware, and io_request->Function is
* MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
*/
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
- u_int8_t status)
-{
- struct mrsas_header *hdr = &cmd->frame->hdr;
- u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
-
- /* Reset the retry counter for future re-tries */
- cmd->retry_for_fw_reset = 0;
-
- if (cmd->ccb_ptr)
- cmd->ccb_ptr = NULL;
-
- switch (hdr->cmd) {
- case MFI_CMD_INVALID:
- device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
- break;
- case MFI_CMD_PD_SCSI_IO:
- case MFI_CMD_LD_SCSI_IO:
- /*
- * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
- * issued either through an IO path or an IOCTL path. If it
- * was via IOCTL, we will send it to internal completion.
- */
- if (cmd->sync_cmd) {
- cmd->sync_cmd = 0;
- mrsas_wakeup(sc, cmd);
- break;
- }
- case MFI_CMD_SMP:
- case MFI_CMD_STP:
- case MFI_CMD_DCMD:
- /* Check for LD map update */
- if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
- (cmd->frame->dcmd.mbox.b[1] == 1)) {
- sc->fast_path_io = 0;
- mtx_lock(&sc->raidmap_lock);
- if (cmd_status != 0) {
- if (cmd_status != MFI_STAT_NOT_FOUND)
- device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
- else {
- mrsas_release_mfi_cmd(cmd);
- mtx_unlock(&sc->raidmap_lock);
- break;
- }
- }
- else
- sc->map_id++;
- mrsas_release_mfi_cmd(cmd);
- if (MR_ValidateMapInfo(sc))
- sc->fast_path_io = 0;
- else
- sc->fast_path_io = 1;
- mrsas_sync_map_info(sc);
- mtx_unlock(&sc->raidmap_lock);
- break;
- }
-#if 0 //currently not supporting event handling, so commenting out
- if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
- cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
- mrsas_poll_wait_aen = 0;
- }
-#endif
- /* See if got an event notification */
- if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
- mrsas_complete_aen(sc, cmd);
- else
- mrsas_wakeup(sc, cmd);
- break;
- case MFI_CMD_ABORT:
- /* Command issued to abort another cmd return */
- mrsas_complete_abort(sc, cmd);
- break;
- default:
- device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
- break;
- }
+ u_int8_t status)
+{
+ struct mrsas_header *hdr = &cmd->frame->hdr;
+ u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
+
+ /* Reset the retry counter for future re-tries */
+ cmd->retry_for_fw_reset = 0;
+
+ if (cmd->ccb_ptr)
+ cmd->ccb_ptr = NULL;
+
+ switch (hdr->cmd) {
+ case MFI_CMD_INVALID:
+ device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
+ break;
+ case MFI_CMD_PD_SCSI_IO:
+ case MFI_CMD_LD_SCSI_IO:
+ /*
+ * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
+ * issued either through an IO path or an IOCTL path. If it
+ * was via IOCTL, we will send it to internal completion.
+ */
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ mrsas_wakeup(sc, cmd);
+ break;
+ }
+ case MFI_CMD_SMP:
+ case MFI_CMD_STP:
+ case MFI_CMD_DCMD:
+ /* Check for LD map update */
+ if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
+ (cmd->frame->dcmd.mbox.b[1] == 1)) {
+ sc->fast_path_io = 0;
+ mtx_lock(&sc->raidmap_lock);
+ if (cmd_status != 0) {
+ if (cmd_status != MFI_STAT_NOT_FOUND)
+ device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
+ else {
+ mrsas_release_mfi_cmd(cmd);
+ mtx_unlock(&sc->raidmap_lock);
+ break;
+ }
+ } else
+ sc->map_id++;
+ mrsas_release_mfi_cmd(cmd);
+ if (MR_ValidateMapInfo(sc))
+ sc->fast_path_io = 0;
+ else
+ sc->fast_path_io = 1;
+ mrsas_sync_map_info(sc);
+ mtx_unlock(&sc->raidmap_lock);
+ break;
+ }
+ if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+ cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
+ sc->mrsas_aen_triggered = 0;
+ }
+ /* See if got an event notification */
+ if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ mrsas_complete_aen(sc, cmd);
+ else
+ mrsas_wakeup(sc, cmd);
+ break;
+ case MFI_CMD_ABORT:
+ /* Command issued to abort another cmd return */
+ mrsas_complete_abort(sc, cmd);
+ break;
+ default:
+ device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
+ break;
+ }
}
-/**
- * mrsas_wakeup - Completes an internal command
- * input: Adapter soft state
- * Command to be completed
+/*
+ * mrsas_wakeup: Completes an internal command
+ * input: Adapter soft state
+ * Command to be completed
*
- * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
- * a wait timer is started. This function is called from
- * mrsas_complete_mptmfi_passthru() as it completes the command,
- * to wake up from the command wait.
+ * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
+ * timer is started. This function is called from
+ * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
+ * from the command wait.
*/
-void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+void
+mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
- cmd->cmd_status = cmd->frame->io.cmd_status;
+ cmd->cmd_status = cmd->frame->io.cmd_status;
- if (cmd->cmd_status == ECONNREFUSED)
- cmd->cmd_status = 0;
+ if (cmd->cmd_status == ECONNREFUSED)
+ cmd->cmd_status = 0;
- /* For debug only ... */
- //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
-
- sc->chan = (void*)&cmd;
- wakeup_one((void *)&sc->chan);
- return;
+ sc->chan = (void *)&cmd;
+ wakeup_one((void *)&sc->chan);
+ return;
}
-/**
- * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
- * input: Adapter soft state
- * Shutdown/Hibernate
+/*
+ * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
+ * Adapter soft state Shutdown/Hibernate
*
- * This function issues a DCMD internal command to Firmware to initiate
- * shutdown of the controller.
+ * This function issues a DCMD internal command to Firmware to initiate shutdown
+ * of the controller.
*/
-static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
+static void
+mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
- struct mrsas_mfi_cmd *cmd;
- struct mrsas_dcmd_frame *dcmd;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
- if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
- return;
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
+ return;
- cmd = mrsas_get_mfi_cmd(sc);
- if (!cmd) {
- device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
- return;
- }
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
+ return;
+ }
+ if (sc->aen_cmd)
+ mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
- if (sc->aen_cmd)
- mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
-
if (sc->map_update_cmd)
- mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
+ mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
- dcmd = &cmd->frame->dcmd;
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ dcmd = &cmd->frame->dcmd;
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0x0;
- dcmd->sge_count = 0;
- dcmd->flags = MFI_FRAME_DIR_NONE;
- dcmd->timeout = 0;
- dcmd->pad_0 = 0;
- dcmd->data_xfer_len = 0;
- dcmd->opcode = opcode;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = opcode;
- device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
+ device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
- mrsas_issue_blocked_cmd(sc, cmd);
- mrsas_release_mfi_cmd(cmd);
+ mrsas_issue_blocked_cmd(sc, cmd);
+ mrsas_release_mfi_cmd(cmd);
- return;
+ return;
}
-/**
- * mrsas_flush_cache: Requests FW to flush all its caches
- * input: Adapter soft state
+/*
+ * mrsas_flush_cache: Requests FW to flush all its caches input:
+ * Adapter soft state
*
* This function is issues a DCMD internal command to Firmware to initiate
* flushing of all caches.
*/
-static void mrsas_flush_cache(struct mrsas_softc *sc)
-{
- struct mrsas_mfi_cmd *cmd;
- struct mrsas_dcmd_frame *dcmd;
-
- if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
- return;
-
- cmd = mrsas_get_mfi_cmd(sc);
- if (!cmd) {
- device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
- return;
- }
-
- dcmd = &cmd->frame->dcmd;
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0x0;
- dcmd->sge_count = 0;
- dcmd->flags = MFI_FRAME_DIR_NONE;
- dcmd->timeout = 0;
- dcmd->pad_0 = 0;
- dcmd->data_xfer_len = 0;
- dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
- dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
-
- mrsas_issue_blocked_cmd(sc, cmd);
- mrsas_release_mfi_cmd(cmd);
-
- return;
-}
-
-/**
- * mrsas_get_map_info: Load and validate RAID map
- * input: Adapter instance soft state
- *
- * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
- * to load and validate RAID map. It returns 0 if successful, 1 other-
- * wise.
- */
-static int mrsas_get_map_info(struct mrsas_softc *sc)
-{
- uint8_t retcode = 0;
-
- sc->fast_path_io = 0;
- if (!mrsas_get_ld_map_info(sc)) {
- retcode = MR_ValidateMapInfo(sc);
- if (retcode == 0) {
- sc->fast_path_io = 1;
- return 0;
- }
- }
- return 1;
-}
-
-/**
- * mrsas_get_ld_map_info: Get FW's ld_map structure
- * input: Adapter instance soft state
- *
- * Issues an internal command (DCMD) to get the FW's controller PD
- * list structure.
- */
-static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
-{
- int retcode = 0;
- struct mrsas_mfi_cmd *cmd;
- struct mrsas_dcmd_frame *dcmd;
- MR_FW_RAID_MAP_ALL *map;
- bus_addr_t map_phys_addr = 0;
-
- cmd = mrsas_get_mfi_cmd(sc);
- if (!cmd) {
- device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
- return 1;
- }
-
- dcmd = &cmd->frame->dcmd;
-
- map = sc->raidmap_mem[(sc->map_id & 1)];
- map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
- if (!map) {
- device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
- mrsas_release_mfi_cmd(cmd);
- return (ENOMEM);
- }
- memset(map, 0, sizeof(*map));
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
- dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
- dcmd->timeout = 0;
- dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sc->map_sz;
- dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
- dcmd->sgl.sge32[0].length = sc->map_sz;
- if (!mrsas_issue_polled(sc, cmd))
- retcode = 0;
- else
- {
- device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
- retcode = 1;
- }
- mrsas_release_mfi_cmd(cmd);
- return(retcode);
-}
-
-/**
- * mrsas_sync_map_info: Get FW's ld_map structure
- * input: Adapter instance soft state
- *
- * Issues an internal command (DCMD) to get the FW's controller PD
- * list structure.
- */
-static int mrsas_sync_map_info(struct mrsas_softc *sc)
-{
- int retcode = 0, i;
- struct mrsas_mfi_cmd *cmd;
- struct mrsas_dcmd_frame *dcmd;
- uint32_t size_sync_info, num_lds;
- MR_LD_TARGET_SYNC *target_map = NULL;
- MR_FW_RAID_MAP_ALL *map;
- MR_LD_RAID *raid;
- MR_LD_TARGET_SYNC *ld_sync;
- bus_addr_t map_phys_addr = 0;
-
- cmd = mrsas_get_mfi_cmd(sc);
- if (!cmd) {
- device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
- return 1;
- }
-
- map = sc->raidmap_mem[sc->map_id & 1];
- num_lds = map->raidMap.ldCount;
-
- dcmd = &cmd->frame->dcmd;
- size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
- memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
-
- map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
-
- ld_sync = (MR_LD_TARGET_SYNC *)target_map;
-
- for (i = 0; i < num_lds; i++, ld_sync++) {
- raid = MR_LdRaidGet(i, map);
- ld_sync->targetId = MR_GetLDTgtId(i, map);
- ld_sync->seqNum = raid->seqNum;
- }
-
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
- dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_WRITE;
- dcmd->timeout = 0;
- dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sc->map_sz;
- dcmd->mbox.b[0] = num_lds;
- dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
- dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
- dcmd->sgl.sge32[0].length = sc->map_sz;
-
- sc->map_update_cmd = cmd;
- if (mrsas_issue_dcmd(sc, cmd)) {
- device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
- return(1);
- }
- return(retcode);
-}
-
-/**
- * mrsas_get_pd_list: Returns FW's PD list structure
- * input: Adapter soft state
- *
- * Issues an internal command (DCMD) to get the FW's controller PD
- * list structure. This information is mainly used to find out about
- * system supported by Firmware.
- */
-static int mrsas_get_pd_list(struct mrsas_softc *sc)
-{
- int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
- struct mrsas_mfi_cmd *cmd;
- struct mrsas_dcmd_frame *dcmd;
- struct MR_PD_LIST *pd_list_mem;
- struct MR_PD_ADDRESS *pd_addr;
- bus_addr_t pd_list_phys_addr = 0;
- struct mrsas_tmp_dcmd *tcmd;
-
- cmd = mrsas_get_mfi_cmd(sc);
- if (!cmd) {
- device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
- return 1;
- }
-
- dcmd = &cmd->frame->dcmd;
-
- tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
- pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
- if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
- mrsas_release_mfi_cmd(cmd);
- return(ENOMEM);
- }
- else {
- pd_list_mem = tcmd->tmp_dcmd_mem;
- pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
- }
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
- dcmd->mbox.b[1] = 0;
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
- dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
- dcmd->timeout = 0;
- dcmd->pad_0 = 0;
- dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
- dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
- dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
- dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
-
- if (!mrsas_issue_polled(sc, cmd))
- retcode = 0;
- else
- retcode = 1;
-
- /* Get the instance PD list */
- pd_count = MRSAS_MAX_PD;
- pd_addr = pd_list_mem->addr;
- if (retcode == 0 && pd_list_mem->count < pd_count) {
- memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
- for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
- sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
- sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
- sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
- pd_addr++;
- }
- }
-
- /* Use mutext/spinlock if pd_list component size increase more than 32 bit. */
- memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
- mrsas_free_tmp_dcmd(tcmd);
- mrsas_release_mfi_cmd(cmd);
- free(tcmd, M_MRSAS);
- return(retcode);
-}
-
-/**
- * mrsas_get_ld_list: Returns FW's LD list structure
- * input: Adapter soft state
- *
- * Issues an internal command (DCMD) to get the FW's controller PD
- * list structure. This information is mainly used to find out about
- * supported by the FW.
+static void
+mrsas_flush_cache(struct mrsas_softc *sc)
+{
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
+ return;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
+ return;
+ }
+ dcmd = &cmd->frame->dcmd;
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+ dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
+
+ mrsas_issue_blocked_cmd(sc, cmd);
+ mrsas_release_mfi_cmd(cmd);
+
+ return;
+}
+
+/*
+ * mrsas_get_map_info: Load and validate RAID map input:
+ * Adapter instance soft state
+ *
+ * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
+ * and validate RAID map. It returns 0 if successful, 1 otherwise.
*/
-static int mrsas_get_ld_list(struct mrsas_softc *sc)
-{
- int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
- struct mrsas_mfi_cmd *cmd;
- struct mrsas_dcmd_frame *dcmd;
- struct MR_LD_LIST *ld_list_mem;
- bus_addr_t ld_list_phys_addr = 0;
- struct mrsas_tmp_dcmd *tcmd;
-
- cmd = mrsas_get_mfi_cmd(sc);
- if (!cmd) {
- device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
- return 1;
- }
-
- dcmd = &cmd->frame->dcmd;
-
- tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
- ld_list_size = sizeof(struct MR_LD_LIST);
- if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
- mrsas_release_mfi_cmd(cmd);
- return(ENOMEM);
- }
- else {
- ld_list_mem = tcmd->tmp_dcmd_mem;
- ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
- }
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
- dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
- dcmd->timeout = 0;
- dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
- dcmd->opcode = MR_DCMD_LD_GET_LIST;
- dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
- dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
- dcmd->pad_0 = 0;
-
- if (!mrsas_issue_polled(sc, cmd))
- retcode = 0;
- else
- retcode = 1;
-
- /* Get the instance LD list */
- if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
- sc->CurLdCount = ld_list_mem->ldCount;
- memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
- for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
- if (ld_list_mem->ldList[ld_index].state != 0) {
- ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
- sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
- }
- }
- }
-
- mrsas_free_tmp_dcmd(tcmd);
- mrsas_release_mfi_cmd(cmd);
- free(tcmd, M_MRSAS);
- return(retcode);
-}
-
-/**
- * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
- * input: Adapter soft state
- * Temp command
- * Size of alloction
+static int
+mrsas_get_map_info(struct mrsas_softc *sc)
+{
+ uint8_t retcode = 0;
+
+ sc->fast_path_io = 0;
+ if (!mrsas_get_ld_map_info(sc)) {
+ retcode = MR_ValidateMapInfo(sc);
+ if (retcode == 0) {
+ sc->fast_path_io = 1;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * mrsas_get_ld_map_info: Get FW's ld_map structure input:
+ * Adapter instance soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD list
+ * structure.
+ */
+static int
+mrsas_get_ld_map_info(struct mrsas_softc *sc)
+{
+ int retcode = 0;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ void *map;
+ bus_addr_t map_phys_addr = 0;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc for ld map info cmd.\n");
+ return 1;
+ }
+ dcmd = &cmd->frame->dcmd;
+
+ map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
+ map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
+ if (!map) {
+ device_printf(sc->mrsas_dev,
+ "Failed to alloc mem for ld map info.\n");
+ mrsas_release_mfi_cmd(cmd);
+ return (ENOMEM);
+ }
+ memset(map, 0, sizeof(sc->max_map_sz));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sc->current_map_sz;
+ dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
+ dcmd->sgl.sge32[0].length = sc->current_map_sz;
+
+ if (!mrsas_issue_polled(sc, cmd))
+ retcode = 0;
+ else {
+ device_printf(sc->mrsas_dev,
+ "Fail to send get LD map info cmd.\n");
+ retcode = 1;
+ }
+ mrsas_release_mfi_cmd(cmd);
+
+ return (retcode);
+}
+
+/*
+ * mrsas_sync_map_info: Get FW's ld_map structure input:
+ * Adapter instance soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD list
+ * structure.
+ */
+static int
+mrsas_sync_map_info(struct mrsas_softc *sc)
+{
+ int retcode = 0, i;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ uint32_t size_sync_info, num_lds;
+ MR_LD_TARGET_SYNC *target_map = NULL;
+ MR_DRV_RAID_MAP_ALL *map;
+ MR_LD_RAID *raid;
+ MR_LD_TARGET_SYNC *ld_sync;
+ bus_addr_t map_phys_addr = 0;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc for sync map info cmd\n");
+ return 1;
+ }
+ map = sc->ld_drv_map[sc->map_id & 1];
+ num_lds = map->raidMap.ldCount;
+
+ dcmd = &cmd->frame->dcmd;
+ size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
+ memset(target_map, 0, sc->max_map_sz);
+
+ map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
+
+ ld_sync = (MR_LD_TARGET_SYNC *) target_map;
+
+ for (i = 0; i < num_lds; i++, ld_sync++) {
+ raid = MR_LdRaidGet(i, map);
+ ld_sync->targetId = MR_GetLDTgtId(i, map);
+ ld_sync->seqNum = raid->seqNum;
+ }
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sc->current_map_sz;
+ dcmd->mbox.b[0] = num_lds;
+ dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
+ dcmd->sgl.sge32[0].length = sc->current_map_sz;
+
+ sc->map_update_cmd = cmd;
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev,
+ "Fail to send sync map info command.\n");
+ return (1);
+ }
+ return (retcode);
+}
+
+/*
+ * mrsas_get_pd_list: Returns FW's PD list structure input:
+ * Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD list
+ * structure. This information is mainly used to find out about the system
+ * drives supported by Firmware.
+ */
+static int
+mrsas_get_pd_list(struct mrsas_softc *sc)
+{
+ int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ struct MR_PD_LIST *pd_list_mem;
+ struct MR_PD_ADDRESS *pd_addr;
+ bus_addr_t pd_list_phys_addr = 0;
+ struct mrsas_tmp_dcmd *tcmd;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc for get PD list cmd\n");
+ return 1;
+ }
+ dcmd = &cmd->frame->dcmd;
+
+ tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
+ pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
+ if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc dmamap for get PD list cmd\n");
+ mrsas_release_mfi_cmd(cmd);
+ return (ENOMEM);
+ } else {
+ pd_list_mem = tcmd->tmp_dcmd_mem;
+ pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
+ }
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+ dcmd->mbox.b[1] = 0;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
+ dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
+ dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
+ dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
+
+ if (!mrsas_issue_polled(sc, cmd))
+ retcode = 0;
+ else
+ retcode = 1;
+
+ /* Get the instance PD list */
+ pd_count = MRSAS_MAX_PD;
+ pd_addr = pd_list_mem->addr;
+ if (retcode == 0 && pd_list_mem->count < pd_count) {
+ memset(sc->local_pd_list, 0,
+ MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+ for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
+ sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
+ sc->local_pd_list[pd_addr->deviceId].driveType =
+ pd_addr->scsiDevType;
+ sc->local_pd_list[pd_addr->deviceId].driveState =
+ MR_PD_STATE_SYSTEM;
+ pd_addr++;
+ }
+ }
+ /*
+ * Use mutex/spinlock if pd_list component size increases more than
+ * 32 bit.
+ */
+ memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
+ mrsas_free_tmp_dcmd(tcmd);
+ mrsas_release_mfi_cmd(cmd);
+ free(tcmd, M_MRSAS);
+ return (retcode);
+}
+
+/*
+ * mrsas_get_ld_list: Returns FW's LD list structure input:
+ * Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD list
+ * structure. This information is mainly used to find out about the LDs
+ * supported by the FW.
+ */
+static int
+mrsas_get_ld_list(struct mrsas_softc *sc)
+{
+ int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ struct MR_LD_LIST *ld_list_mem;
+ bus_addr_t ld_list_phys_addr = 0;
+ struct mrsas_tmp_dcmd *tcmd;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc for get LD list cmd\n");
+ return 1;
+ }
+ dcmd = &cmd->frame->dcmd;
+
+ tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
+ ld_list_size = sizeof(struct MR_LD_LIST);
+ if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc dmamap for get LD list cmd\n");
+ mrsas_release_mfi_cmd(cmd);
+ return (ENOMEM);
+ } else {
+ ld_list_mem = tcmd->tmp_dcmd_mem;
+ ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
+ }
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ if (sc->max256vdSupport)
+ dcmd->mbox.b[0] = 1;
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
+ dcmd->opcode = MR_DCMD_LD_GET_LIST;
+ dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
+ dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+ dcmd->pad_0 = 0;
+
+ if (!mrsas_issue_polled(sc, cmd))
+ retcode = 0;
+ else
+ retcode = 1;
+
+#if VD_EXT_DEBUG
+ printf("Number of LDs %d\n", ld_list_mem->ldCount);
+#endif
+
+ /* Get the instance LD list */
+ if ((retcode == 0) &&
+ (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) {
+ sc->CurLdCount = ld_list_mem->ldCount;
+ memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
+ for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
+ if (ld_list_mem->ldList[ld_index].state != 0) {
+ ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
+ sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
+ }
+ }
+ }
+ mrsas_free_tmp_dcmd(tcmd);
+ mrsas_release_mfi_cmd(cmd);
+ free(tcmd, M_MRSAS);
+ return (retcode);
+}
+
+/*
+ * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
+ * Adapter soft state Temp command Size of alloction
*
* Allocates DMAable memory for a temporary internal command. The allocated
- * memory is initialized to all zeros upon successful loading of the dma
+ * memory is initialized to all zeros upon successful loading of the dma
* mapped memory.
*/
-int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
- int size)
-{
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 1, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- size, // maxsize
- 1, // msegments
- size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &tcmd->tmp_dcmd_tag)) {
- device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
- BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
- return (ENOMEM);
- }
- if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
- tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
- &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
- return (ENOMEM);
- }
-
- memset(tcmd->tmp_dcmd_mem, 0, size);
- return (0);
-}
-
-/**
- * mrsas_free_tmp_dcmd: Free memory for temporary command
- * input: temporary dcmd pointer
- *
- * Deallocates memory of the temporary command for use in the construction
- * of the internal DCMD.
- */
-void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
-{
- if (tmp->tmp_dcmd_phys_addr)
- bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
- if (tmp->tmp_dcmd_mem != NULL)
- bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
- if (tmp->tmp_dcmd_tag != NULL)
- bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
-}
-
-/**
- * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd
- * input: Adapter soft state
- * Previously issued cmd to be aborted
- *
- * This function is used to abort previously issued commands, such as AEN and
- * RAID map sync map commands. The abort command is sent as a DCMD internal
- * command and subsequently the driver will wait for a return status. The
- * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
- */
-static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
- struct mrsas_mfi_cmd *cmd_to_abort)
-{
- struct mrsas_mfi_cmd *cmd;
- struct mrsas_abort_frame *abort_fr;
- u_int8_t retcode = 0;
- unsigned long total_time = 0;
- u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
-
- cmd = mrsas_get_mfi_cmd(sc);
- if (!cmd) {
- device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
- return(1);
- }
-
- abort_fr = &cmd->frame->abort;
-
- /* Prepare and issue the abort frame */
- abort_fr->cmd = MFI_CMD_ABORT;
- abort_fr->cmd_status = 0xFF;
- abort_fr->flags = 0;
- abort_fr->abort_context = cmd_to_abort->index;
- abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
- abort_fr->abort_mfi_phys_addr_hi = 0;
-
- cmd->sync_cmd = 1;
- cmd->cmd_status = 0xFF;
-
- if (mrsas_issue_dcmd(sc, cmd)) {
- device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
- return(1);
- }
-
- /* Wait for this cmd to complete */
- sc->chan = (void*)&cmd;
- while (1) {
- if (cmd->cmd_status == 0xFF){
- tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
- }
- else
- break;
- total_time++;
- if (total_time >= max_wait) {
- device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
- retcode = 1;
- break;
- }
- }
-
- cmd->sync_cmd = 0;
- mrsas_release_mfi_cmd(cmd);
- return(retcode);
-}
-
-/**
- * mrsas_complete_abort: Completes aborting a command
- * input: Adapter soft state
- * Cmd that was issued to abort another cmd
- *
- * The mrsas_issue_blocked_abort_cmd() function waits for the command status
- * to change after sending the command. This function is called from
+int
+mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
+ struct mrsas_tmp_dcmd *tcmd, int size)
+{
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ size,
+ 1,
+ size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &tcmd->tmp_dcmd_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
+ BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
+ tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
+ &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
+ return (ENOMEM);
+ }
+ memset(tcmd->tmp_dcmd_mem, 0, size);
+ return (0);
+}
+
+/*
+ * mrsas_free_tmp_dcmd: Free memory for temporary command input:
+ * temporary dcmd pointer
+ *
+ * Deallocates memory of the temporary command for use in the construction of
+ * the internal DCMD.
+ */
+void
+mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
+{
+ if (tmp->tmp_dcmd_phys_addr)
+ bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
+ if (tmp->tmp_dcmd_mem != NULL)
+ bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
+ if (tmp->tmp_dcmd_tag != NULL)
+ bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
+}
+
+/*
+ * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
+ * Adapter soft state Previously issued cmd to be aborted
+ *
+ * This function is used to abort previously issued commands, such as AEN and
+ * RAID map sync map commands. The abort command is sent as a DCMD internal
+ * command and subsequently the driver will wait for a return status. The
+ * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
+ */
+static int
+mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd_to_abort)
+{
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_abort_frame *abort_fr;
+ u_int8_t retcode = 0;
+ unsigned long total_time = 0;
+ u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
+ return (1);
+ }
+ abort_fr = &cmd->frame->abort;
+
+ /* Prepare and issue the abort frame */
+ abort_fr->cmd = MFI_CMD_ABORT;
+ abort_fr->cmd_status = 0xFF;
+ abort_fr->flags = 0;
+ abort_fr->abort_context = cmd_to_abort->index;
+ abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
+ abort_fr->abort_mfi_phys_addr_hi = 0;
+
+ cmd->sync_cmd = 1;
+ cmd->cmd_status = 0xFF;
+
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
+ return (1);
+ }
+ /* Wait for this cmd to complete */
+ sc->chan = (void *)&cmd;
+ while (1) {
+ if (cmd->cmd_status == 0xFF) {
+ tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
+ } else
+ break;
+ total_time++;
+ if (total_time >= max_wait) {
+ device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
+ retcode = 1;
+ break;
+ }
+ }
+
+ cmd->sync_cmd = 0;
+ mrsas_release_mfi_cmd(cmd);
+ return (retcode);
+}
+
+/*
+ * mrsas_complete_abort: Completes aborting a command input:
+ * Adapter soft state Cmd that was issued to abort another cmd
+ *
+ * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
+ * change after sending the command. This function is called from
* mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
*/
-void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+void
+mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
- if (cmd->sync_cmd) {
- cmd->sync_cmd = 0;
- cmd->cmd_status = 0;
- sc->chan = (void*)&cmd;
- wakeup_one((void *)&sc->chan);
- }
- return;
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ cmd->cmd_status = 0;
+ sc->chan = (void *)&cmd;
+ wakeup_one((void *)&sc->chan);
+ }
+ return;
}
-/**
- * mrsas_aen_handler: Callback function for AEN processing from thread context.
- * input: Adapter soft state
+/*
+ * mrsas_aen_handler: AEN processing callback function from thread context
+ * input: Adapter soft state
*
+ * Asynchronous event handler
*/
-void mrsas_aen_handler(struct mrsas_softc *sc)
+void
+mrsas_aen_handler(struct mrsas_softc *sc)
{
union mrsas_evt_class_locale class_locale;
- int doscan = 0;
+ int doscan = 0;
u_int32_t seq_num;
int error;
@@ -3553,38 +3912,37 @@ void mrsas_aen_handler(struct mrsas_softc *sc)
device_printf(sc->mrsas_dev, "invalid instance!\n");
return;
}
-
if (sc->evt_detail_mem) {
switch (sc->evt_detail_mem->code) {
- case MR_EVT_PD_INSERTED:
- mrsas_get_pd_list(sc);
- mrsas_bus_scan_sim(sc, sc->sim_1);
- doscan = 0;
- break;
- case MR_EVT_PD_REMOVED:
- mrsas_get_pd_list(sc);
- mrsas_bus_scan_sim(sc, sc->sim_1);
- doscan = 0;
- break;
- case MR_EVT_LD_OFFLINE:
- case MR_EVT_CFG_CLEARED:
- case MR_EVT_LD_DELETED:
- mrsas_bus_scan_sim(sc, sc->sim_0);
- doscan = 0;
- break;
- case MR_EVT_LD_CREATED:
- mrsas_get_ld_list(sc);
- mrsas_bus_scan_sim(sc, sc->sim_0);
- doscan = 0;
- break;
- case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
- case MR_EVT_FOREIGN_CFG_IMPORTED:
- case MR_EVT_LD_STATE_CHANGE:
- doscan = 1;
- break;
- default:
- doscan = 0;
- break;
+ case MR_EVT_PD_INSERTED:
+ mrsas_get_pd_list(sc);
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ doscan = 0;
+ break;
+ case MR_EVT_PD_REMOVED:
+ mrsas_get_pd_list(sc);
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ doscan = 0;
+ break;
+ case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
+ case MR_EVT_LD_DELETED:
+ mrsas_bus_scan_sim(sc, sc->sim_0);
+ doscan = 0;
+ break;
+ case MR_EVT_LD_CREATED:
+ mrsas_get_ld_list(sc);
+ mrsas_bus_scan_sim(sc, sc->sim_0);
+ doscan = 0;
+ break;
+ case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+ case MR_EVT_FOREIGN_CFG_IMPORTED:
+ case MR_EVT_LD_STATE_CHANGE:
+ doscan = 1;
+ break;
+ default:
+ doscan = 0;
+ break;
}
} else {
device_printf(sc->mrsas_dev, "invalid evt_detail\n");
@@ -3598,20 +3956,19 @@ void mrsas_aen_handler(struct mrsas_softc *sc)
mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
mrsas_bus_scan_sim(sc, sc->sim_0);
}
-
seq_num = sc->evt_detail_mem->seq_num + 1;
- // Register AEN with FW for latest sequence number plus 1
+ /* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
-
- if (sc->aen_cmd != NULL )
- return ;
+
+ if (sc->aen_cmd != NULL)
+ return;
mtx_lock(&sc->aen_lock);
error = mrsas_register_aen(sc, seq_num,
- class_locale.word);
+ class_locale.word);
mtx_unlock(&sc->aen_lock);
if (error)
@@ -3620,24 +3977,29 @@ void mrsas_aen_handler(struct mrsas_softc *sc)
}
-/**
- * mrsas_complete_aen: Completes AEN command
- * input: Adapter soft state
- * Cmd that was issued to abort another cmd
+/*
+ * mrsas_complete_aen: Completes AEN command
+ * input: Adapter soft state
+ * Cmd that was issued to abort another cmd
*
- * This function will be called from ISR and will continue
- * event processing from thread context by enqueuing task
- * in ev_tq (callback function "mrsas_aen_handler").
+ * This function will be called from ISR and will continue event processing from
+ * thread context by enqueuing task in ev_tq (callback function
+ * "mrsas_aen_handler").
*/
-void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+void
+mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
/*
- * Don't signal app if it is just an aborted previously registered aen
- */
+ * Don't signal app if it is just an aborted previously registered
+ * aen
+ */
if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
- /* TO DO (?) */
- }
- else
+ sc->mrsas_aen_triggered = 1;
+ if (sc->mrsas_poll_waiting) {
+ sc->mrsas_poll_waiting = 0;
+ selwakeup(&sc->mrsas_select);
+ }
+ } else
cmd->abort_aen = 0;
sc->aen_cmd = NULL;
@@ -3650,23 +4012,23 @@ void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
}
static device_method_t mrsas_methods[] = {
- DEVMETHOD(device_probe, mrsas_probe),
- DEVMETHOD(device_attach, mrsas_attach),
- DEVMETHOD(device_detach, mrsas_detach),
- DEVMETHOD(device_suspend, mrsas_suspend),
- DEVMETHOD(device_resume, mrsas_resume),
- DEVMETHOD(bus_print_child, bus_generic_print_child),
- DEVMETHOD(bus_driver_added, bus_generic_driver_added),
- { 0, 0 }
+ DEVMETHOD(device_probe, mrsas_probe),
+ DEVMETHOD(device_attach, mrsas_attach),
+ DEVMETHOD(device_detach, mrsas_detach),
+ DEVMETHOD(device_suspend, mrsas_suspend),
+ DEVMETHOD(device_resume, mrsas_resume),
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+ {0, 0}
};
static driver_t mrsas_driver = {
- "mrsas",
- mrsas_methods,
- sizeof(struct mrsas_softc)
+ "mrsas",
+ mrsas_methods,
+ sizeof(struct mrsas_softc)
};
-static devclass_t mrsas_devclass;
-DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
-MODULE_DEPEND(mrsas, cam, 1,1,1);
+static devclass_t mrsas_devclass;
+DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
+MODULE_DEPEND(mrsas, cam, 1, 1, 1);
diff --git a/sys/dev/mrsas/mrsas.h b/sys/dev/mrsas/mrsas.h
index af43daa..87f73a9 100644
--- a/sys/dev/mrsas/mrsas.h
+++ b/sys/dev/mrsas/mrsas.h
@@ -1,43 +1,38 @@
/*
- * Copyright (c) 2014, LSI Corp.
- * All rights reserved.
- * Authors: Marian Choy
+ * Copyright (c) 2014, LSI Corp. All rights reserved. Authors: Marian Choy
* Support: freebsdraid@lsi.com
*
* Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. Neither the name of the <ORGANIZATION> nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer. 2. Redistributions
+ * in binary form must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution. 3. Neither the name of the
+ * <ORGANIZATION> nor the names of its contributors may be used to endorse or
+ * promote products derived from this software without specific prior written
+ * permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing
* official policies,either expressed or implied, of the FreeBSD Project.
*
- * Send feedback to: <megaraidfbsd@lsi.com>
- * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
- * ATTN: MegaRaid FreeBSD
+ * Send feedback to: <megaraidfbsd@lsi.com> Mail to: LSI Corporation, 1621
+ * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
*
*/
@@ -45,92 +40,103 @@
__FBSDID("$FreeBSD$");
#ifndef MRSAS_H
-#define MRSAS_H
+#define MRSAS_H
-#include <sys/param.h> /* defines used in kernel.h */
+#include <sys/param.h> /* defines used in kernel.h */
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
-#include <sys/kernel.h> /* types used in module initialization */
-#include <sys/conf.h> /* cdevsw struct */
-#include <sys/uio.h> /* uio struct */
+#include <sys/kernel.h> /* types used in module initialization */
+#include <sys/conf.h> /* cdevsw struct */
+#include <sys/uio.h> /* uio struct */
#include <sys/malloc.h>
-#include <sys/bus.h> /* structs, prototypes for pci bus stuff */
-
-#include <machine/bus.h>
+#include <sys/bus.h> /* structs, prototypes for pci bus
+ * stuff */
#include <sys/rman.h>
-#include <machine/resource.h>
-#include <machine/atomic.h>
-
-#include <dev/pci/pcivar.h> /* For pci_get macros! */
-#include <dev/pci/pcireg.h>
-
#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/sema.h>
#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/taskqueue.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/atomic.h>
+
+#include <dev/pci/pcivar.h> /* For pci_get macros! */
+#include <dev/pci/pcireg.h>
+
+
+#define IOCTL_SEMA_DESCRIPTION "mrsas semaphore for MFI pool"
+
/*
* Device IDs and PCI
*/
-#define MRSAS_TBOLT 0x005b
-#define MRSAS_INVADER 0x005d
-#define MRSAS_FURY 0x005f
-#define MRSAS_PCI_BAR0 0x10
-#define MRSAS_PCI_BAR1 0x14
-#define MRSAS_PCI_BAR2 0x1C
+#define MRSAS_TBOLT 0x005b
+#define MRSAS_INVADER 0x005d
+#define MRSAS_FURY 0x005f
+#define MRSAS_PCI_BAR0 0x10
+#define MRSAS_PCI_BAR1 0x14
+#define MRSAS_PCI_BAR2 0x1C
/*
- * Firmware State Defines
+ * Firmware State Defines
*/
-#define MRSAS_FWSTATE_MAXCMD_MASK 0x0000FFFF
-#define MRSAS_FWSTATE_SGE_MASK 0x00FF0000
-#define MRSAS_FW_STATE_CHNG_INTERRUPT 1
+#define MRSAS_FWSTATE_MAXCMD_MASK 0x0000FFFF
+#define MRSAS_FWSTATE_SGE_MASK 0x00FF0000
+#define MRSAS_FW_STATE_CHNG_INTERRUPT 1
/*
* Message Frame Defines
*/
-#define MRSAS_SENSE_LEN 96
-#define MRSAS_FUSION_MAX_RESET_TRIES 3
+#define MRSAS_SENSE_LEN 96
+#define MRSAS_FUSION_MAX_RESET_TRIES 3
+
+/*
+ * Miscellaneous Defines
+ */
+#define BYTE_ALIGNMENT 1
+#define MRSAS_MAX_NAME_LENGTH 32
+#define MRSAS_VERSION "06.705.10.01-fbsd"
+#define MRSAS_ULONG_MAX 0xFFFFFFFFFFFFFFFF
+#define MRSAS_DEFAULT_TIMEOUT 0x14 /* Temporarily set */
+#define DONE 0
+#define MRSAS_PAGE_SIZE 4096
+#define MRSAS_RESET_NOTICE_INTERVAL 5
+#define MRSAS_IO_TIMEOUT 180000 /* 180 second timeout */
+#define MRSAS_LDIO_QUEUE_DEPTH 70 /* 70 percent as default */
+#define THRESHOLD_REPLY_COUNT 50
+#define MAX_MSIX_COUNT 128
/*
- * Miscellaneous Defines
+ * Boolean types
*/
-#define BYTE_ALIGNMENT 1
-#define MRSAS_MAX_NAME_LENGTH 32
-#define MRSAS_VERSION "06.704.01.01-fbsd"
-#define MRSAS_ULONG_MAX 0xFFFFFFFFFFFFFFFF
-#define MRSAS_DEFAULT_TIMEOUT 0x14 //temp
-#define DONE 0
-#define MRSAS_PAGE_SIZE 4096
-#define MRSAS_RESET_NOTICE_INTERVAL 5
-#define MRSAS_IO_TIMEOUT 180000 /* 180 second timeout */
-#define MRSAS_LDIO_QUEUE_DEPTH 70 /* 70 percent as default */
-#define THRESHOLD_REPLY_COUNT 50
-
-/*
- Boolean types
-*/
#if (__FreeBSD_version < 901000)
- typedef enum _boolean { false, true } boolean;
+typedef enum _boolean {
+ false, true
+} boolean;
+
#endif
-enum err { SUCCESS, FAIL };
+enum err {
+ SUCCESS, FAIL
+};
MALLOC_DECLARE(M_MRSAS);
SYSCTL_DECL(_hw_mrsas);
-#define MRSAS_INFO (1 << 0)
-#define MRSAS_TRACE (1 << 1)
-#define MRSAS_FAULT (1 << 2)
-#define MRSAS_OCR (1 << 3)
-#define MRSAS_TOUT MRSAS_OCR
-#define MRSAS_AEN (1 << 4)
-#define MRSAS_PRL11 (1 << 5)
+#define MRSAS_INFO (1 << 0)
+#define MRSAS_TRACE (1 << 1)
+#define MRSAS_FAULT (1 << 2)
+#define MRSAS_OCR (1 << 3)
+#define MRSAS_TOUT MRSAS_OCR
+#define MRSAS_AEN (1 << 4)
+#define MRSAS_PRL11 (1 << 5)
-#define mrsas_dprint(sc, level, msg, args...) \
+#define mrsas_dprint(sc, level, msg, args...) \
do { \
if (sc->mrsas_debug & level) \
device_printf(sc->mrsas_dev, msg, ##args); \
@@ -143,75 +149,75 @@ do { \
****************************************************************************/
typedef struct _RAID_CONTEXT {
- u_int8_t Type:4; // 0x00
- u_int8_t nseg:4; // 0x00
- u_int8_t resvd0; // 0x01
- u_int16_t timeoutValue; // 0x02 -0x03
- u_int8_t regLockFlags; // 0x04
- u_int8_t resvd1; // 0x05
- u_int16_t VirtualDiskTgtId; // 0x06 -0x07
- u_int64_t regLockRowLBA; // 0x08 - 0x0F
- u_int32_t regLockLength; // 0x10 - 0x13
- u_int16_t nextLMId; // 0x14 - 0x15
- u_int8_t exStatus; // 0x16
- u_int8_t status; // 0x17 status
- u_int8_t RAIDFlags; // 0x18 resvd[7:6],ioSubType[5:4],resvd[3:1],preferredCpu[0]
- u_int8_t numSGE; // 0x19 numSge; not including chain entries
- u_int16_t configSeqNum; // 0x1A -0x1B
- u_int8_t spanArm; // 0x1C span[7:5], arm[4:0]
- u_int8_t resvd2[3]; // 0x1D-0x1f
-} RAID_CONTEXT;
+ u_int8_t Type:4;
+ u_int8_t nseg:4;
+ u_int8_t resvd0;
+ u_int16_t timeoutValue;
+ u_int8_t regLockFlags;
+ u_int8_t resvd1;
+ u_int16_t VirtualDiskTgtId;
+ u_int64_t regLockRowLBA;
+ u_int32_t regLockLength;
+ u_int16_t nextLMId;
+ u_int8_t exStatus;
+ u_int8_t status;
+ u_int8_t RAIDFlags;
+ u_int8_t numSGE;
+ u_int16_t configSeqNum;
+ u_int8_t spanArm;
+ u_int8_t resvd2[3];
+} RAID_CONTEXT;
/*************************************************************************
* MPI2 Defines
************************************************************************/
-#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
-#define MPI2_WHOINIT_HOST_DRIVER (0x04)
-#define MPI2_VERSION_MAJOR (0x02)
-#define MPI2_VERSION_MINOR (0x00)
-#define MPI2_VERSION_MAJOR_MASK (0xFF00)
-#define MPI2_VERSION_MAJOR_SHIFT (8)
-#define MPI2_VERSION_MINOR_MASK (0x00FF)
-#define MPI2_VERSION_MINOR_SHIFT (0)
-#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_VERSION_MAJOR (0x02)
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
MPI2_VERSION_MINOR)
-#define MPI2_HEADER_VERSION_UNIT (0x10)
-#define MPI2_HEADER_VERSION_DEV (0x00)
-#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
-#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
-#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
-#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
-#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV)
-#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
-#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
-#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
-#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
-#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
-#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
-#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
-#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
-#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
-#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
-#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
-#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
-#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
-#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
-#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
-#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
-#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
-#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
-#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
-#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
-#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
-#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
-#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
-#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
-#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
#ifndef MPI2_POINTER
-#define MPI2_POINTER *
+#define MPI2_POINTER *
#endif
@@ -219,642 +225,725 @@ typedef struct _RAID_CONTEXT {
* MPI2 Structures
***************************************/
-typedef struct _MPI25_IEEE_SGE_CHAIN64
-{
- u_int64_t Address;
- u_int32_t Length;
- u_int16_t Reserved1;
- u_int8_t NextChainOffset;
- u_int8_t Flags;
-} MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64,
- Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t;
-
-typedef struct _MPI2_SGE_SIMPLE_UNION
-{
- u_int32_t FlagsLength;
- union
- {
- u_int32_t Address32;
- u_int64_t Address64;
- } u;
-} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION,
- Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t;
-
-typedef struct
-{
- u_int8_t CDB[20]; /* 0x00 */
- u_int32_t PrimaryReferenceTag; /* 0x14 */
- u_int16_t PrimaryApplicationTag; /* 0x18 */
- u_int16_t PrimaryApplicationTagMask; /* 0x1A */
- u_int32_t TransferLength; /* 0x1C */
-} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
- Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
-
-typedef struct _MPI2_SGE_CHAIN_UNION
-{
- u_int16_t Length;
- u_int8_t NextChainOffset;
- u_int8_t Flags;
- union
- {
- u_int32_t Address32;
- u_int64_t Address64;
- } u;
-} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION,
- Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t;
-
-typedef struct _MPI2_IEEE_SGE_SIMPLE32
-{
- u_int32_t Address;
- u_int32_t FlagsLength;
-} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32,
- Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t;
-typedef struct _MPI2_IEEE_SGE_SIMPLE64
-{
- u_int64_t Address;
- u_int32_t Length;
- u_int16_t Reserved1;
- u_int8_t Reserved2;
- u_int8_t Flags;
-} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64,
- Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t;
-
-typedef union _MPI2_IEEE_SGE_SIMPLE_UNION
-{
- MPI2_IEEE_SGE_SIMPLE32 Simple32;
- MPI2_IEEE_SGE_SIMPLE64 Simple64;
-} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
- Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t;
-
-typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
-typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
-
-typedef union _MPI2_IEEE_SGE_CHAIN_UNION
-{
- MPI2_IEEE_SGE_CHAIN32 Chain32;
- MPI2_IEEE_SGE_CHAIN64 Chain64;
-} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION,
- Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t;
+typedef struct _MPI25_IEEE_SGE_CHAIN64 {
+ u_int64_t Address;
+ u_int32_t Length;
+ u_int16_t Reserved1;
+ u_int8_t NextChainOffset;
+ u_int8_t Flags;
+} MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64,
+Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION {
+ u_int32_t FlagsLength;
+ union {
+ u_int32_t Address32;
+ u_int64_t Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION,
+Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t;
-typedef union _MPI2_SGE_IO_UNION
-{
- MPI2_SGE_SIMPLE_UNION MpiSimple;
- MPI2_SGE_CHAIN_UNION MpiChain;
- MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
- MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
-} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION,
- Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t;
-
-typedef union
-{
- u_int8_t CDB32[32];
- MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
- MPI2_SGE_SIMPLE_UNION SGE;
-} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION,
- Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t;
+typedef struct {
+ u_int8_t CDB[20]; /* 0x00 */
+ u_int32_t PrimaryReferenceTag; /* 0x14 */
+ u_int16_t PrimaryApplicationTag;/* 0x18 */
+ u_int16_t PrimaryApplicationTagMask; /* 0x1A */
+ u_int32_t TransferLength; /* 0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
+Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION {
+ u_int16_t Length;
+ u_int8_t NextChainOffset;
+ u_int8_t Flags;
+ union {
+ u_int32_t Address32;
+ u_int64_t Address64;
+ } u;
+} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION,
+Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE32 {
+ u_int32_t Address;
+ u_int32_t FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32,
+Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t;
+typedef struct _MPI2_IEEE_SGE_SIMPLE64 {
+ u_int64_t Address;
+ u_int32_t Length;
+ u_int16_t Reserved1;
+ u_int8_t Reserved2;
+ u_int8_t Flags;
+} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64,
+Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION {
+ MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t;
+
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION {
+ MPI2_IEEE_SGE_CHAIN32 Chain32;
+ MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t;
+
+typedef union _MPI2_SGE_IO_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION,
+Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t;
+
+typedef union {
+ u_int8_t CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION,
+Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t;
/*
- * RAID SCSI IO Request Message
- * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
+ * RAID SCSI IO Request Message Total SGE count will be one less than
+ * _MPI2_SCSI_IO_REQUEST
*/
-typedef struct _MPI2_RAID_SCSI_IO_REQUEST
-{
- u_int16_t DevHandle; /* 0x00 */
- u_int8_t ChainOffset; /* 0x02 */
- u_int8_t Function; /* 0x03 */
- u_int16_t Reserved1; /* 0x04 */
- u_int8_t Reserved2; /* 0x06 */
- u_int8_t MsgFlags; /* 0x07 */
- u_int8_t VP_ID; /* 0x08 */
- u_int8_t VF_ID; /* 0x09 */
- u_int16_t Reserved3; /* 0x0A */
- u_int32_t SenseBufferLowAddress; /* 0x0C */
- u_int16_t SGLFlags; /* 0x10 */
- u_int8_t SenseBufferLength; /* 0x12 */
- u_int8_t Reserved4; /* 0x13 */
- u_int8_t SGLOffset0; /* 0x14 */
- u_int8_t SGLOffset1; /* 0x15 */
- u_int8_t SGLOffset2; /* 0x16 */
- u_int8_t SGLOffset3; /* 0x17 */
- u_int32_t SkipCount; /* 0x18 */
- u_int32_t DataLength; /* 0x1C */
- u_int32_t BidirectionalDataLength; /* 0x20 */
- u_int16_t IoFlags; /* 0x24 */
- u_int16_t EEDPFlags; /* 0x26 */
- u_int32_t EEDPBlockSize; /* 0x28 */
- u_int32_t SecondaryReferenceTag; /* 0x2C */
- u_int16_t SecondaryApplicationTag; /* 0x30 */
- u_int16_t ApplicationTagTranslationMask; /* 0x32 */
- u_int8_t LUN[8]; /* 0x34 */
- u_int32_t Control; /* 0x3C */
- MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
- RAID_CONTEXT RaidContext; /* 0x60 */
- MPI2_SGE_IO_UNION SGL; /* 0x80 */
-} MRSAS_RAID_SCSI_IO_REQUEST, MPI2_POINTER PTR_MRSAS_RAID_SCSI_IO_REQUEST,
- MRSASRaidSCSIIORequest_t, MPI2_POINTER pMRSASRaidSCSIIORequest_t;
+typedef struct _MPI2_RAID_SCSI_IO_REQUEST {
+ u_int16_t DevHandle; /* 0x00 */
+ u_int8_t ChainOffset; /* 0x02 */
+ u_int8_t Function; /* 0x03 */
+ u_int16_t Reserved1; /* 0x04 */
+ u_int8_t Reserved2; /* 0x06 */
+ u_int8_t MsgFlags; /* 0x07 */
+ u_int8_t VP_ID; /* 0x08 */
+ u_int8_t VF_ID; /* 0x09 */
+ u_int16_t Reserved3; /* 0x0A */
+ u_int32_t SenseBufferLowAddress;/* 0x0C */
+ u_int16_t SGLFlags; /* 0x10 */
+ u_int8_t SenseBufferLength; /* 0x12 */
+ u_int8_t Reserved4; /* 0x13 */
+ u_int8_t SGLOffset0; /* 0x14 */
+ u_int8_t SGLOffset1; /* 0x15 */
+ u_int8_t SGLOffset2; /* 0x16 */
+ u_int8_t SGLOffset3; /* 0x17 */
+ u_int32_t SkipCount; /* 0x18 */
+ u_int32_t DataLength; /* 0x1C */
+ u_int32_t BidirectionalDataLength; /* 0x20 */
+ u_int16_t IoFlags; /* 0x24 */
+ u_int16_t EEDPFlags; /* 0x26 */
+ u_int32_t EEDPBlockSize; /* 0x28 */
+ u_int32_t SecondaryReferenceTag;/* 0x2C */
+ u_int16_t SecondaryApplicationTag; /* 0x30 */
+ u_int16_t ApplicationTagTranslationMask; /* 0x32 */
+ u_int8_t LUN[8]; /* 0x34 */
+ u_int32_t Control; /* 0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+ RAID_CONTEXT RaidContext; /* 0x60 */
+ MPI2_SGE_IO_UNION SGL; /* 0x80 */
+} MRSAS_RAID_SCSI_IO_REQUEST, MPI2_POINTER PTR_MRSAS_RAID_SCSI_IO_REQUEST,
+MRSASRaidSCSIIORequest_t, MPI2_POINTER pMRSASRaidSCSIIORequest_t;
/*
* MPT RAID MFA IO Descriptor.
*/
typedef struct _MRSAS_RAID_MFA_IO_DESCRIPTOR {
- u_int32_t RequestFlags : 8;
- u_int32_t MessageAddress1 : 24; /* bits 31:8*/
- u_int32_t MessageAddress2; /* bits 61:32 */
-} MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR,*PMRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR;
+ u_int32_t RequestFlags:8;
+ u_int32_t MessageAddress1:24; /* bits 31:8 */
+ u_int32_t MessageAddress2; /* bits 61:32 */
+} MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR, *PMRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR;
/* Default Request Descriptor */
-typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
-{
- u_int8_t RequestFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t SMID; /* 0x02 */
- u_int16_t LMID; /* 0x04 */
- u_int16_t DescriptorTypeDependent; /* 0x06 */
-} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
- MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
- Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t;
-
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t DescriptorTypeDependent; /* 0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+
+ MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t;
+
/* High Priority Request Descriptor */
-typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR
-{
- u_int8_t RequestFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t SMID; /* 0x02 */
- u_int16_t LMID; /* 0x04 */
- u_int16_t Reserved1; /* 0x06 */
-} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
- MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
- Mpi2HighPriorityRequestDescriptor_t,
- MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t;
-
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t Reserved1; /* 0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+
+ MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+Mpi2HighPriorityRequestDescriptor_t, MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t;
+
/* SCSI IO Request Descriptor */
-typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR
-{
- u_int8_t RequestFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t SMID; /* 0x02 */
- u_int16_t LMID; /* 0x04 */
- u_int16_t DevHandle; /* 0x06 */
-} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
- MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
- Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t;
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t DevHandle; /* 0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+
+ MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t;
/* SCSI Target Request Descriptor */
-typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR
-{
- u_int8_t RequestFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t SMID; /* 0x02 */
- u_int16_t LMID; /* 0x04 */
- u_int16_t IoIndex; /* 0x06 */
-} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
- MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
- Mpi2SCSITargetRequestDescriptor_t,
- MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t;
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t IoIndex; /* 0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+
+ MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+Mpi2SCSITargetRequestDescriptor_t, MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t;
/* RAID Accelerator Request Descriptor */
-typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR
-{
- u_int8_t RequestFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t SMID; /* 0x02 */
- u_int16_t LMID; /* 0x04 */
- u_int16_t Reserved; /* 0x06 */
-} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
- MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
- Mpi2RAIDAcceleratorRequestDescriptor_t,
- MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t;
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t Reserved; /* 0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+
+ MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+Mpi2RAIDAcceleratorRequestDescriptor_t, MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t;
/* union of Request Descriptors */
-typedef union _MRSAS_REQUEST_DESCRIPTOR_UNION
-{
- MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
- MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
- MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
- MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
- MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
- MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
- union {
- struct {
- u_int32_t low;
- u_int32_t high;
- } u;
- u_int64_t Words;
- } addr;
-} MRSAS_REQUEST_DESCRIPTOR_UNION;
+typedef union _MRSAS_REQUEST_DESCRIPTOR_UNION {
+ MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
+ union {
+ struct {
+ u_int32_t low;
+ u_int32_t high;
+ } u;
+ u_int64_t Words;
+ } addr;
+} MRSAS_REQUEST_DESCRIPTOR_UNION;
/* Default Reply Descriptor */
-typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR
-{
- u_int8_t ReplyFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t DescriptorTypeDependent1; /* 0x02 */
- u_int32_t DescriptorTypeDependent2; /* 0x04 */
-} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
- Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t;
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR {
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t DescriptorTypeDependent1; /* 0x02 */
+ u_int32_t DescriptorTypeDependent2; /* 0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t;
/* Address Reply Descriptor */
-typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR
-{
- u_int8_t ReplyFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t SMID; /* 0x02 */
- u_int32_t ReplyFrameAddress; /* 0x04 */
-} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
- Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t;
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR {
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int32_t ReplyFrameAddress; /* 0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t;
/* SCSI IO Success Reply Descriptor */
-typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
-{
- u_int8_t ReplyFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t SMID; /* 0x02 */
- u_int16_t TaskTag; /* 0x04 */
- u_int16_t Reserved1; /* 0x06 */
-} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
- MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
- Mpi2SCSIIOSuccessReplyDescriptor_t,
- MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t;
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t TaskTag; /* 0x04 */
+ u_int16_t Reserved1; /* 0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+
+ MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+Mpi2SCSIIOSuccessReplyDescriptor_t, MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t;
/* TargetAssist Success Reply Descriptor */
-typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR
-{
- u_int8_t ReplyFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t SMID; /* 0x02 */
- u_int8_t SequenceNumber; /* 0x04 */
- u_int8_t Reserved1; /* 0x05 */
- u_int16_t IoIndex; /* 0x06 */
-} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
- MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
- Mpi2TargetAssistSuccessReplyDescriptor_t,
- MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t;
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int8_t SequenceNumber; /* 0x04 */
+ u_int8_t Reserved1; /* 0x05 */
+ u_int16_t IoIndex; /* 0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+
+ MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+Mpi2TargetAssistSuccessReplyDescriptor_t, MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t;
/* Target Command Buffer Reply Descriptor */
-typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR
-{
- u_int8_t ReplyFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int8_t VP_ID; /* 0x02 */
- u_int8_t Flags; /* 0x03 */
- u_int16_t InitiatorDevHandle; /* 0x04 */
- u_int16_t IoIndex; /* 0x06 */
-} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
- MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
- Mpi2TargetCommandBufferReplyDescriptor_t,
- MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t;
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int8_t VP_ID; /* 0x02 */
+ u_int8_t Flags; /* 0x03 */
+ u_int16_t InitiatorDevHandle; /* 0x04 */
+ u_int16_t IoIndex; /* 0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+
+ MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+Mpi2TargetCommandBufferReplyDescriptor_t, MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t;
/* RAID Accelerator Success Reply Descriptor */
-typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
-{
- u_int8_t ReplyFlags; /* 0x00 */
- u_int8_t MSIxIndex; /* 0x01 */
- u_int16_t SMID; /* 0x02 */
- u_int32_t Reserved; /* 0x04 */
-} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
- MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
- Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
- MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int32_t Reserved; /* 0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+
+ MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
/* union of Reply Descriptors */
-typedef union _MPI2_REPLY_DESCRIPTORS_UNION
-{
- MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
- MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
- MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
- MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
- MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
- MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
- u_int64_t Words;
-} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
- Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
+ MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+ u_int64_t Words;
+} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
typedef struct {
- volatile unsigned int val;
+ volatile unsigned int val;
} atomic_t;
-#define atomic_read(v) atomic_load_acq_int(&(v)->val)
-#define atomic_set(v,i) atomic_store_rel_int(&(v)->val, i)
-#define atomic_dec(v) atomic_fetchadd_int(&(v)->val, -1)
-#define atomic_inc(v) atomic_fetchadd_int(&(v)->val, 1)
+#define atomic_read(v) atomic_load_acq_int(&(v)->val)
+#define atomic_set(v,i) atomic_store_rel_int(&(v)->val, i)
+#define atomic_dec(v) atomic_fetchadd_int(&(v)->val, -1)
+#define atomic_inc(v) atomic_fetchadd_int(&(v)->val, 1)
/* IOCInit Request message */
-typedef struct _MPI2_IOC_INIT_REQUEST
-{
- u_int8_t WhoInit; /* 0x00 */
- u_int8_t Reserved1; /* 0x01 */
- u_int8_t ChainOffset; /* 0x02 */
- u_int8_t Function; /* 0x03 */
- u_int16_t Reserved2; /* 0x04 */
- u_int8_t Reserved3; /* 0x06 */
- u_int8_t MsgFlags; /* 0x07 */
- u_int8_t VP_ID; /* 0x08 */
- u_int8_t VF_ID; /* 0x09 */
- u_int16_t Reserved4; /* 0x0A */
- u_int16_t MsgVersion; /* 0x0C */
- u_int16_t HeaderVersion; /* 0x0E */
- u_int32_t Reserved5; /* 0x10 */
- u_int16_t Reserved6; /* 0x14 */
- u_int8_t Reserved7; /* 0x16 */
- u_int8_t HostMSIxVectors; /* 0x17 */
- u_int16_t Reserved8; /* 0x18 */
- u_int16_t SystemRequestFrameSize; /* 0x1A */
- u_int16_t ReplyDescriptorPostQueueDepth; /* 0x1C */
- u_int16_t ReplyFreeQueueDepth; /* 0x1E */
- u_int32_t SenseBufferAddressHigh; /* 0x20 */
- u_int32_t SystemReplyAddressHigh; /* 0x24 */
- u_int64_t SystemRequestFrameBaseAddress; /* 0x28 */
- u_int64_t ReplyDescriptorPostQueueAddress;/* 0x30 */
- u_int64_t ReplyFreeQueueAddress; /* 0x38 */
- u_int64_t TimeStamp; /* 0x40 */
-} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST,
- Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
+typedef struct _MPI2_IOC_INIT_REQUEST {
+ u_int8_t WhoInit; /* 0x00 */
+ u_int8_t Reserved1; /* 0x01 */
+ u_int8_t ChainOffset; /* 0x02 */
+ u_int8_t Function; /* 0x03 */
+ u_int16_t Reserved2; /* 0x04 */
+ u_int8_t Reserved3; /* 0x06 */
+ u_int8_t MsgFlags; /* 0x07 */
+ u_int8_t VP_ID; /* 0x08 */
+ u_int8_t VF_ID; /* 0x09 */
+ u_int16_t Reserved4; /* 0x0A */
+ u_int16_t MsgVersion; /* 0x0C */
+ u_int16_t HeaderVersion; /* 0x0E */
+ u_int32_t Reserved5; /* 0x10 */
+ u_int16_t Reserved6; /* 0x14 */
+ u_int8_t Reserved7; /* 0x16 */
+ u_int8_t HostMSIxVectors; /* 0x17 */
+ u_int16_t Reserved8; /* 0x18 */
+ u_int16_t SystemRequestFrameSize; /* 0x1A */
+ u_int16_t ReplyDescriptorPostQueueDepth; /* 0x1C */
+ u_int16_t ReplyFreeQueueDepth; /* 0x1E */
+ u_int32_t SenseBufferAddressHigh; /* 0x20 */
+ u_int32_t SystemReplyAddressHigh; /* 0x24 */
+ u_int64_t SystemRequestFrameBaseAddress; /* 0x28 */
+ u_int64_t ReplyDescriptorPostQueueAddress; /* 0x30 */
+ u_int64_t ReplyFreeQueueAddress;/* 0x38 */
+ u_int64_t TimeStamp; /* 0x40 */
+} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST,
+Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
/*
* MR private defines
*/
-#define MR_PD_INVALID 0xFFFF
-#define MAX_SPAN_DEPTH 8
-#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
-#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
-#define MAX_ROW_SIZE 32
-#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
-#define MAX_LOGICAL_DRIVES 64
-#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
-#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
-#define MAX_ARRAYS 128
-#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
-#define MAX_PHYSICAL_DEVICES 256
-#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
-#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 // get the mapping information of this LD
-
-
-/*******************************************************************
- * RAID map related structures
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_LOGICAL_DRIVES_EXT 256
+
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+
+#define MAX_ARRAYS_EXT 256
+#define MAX_API_ARRAYS_EXT MAX_ARRAYS_EXT
+
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+
+
+#define MRSAS_MAX_PD_CHANNELS 1
+#define MRSAS_MAX_LD_CHANNELS 1
+#define MRSAS_MAX_DEV_PER_CHANNEL 256
+#define MRSAS_DEFAULT_INIT_ID -1
+#define MRSAS_MAX_LUN 8
+#define MRSAS_DEFAULT_CMD_PER_LUN 256
+#define MRSAS_MAX_PD (MRSAS_MAX_PD_CHANNELS * \
+ MRSAS_MAX_DEV_PER_CHANNEL)
+#define MRSAS_MAX_LD_IDS (MRSAS_MAX_LD_CHANNELS * \
+ MRSAS_MAX_DEV_PER_CHANNEL)
+
+
+#define VD_EXT_DEBUG 0
+
+
+/*******************************************************************
+ * RAID map related structures
********************************************************************/
-
+#pragma pack(1)
typedef struct _MR_DEV_HANDLE_INFO {
- u_int16_t curDevHdl; // the device handle currently used by fw to issue the command.
- u_int8_t validHandles; // bitmap of valid device handles.
- u_int8_t reserved;
- u_int16_t devHandle[2]; // 0x04 dev handles for all the paths.
-} MR_DEV_HANDLE_INFO;
-
+ u_int16_t curDevHdl;
+ u_int8_t validHandles;
+ u_int8_t reserved;
+ u_int16_t devHandle[2];
+} MR_DEV_HANDLE_INFO;
+
+#pragma pack()
+
typedef struct _MR_ARRAY_INFO {
- u_int16_t pd[MAX_RAIDMAP_ROW_SIZE];
-} MR_ARRAY_INFO; // 0x40, Total Size
-
+ u_int16_t pd[MAX_RAIDMAP_ROW_SIZE];
+} MR_ARRAY_INFO;
+
typedef struct _MR_QUAD_ELEMENT {
- u_int64_t logStart; // 0x00
- u_int64_t logEnd; // 0x08
- u_int64_t offsetInSpan; // 0x10
- u_int32_t diff; // 0x18
- u_int32_t reserved1; // 0x1C
-} MR_QUAD_ELEMENT; // 0x20, Total size
-
+ u_int64_t logStart;
+ u_int64_t logEnd;
+ u_int64_t offsetInSpan;
+ u_int32_t diff;
+ u_int32_t reserved1;
+} MR_QUAD_ELEMENT;
+
typedef struct _MR_SPAN_INFO {
- u_int32_t noElements; // 0x00
- u_int32_t reserved1; // 0x04
- MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH]; // 0x08
-} MR_SPAN_INFO; // 0x108, Total size
-
-typedef struct _MR_LD_SPAN_ { // SPAN structure
- u_int64_t startBlk; // 0x00, starting block number in array
- u_int64_t numBlks; // 0x08, number of blocks
- u_int16_t arrayRef; // 0x10, array reference
- u_int8_t spanRowSize; // 0x11, span row size
- u_int8_t spanRowDataSize; // 0x12, span row data size
- u_int8_t reserved[4]; // 0x13, reserved
-} MR_LD_SPAN; // 0x18, Total Size
+ u_int32_t noElements;
+ u_int32_t reserved1;
+ MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
+} MR_SPAN_INFO;
+
+typedef struct _MR_LD_SPAN_ {
+ u_int64_t startBlk;
+ u_int64_t numBlks;
+ u_int16_t arrayRef;
+ u_int8_t spanRowSize;
+ u_int8_t spanRowDataSize;
+ u_int8_t reserved[4];
+} MR_LD_SPAN;
typedef struct _MR_SPAN_BLOCK_INFO {
- u_int64_t num_rows; // number of rows/span
- MR_LD_SPAN span; // 0x08
- MR_SPAN_INFO block_span_info; // 0x20
-} MR_SPAN_BLOCK_INFO;
+ u_int64_t num_rows;
+ MR_LD_SPAN span;
+ MR_SPAN_INFO block_span_info;
+} MR_SPAN_BLOCK_INFO;
typedef struct _MR_LD_RAID {
- struct {
- u_int32_t fpCapable :1;
- u_int32_t reserved5 :3;
- u_int32_t ldPiMode :4;
- u_int32_t pdPiMode :4; // Every Pd has to be same.
- u_int32_t encryptionType :8; // FDE or ctlr encryption (MR_LD_ENCRYPTION_TYPE)
- u_int32_t fpWriteCapable :1;
- u_int32_t fpReadCapable :1;
- u_int32_t fpWriteAcrossStripe :1;
- u_int32_t fpReadAcrossStripe :1;
- u_int32_t fpNonRWCapable :1; // TRUE if supporting Non RW IO
- u_int32_t reserved4 :7;
- } capability; // 0x00
- u_int32_t reserved6;
- u_int64_t size; // 0x08, LD size in blocks
-
- u_int8_t spanDepth; // 0x10, Total Number of Spans
- u_int8_t level; // 0x11, RAID level
- u_int8_t stripeShift; // 0x12, shift-count to get stripe size (0=512, 1=1K, 7=64K, etc.)
- u_int8_t rowSize; // 0x13, number of disks in a row
-
- u_int8_t rowDataSize; // 0x14, number of data disks in a row
- u_int8_t writeMode; // 0x15, WRITE_THROUGH or WRITE_BACK
- u_int8_t PRL; // 0x16, To differentiate between RAID1 and RAID1E
- u_int8_t SRL; // 0x17
-
- u_int16_t targetId; // 0x18, ld Target Id.
- u_int8_t ldState; // 0x1a, state of ld, state corresponds to MR_LD_STATE
- u_int8_t regTypeReqOnWrite;// 0x1b, Pre calculate region type requests based on MFC etc..
- u_int8_t modFactor; // 0x1c, same as rowSize,
- u_int8_t regTypeReqOnRead; // 0x1d, region lock type used for read, valid only if regTypeOnReadIsValid=1
- u_int16_t seqNum; // 0x1e, LD sequence number
-
- struct {
- u_int32_t ldSyncRequired:1; // This LD requires sync command before completing
- u_int32_t regTypeReqOnReadLsValid:1; // Qualifier for regTypeOnRead
- u_int32_t reserved:30;
- } flags; // 0x20
-
- u_int8_t LUN[8]; // 0x24, 8 byte LUN field used for SCSI
- u_int8_t fpIoTimeoutForLd; // 0x2C, timeout value for FP IOs
- u_int8_t reserved2[3]; // 0x2D
- u_int32_t logicalBlockLength; // 0x30 Logical block size for the LD
- struct {
- u_int32_t LdPiExp:4; // 0x34, P_I_EXPONENT for ReadCap 16
- u_int32_t LdLogicalBlockExp:4; // 0x34, LOGICAL BLOCKS PER PHYS BLOCK
- u_int32_t reserved1:24; // 0x34
- } exponent;
- u_int8_t reserved3[0x80-0x38]; // 0x38
-} MR_LD_RAID; // 0x80, Total Size
+ struct {
+ u_int32_t fpCapable:1;
+ u_int32_t reserved5:3;
+ u_int32_t ldPiMode:4;
+ u_int32_t pdPiMode:4;
+ u_int32_t encryptionType:8;
+ u_int32_t fpWriteCapable:1;
+ u_int32_t fpReadCapable:1;
+ u_int32_t fpWriteAcrossStripe:1;
+ u_int32_t fpReadAcrossStripe:1;
+ u_int32_t fpNonRWCapable:1;
+ u_int32_t reserved4:7;
+ } capability;
+ u_int32_t reserved6;
+ u_int64_t size;
+
+ u_int8_t spanDepth;
+ u_int8_t level;
+ u_int8_t stripeShift;
+ u_int8_t rowSize;
+
+ u_int8_t rowDataSize;
+ u_int8_t writeMode;
+ u_int8_t PRL;
+ u_int8_t SRL;
+
+ u_int16_t targetId;
+ u_int8_t ldState;
+ u_int8_t regTypeReqOnWrite;
+ u_int8_t modFactor;
+ u_int8_t regTypeReqOnRead;
+ u_int16_t seqNum;
+
+ struct {
+ u_int32_t ldSyncRequired:1;
+ u_int32_t regTypeReqOnReadLsValid:1;
+ u_int32_t reserved:30;
+ } flags;
+
+ u_int8_t LUN[8];
+ u_int8_t fpIoTimeoutForLd;
+ u_int8_t reserved2[3];
+ u_int32_t logicalBlockLength;
+ struct {
+ u_int32_t LdPiExp:4;
+ u_int32_t LdLogicalBlockExp:4;
+ u_int32_t reserved1:24;
+ } exponent;
+ u_int8_t reserved3[0x80 - 0x38];
+} MR_LD_RAID;
typedef struct _MR_LD_SPAN_MAP {
- MR_LD_RAID ldRaid; // 0x00
- u_int8_t dataArmMap[MAX_RAIDMAP_ROW_SIZE]; // 0x80, needed for GET_ARM() - R0/1/5 only.
- MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH]; // 0xA0
-} MR_LD_SPAN_MAP; // 0x9E0
+ MR_LD_RAID ldRaid;
+ u_int8_t dataArmMap[MAX_RAIDMAP_ROW_SIZE];
+ MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
+} MR_LD_SPAN_MAP;
typedef struct _MR_FW_RAID_MAP {
- u_int32_t totalSize; // total size of this structure, including this field.
- union {
- struct { // Simple method of version checking variables
- u_int32_t maxLd;
- u_int32_t maxSpanDepth;
- u_int32_t maxRowSize;
- u_int32_t maxPdCount;
- u_int32_t maxArrays;
- } validationInfo;
- u_int32_t version[5];
- u_int32_t reserved1[5];
- } raid_desc;
- u_int32_t ldCount; // count of lds.
- u_int32_t Reserved1;
- u_int8_t ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+MAX_RAIDMAP_VIEWS]; // 0x20
- // This doesn't correspond to
- // FW Ld Tgt Id to LD, but will purge. For example: if tgt Id is 4
- // and FW LD is 2, and there is only one LD, FW will populate the
- // array like this. [0xFF, 0xFF, 0xFF, 0xFF, 0x0,.....]. This is to
- // help reduce the entire strcture size if there are few LDs or
- // driver is looking info for 1 LD only.
- u_int8_t fpPdIoTimeoutSec; // timeout value used by driver in FP IOs
- u_int8_t reserved2[7];
- MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS]; // 0x00a8
- MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; // 0x20a8
- MR_LD_SPAN_MAP ldSpanMap[1]; // 0x28a8-[0-MAX_RAIDMAP_LOGICAL_DRIVES+MAX_RAIDMAP_VIEWS+1];
-} MR_FW_RAID_MAP; // 0x3288, Total Size
-
-typedef struct _LD_LOAD_BALANCE_INFO
-{
- u_int8_t loadBalanceFlag;
- u_int8_t reserved1;
- u_int16_t raid1DevHandle[2];
- atomic_t scsi_pending_cmds[2];
- u_int64_t last_accessed_block[2];
-} LD_LOAD_BALANCE_INFO, *PLD_LOAD_BALANCE_INFO;
+ u_int32_t totalSize;
+ union {
+ struct {
+ u_int32_t maxLd;
+ u_int32_t maxSpanDepth;
+ u_int32_t maxRowSize;
+ u_int32_t maxPdCount;
+ u_int32_t maxArrays;
+ } validationInfo;
+ u_int32_t version[5];
+ u_int32_t reserved1[5];
+ } raid_desc;
+ u_int32_t ldCount;
+ u_int32_t Reserved1;
+
+ /*
+ * This doesn't correspond to FW Ld Tgt Id to LD, but will purge. For
+ * example: if tgt Id is 4 and FW LD is 2, and there is only one LD,
+ * FW will populate the array like this. [0xFF, 0xFF, 0xFF, 0xFF,
+ * 0x0,.....]. This is to help reduce the entire strcture size if
+ * there are few LDs or driver is looking info for 1 LD only.
+ */
+ u_int8_t ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS];
+ u_int8_t fpPdIoTimeoutSec;
+ u_int8_t reserved2[7];
+ MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
+ MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ MR_LD_SPAN_MAP ldSpanMap[1];
+} MR_FW_RAID_MAP;
+
+
+typedef struct _MR_FW_RAID_MAP_EXT {
+ /* Not used in new map */
+ u_int32_t reserved;
+
+ union {
+ struct {
+ u_int32_t maxLd;
+ u_int32_t maxSpanDepth;
+ u_int32_t maxRowSize;
+ u_int32_t maxPdCount;
+ u_int32_t maxArrays;
+ } validationInfo;
+ u_int32_t version[5];
+ u_int32_t reserved1[5];
+ } fw_raid_desc;
+
+ u_int8_t fpPdIoTimeoutSec;
+ u_int8_t reserved2[7];
+
+ u_int16_t ldCount;
+ u_int16_t arCount;
+ u_int16_t spanCount;
+ u_int16_t reserve3;
+
+ MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u_int8_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
+} MR_FW_RAID_MAP_EXT;
+
+
+typedef struct _MR_DRV_RAID_MAP {
+ /*
+ * Total size of this structure, including this field. This feild
+ * will be manupulated by driver for ext raid map, else pick the
+ * value from firmware raid map.
+ */
+ u_int32_t totalSize;
+
+ union {
+ struct {
+ u_int32_t maxLd;
+ u_int32_t maxSpanDepth;
+ u_int32_t maxRowSize;
+ u_int32_t maxPdCount;
+ u_int32_t maxArrays;
+ } validationInfo;
+ u_int32_t version[5];
+ u_int32_t reserved1[5];
+ } drv_raid_desc;
+
+ /* timeout value used by driver in FP IOs */
+ u_int8_t fpPdIoTimeoutSec;
+ u_int8_t reserved2[7];
+
+ u_int16_t ldCount;
+ u_int16_t arCount;
+ u_int16_t spanCount;
+ u_int16_t reserve3;
+
+ MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u_int8_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ MR_LD_SPAN_MAP ldSpanMap[1];
+
+} MR_DRV_RAID_MAP;
+
+/*
+ * Driver raid map size is same as raid map ext MR_DRV_RAID_MAP_ALL is
+ * created to sync with old raid. And it is mainly for code re-use purpose.
+ */
+
+#pragma pack(1)
+typedef struct _MR_DRV_RAID_MAP_ALL {
+
+ MR_DRV_RAID_MAP raidMap;
+ MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+} MR_DRV_RAID_MAP_ALL;
+
+#pragma pack()
+
+typedef struct _LD_LOAD_BALANCE_INFO {
+ u_int8_t loadBalanceFlag;
+ u_int8_t reserved1;
+ u_int16_t raid1DevHandle[2];
+ atomic_t scsi_pending_cmds[2];
+ u_int64_t last_accessed_block[2];
+} LD_LOAD_BALANCE_INFO, *PLD_LOAD_BALANCE_INFO;
/* SPAN_SET is info caclulated from span info from Raid map per ld */
typedef struct _LD_SPAN_SET {
- u_int64_t log_start_lba;
- u_int64_t log_end_lba;
- u_int64_t span_row_start;
- u_int64_t span_row_end;
- u_int64_t data_strip_start;
- u_int64_t data_strip_end;
- u_int64_t data_row_start;
- u_int64_t data_row_end;
- u_int8_t strip_offset[MAX_SPAN_DEPTH];
- u_int32_t span_row_data_width;
- u_int32_t diff;
- u_int32_t reserved[2];
-}LD_SPAN_SET, *PLD_SPAN_SET;
+ u_int64_t log_start_lba;
+ u_int64_t log_end_lba;
+ u_int64_t span_row_start;
+ u_int64_t span_row_end;
+ u_int64_t data_strip_start;
+ u_int64_t data_strip_end;
+ u_int64_t data_row_start;
+ u_int64_t data_row_end;
+ u_int8_t strip_offset[MAX_SPAN_DEPTH];
+ u_int32_t span_row_data_width;
+ u_int32_t diff;
+ u_int32_t reserved[2];
+} LD_SPAN_SET, *PLD_SPAN_SET;
typedef struct LOG_BLOCK_SPAN_INFO {
- LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
-}LD_SPAN_INFO, *PLD_SPAN_INFO;
+ LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
+} LD_SPAN_INFO, *PLD_SPAN_INFO;
#pragma pack(1)
typedef struct _MR_FW_RAID_MAP_ALL {
- MR_FW_RAID_MAP raidMap;
- MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
-} MR_FW_RAID_MAP_ALL;
+ MR_FW_RAID_MAP raidMap;
+ MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+} MR_FW_RAID_MAP_ALL;
+
#pragma pack()
struct IO_REQUEST_INFO {
- u_int64_t ldStartBlock;
- u_int32_t numBlocks;
- u_int16_t ldTgtId;
- u_int8_t isRead;
- u_int16_t devHandle;
- u_int64_t pdBlock;
- u_int8_t fpOkForIo;
+ u_int64_t ldStartBlock;
+ u_int32_t numBlocks;
+ u_int16_t ldTgtId;
+ u_int8_t isRead;
+ u_int16_t devHandle;
+ u_int64_t pdBlock;
+ u_int8_t fpOkForIo;
u_int8_t IoforUnevenSpan;
- u_int8_t start_span;
- u_int8_t reserved;
- u_int64_t start_row;
+ u_int8_t start_span;
+ u_int8_t reserved;
+ u_int64_t start_row;
};
typedef struct _MR_LD_TARGET_SYNC {
- u_int8_t targetId;
- u_int8_t reserved;
- u_int16_t seqNum;
-} MR_LD_TARGET_SYNC;
-
-#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
-#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
-#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
-#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
-#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
-#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
-#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+ u_int8_t targetId;
+ u_int8_t reserved;
+ u_int16_t seqNum;
+} MR_LD_TARGET_SYNC;
+
+#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
union desc_value {
- u_int64_t word;
- struct {
- u_int32_t low;
- u_int32_t high;
- } u;
+ u_int64_t word;
+ struct {
+ u_int32_t low;
+ u_int32_t high;
+ } u;
};
-/*******************************************************************
- * Temporary command
+/*******************************************************************
+ * Temporary command
********************************************************************/
struct mrsas_tmp_dcmd {
- bus_dma_tag_t tmp_dcmd_tag; // tag for tmp DMCD cmd
- bus_dmamap_t tmp_dcmd_dmamap; // dmamap for tmp DCMD cmd
- void *tmp_dcmd_mem; // virtual addr of tmp DCMD cmd
- bus_addr_t tmp_dcmd_phys_addr; //physical addr of tmp DCMD
+ bus_dma_tag_t tmp_dcmd_tag;
+ bus_dmamap_t tmp_dcmd_dmamap;
+ void *tmp_dcmd_mem;
+ bus_addr_t tmp_dcmd_phys_addr;
};
-/*******************************************************************
- * Register set, included legacy controllers 1068 and 1078,
+/*******************************************************************
+ * Register set, included legacy controllers 1068 and 1078,
* structure extended for 1078 registers
- ********************************************************************/
+ *******************************************************************/
#pragma pack(1)
typedef struct _mrsas_register_set {
- u_int32_t doorbell; /*0000h*/
- u_int32_t fusion_seq_offset; /*0004h*/
- u_int32_t fusion_host_diag; /*0008h*/
- u_int32_t reserved_01; /*000Ch*/
+ u_int32_t doorbell; /* 0000h */
+ u_int32_t fusion_seq_offset; /* 0004h */
+ u_int32_t fusion_host_diag; /* 0008h */
+ u_int32_t reserved_01; /* 000Ch */
- u_int32_t inbound_msg_0; /*0010h*/
- u_int32_t inbound_msg_1; /*0014h*/
- u_int32_t outbound_msg_0; /*0018h*/
- u_int32_t outbound_msg_1; /*001Ch*/
+ u_int32_t inbound_msg_0; /* 0010h */
+ u_int32_t inbound_msg_1; /* 0014h */
+ u_int32_t outbound_msg_0; /* 0018h */
+ u_int32_t outbound_msg_1; /* 001Ch */
- u_int32_t inbound_doorbell; /*0020h*/
- u_int32_t inbound_intr_status; /*0024h*/
- u_int32_t inbound_intr_mask; /*0028h*/
+ u_int32_t inbound_doorbell; /* 0020h */
+ u_int32_t inbound_intr_status; /* 0024h */
+ u_int32_t inbound_intr_mask; /* 0028h */
- u_int32_t outbound_doorbell; /*002Ch*/
- u_int32_t outbound_intr_status; /*0030h*/
- u_int32_t outbound_intr_mask; /*0034h*/
+ u_int32_t outbound_doorbell; /* 002Ch */
+ u_int32_t outbound_intr_status; /* 0030h */
+ u_int32_t outbound_intr_mask; /* 0034h */
- u_int32_t reserved_1[2]; /*0038h*/
+ u_int32_t reserved_1[2]; /* 0038h */
- u_int32_t inbound_queue_port; /*0040h*/
- u_int32_t outbound_queue_port; /*0044h*/
+ u_int32_t inbound_queue_port; /* 0040h */
+ u_int32_t outbound_queue_port; /* 0044h */
- u_int32_t reserved_2[9]; /*0048h*/
- u_int32_t reply_post_host_index; /*006Ch*/
- u_int32_t reserved_2_2[12]; /*0070h*/
+ u_int32_t reserved_2[9]; /* 0048h */
+ u_int32_t reply_post_host_index;/* 006Ch */
+ u_int32_t reserved_2_2[12]; /* 0070h */
- u_int32_t outbound_doorbell_clear; /*00A0h*/
+ u_int32_t outbound_doorbell_clear; /* 00A0h */
- u_int32_t reserved_3[3]; /*00A4h*/
+ u_int32_t reserved_3[3]; /* 00A4h */
- u_int32_t outbound_scratch_pad ; /*00B0h*/
- u_int32_t outbound_scratch_pad_2; /*00B4h*/
+ u_int32_t outbound_scratch_pad; /* 00B0h */
+ u_int32_t outbound_scratch_pad_2; /* 00B4h */
- u_int32_t reserved_4[2]; /*00B8h*/
+ u_int32_t reserved_4[2]; /* 00B8h */
- u_int32_t inbound_low_queue_port ; /*00C0h*/
+ u_int32_t inbound_low_queue_port; /* 00C0h */
- u_int32_t inbound_high_queue_port ; /*00C4h*/
+ u_int32_t inbound_high_queue_port; /* 00C4h */
- u_int32_t reserved_5; /*00C8h*/
- u_int32_t res_6[11]; /*CCh*/
- u_int32_t host_diag;
- u_int32_t seq_offset;
- u_int32_t index_registers[807]; /*00CCh*/
+ u_int32_t reserved_5; /* 00C8h */
+ u_int32_t res_6[11]; /* CCh */
+ u_int32_t host_diag;
+ u_int32_t seq_offset;
+ u_int32_t index_registers[807]; /* 00CCh */
+} mrsas_reg_set;
-} mrsas_reg_set;
#pragma pack()
/*******************************************************************
@@ -867,454 +956,437 @@ typedef struct _mrsas_register_set {
/*
* FW posts its state in upper 4 bits of outbound_msg_0 register
*/
-#define MFI_STATE_MASK 0xF0000000
-#define MFI_STATE_UNDEFINED 0x00000000
-#define MFI_STATE_BB_INIT 0x10000000
-#define MFI_STATE_FW_INIT 0x40000000
-#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
-#define MFI_STATE_FW_INIT_2 0x70000000
-#define MFI_STATE_DEVICE_SCAN 0x80000000
-#define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000
-#define MFI_STATE_FLUSH_CACHE 0xA0000000
-#define MFI_STATE_READY 0xB0000000
-#define MFI_STATE_OPERATIONAL 0xC0000000
-#define MFI_STATE_FAULT 0xF0000000
-#define MFI_RESET_REQUIRED 0x00000001
-#define MFI_RESET_ADAPTER 0x00000002
-#define MEGAMFI_FRAME_SIZE 64
-#define MRSAS_MFI_FRAME_SIZE 1024
-#define MRSAS_MFI_SENSE_SIZE 128
+#define MFI_STATE_MASK 0xF0000000
+#define MFI_STATE_UNDEFINED 0x00000000
+#define MFI_STATE_BB_INIT 0x10000000
+#define MFI_STATE_FW_INIT 0x40000000
+#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
+#define MFI_STATE_FW_INIT_2 0x70000000
+#define MFI_STATE_DEVICE_SCAN 0x80000000
+#define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000
+#define MFI_STATE_FLUSH_CACHE 0xA0000000
+#define MFI_STATE_READY 0xB0000000
+#define MFI_STATE_OPERATIONAL 0xC0000000
+#define MFI_STATE_FAULT 0xF0000000
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
+#define MEGAMFI_FRAME_SIZE 64
+#define MRSAS_MFI_FRAME_SIZE 1024
+#define MRSAS_MFI_SENSE_SIZE 128
/*
* During FW init, clear pending cmds & reset state using inbound_msg_0
*
- * ABORT : Abort all pending cmds
- * READY : Move from OPERATIONAL to READY state; discard queue info
- * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
- * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
- * HOTPLUG : Resume from Hotplug
- * MFI_STOP_ADP : Send signal to FW to stop processing
+ * ABORT : Abort all pending cmds READY : Move from OPERATIONAL to
+ * READY state; discard queue info MFIMODE : Discard (possible) low MFA
+ * posted in 64-bit mode (??) CLR_HANDSHAKE: FW is waiting for HANDSHAKE from
+ * BIOS or Driver HOTPLUG : Resume from Hotplug MFI_STOP_ADP : Send
+ * signal to FW to stop processing
*/
-#define WRITE_SEQUENCE_OFFSET (0x0000000FC) // I20
-#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) // I20
-#define DIAG_WRITE_ENABLE (0x00000080)
-#define DIAG_RESET_ADAPTER (0x00000004)
-
-#define MFI_ADP_RESET 0x00000040
-#define MFI_INIT_ABORT 0x00000001
-#define MFI_INIT_READY 0x00000002
-#define MFI_INIT_MFIMODE 0x00000004
-#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
-#define MFI_INIT_HOTPLUG 0x00000010
-#define MFI_STOP_ADP 0x00000020
-#define MFI_RESET_FLAGS MFI_INIT_READY| \
- MFI_INIT_MFIMODE| \
- MFI_INIT_ABORT
+#define WRITE_SEQUENCE_OFFSET (0x0000000FC)
+#define HOST_DIAGNOSTIC_OFFSET (0x000000F8)
+#define DIAG_WRITE_ENABLE (0x00000080)
+#define DIAG_RESET_ADAPTER (0x00000004)
+
+#define MFI_ADP_RESET 0x00000040
+#define MFI_INIT_ABORT 0x00000001
+#define MFI_INIT_READY 0x00000002
+#define MFI_INIT_MFIMODE 0x00000004
+#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
+#define MFI_INIT_HOTPLUG 0x00000010
+#define MFI_STOP_ADP 0x00000020
+#define MFI_RESET_FLAGS MFI_INIT_READY| \
+ MFI_INIT_MFIMODE| \
+ MFI_INIT_ABORT
/*
- * MFI frame flags
+ * MFI frame flags
*/
-#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
-#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
-#define MFI_FRAME_SGL32 0x0000
-#define MFI_FRAME_SGL64 0x0002
-#define MFI_FRAME_SENSE32 0x0000
-#define MFI_FRAME_SENSE64 0x0004
-#define MFI_FRAME_DIR_NONE 0x0000
-#define MFI_FRAME_DIR_WRITE 0x0008
-#define MFI_FRAME_DIR_READ 0x0010
-#define MFI_FRAME_DIR_BOTH 0x0018
-#define MFI_FRAME_IEEE 0x0020
+#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
+#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
+#define MFI_FRAME_SGL32 0x0000
+#define MFI_FRAME_SGL64 0x0002
+#define MFI_FRAME_SENSE32 0x0000
+#define MFI_FRAME_SENSE64 0x0004
+#define MFI_FRAME_DIR_NONE 0x0000
+#define MFI_FRAME_DIR_WRITE 0x0008
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_FRAME_DIR_BOTH 0x0018
+#define MFI_FRAME_IEEE 0x0020
/*
* Definition for cmd_status
*/
-#define MFI_CMD_STATUS_POLL_MODE 0xFF
+#define MFI_CMD_STATUS_POLL_MODE 0xFF
/*
* MFI command opcodes
*/
-#define MFI_CMD_INIT 0x00
-#define MFI_CMD_LD_READ 0x01
-#define MFI_CMD_LD_WRITE 0x02
-#define MFI_CMD_LD_SCSI_IO 0x03
-#define MFI_CMD_PD_SCSI_IO 0x04
-#define MFI_CMD_DCMD 0x05
-#define MFI_CMD_ABORT 0x06
-#define MFI_CMD_SMP 0x07
-#define MFI_CMD_STP 0x08
-#define MFI_CMD_INVALID 0xff
-
-#define MR_DCMD_CTRL_GET_INFO 0x01010000
-#define MR_DCMD_LD_GET_LIST 0x03010000
-#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
-#define MR_FLUSH_CTRL_CACHE 0x01
-#define MR_FLUSH_DISK_CACHE 0x02
-
-#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
-#define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000
-#define MR_ENABLE_DRIVE_SPINDOWN 0x01
-
-#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
-#define MR_DCMD_CTRL_EVENT_GET 0x01040300
-#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
-#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
-
-#define MR_DCMD_CLUSTER 0x08000000
-#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
-#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
-#define MR_DCMD_PD_LIST_QUERY 0x02010100
-
-#define MR_DCMD_CTRL_MISC_CPX 0x0100e200
-#define MR_DCMD_CTRL_MISC_CPX_INIT_DATA_GET 0x0100e201
-#define MR_DCMD_CTRL_MISC_CPX_QUEUE_DATA 0x0100e202
-#define MR_DCMD_CTRL_MISC_CPX_UNREGISTER 0x0100e203
-#define MAX_MR_ROW_SIZE 32
-#define MR_CPX_DIR_WRITE 1
-#define MR_CPX_DIR_READ 0
-#define MR_CPX_VERSION 1
-
-#define MR_DCMD_CTRL_IO_METRICS_GET 0x01170200 // get IO metrics
-
-#define MR_EVT_CFG_CLEARED 0x0004
-
-#define MR_EVT_LD_STATE_CHANGE 0x0051
-#define MR_EVT_PD_INSERTED 0x005b
-#define MR_EVT_PD_REMOVED 0x0070
-#define MR_EVT_LD_CREATED 0x008a
-#define MR_EVT_LD_DELETED 0x008b
-#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
-#define MR_EVT_LD_OFFLINE 0x00fc
-#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
-#define MR_EVT_CTRL_PERF_COLLECTION 0x017e
+#define MFI_CMD_INIT 0x00
+#define MFI_CMD_LD_READ 0x01
+#define MFI_CMD_LD_WRITE 0x02
+#define MFI_CMD_LD_SCSI_IO 0x03
+#define MFI_CMD_PD_SCSI_IO 0x04
+#define MFI_CMD_DCMD 0x05
+#define MFI_CMD_ABORT 0x06
+#define MFI_CMD_SMP 0x07
+#define MFI_CMD_STP 0x08
+#define MFI_CMD_INVALID 0xff
+
+#define MR_DCMD_CTRL_GET_INFO 0x01010000
+#define MR_DCMD_LD_GET_LIST 0x03010000
+#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
+#define MR_FLUSH_CTRL_CACHE 0x01
+#define MR_FLUSH_DISK_CACHE 0x02
+
+#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
+#define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000
+#define MR_ENABLE_DRIVE_SPINDOWN 0x01
+
+#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
+#define MR_DCMD_CTRL_EVENT_GET 0x01040300
+#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
+#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
+
+#define MR_DCMD_CLUSTER 0x08000000
+#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
+#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
+#define MR_DCMD_PD_LIST_QUERY 0x02010100
+
+#define MR_DCMD_CTRL_MISC_CPX 0x0100e200
+#define MR_DCMD_CTRL_MISC_CPX_INIT_DATA_GET 0x0100e201
+#define MR_DCMD_CTRL_MISC_CPX_QUEUE_DATA 0x0100e202
+#define MR_DCMD_CTRL_MISC_CPX_UNREGISTER 0x0100e203
+#define MAX_MR_ROW_SIZE 32
+#define MR_CPX_DIR_WRITE 1
+#define MR_CPX_DIR_READ 0
+#define MR_CPX_VERSION 1
+
+#define MR_DCMD_CTRL_IO_METRICS_GET 0x01170200
+
+#define MR_EVT_CFG_CLEARED 0x0004
+
+#define MR_EVT_LD_STATE_CHANGE 0x0051
+#define MR_EVT_PD_INSERTED 0x005b
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_LD_CREATED 0x008a
+#define MR_EVT_LD_DELETED 0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
+#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+#define MR_EVT_CTRL_PERF_COLLECTION 0x017e
/*
* MFI command completion codes
*/
enum MFI_STAT {
- MFI_STAT_OK = 0x00,
- MFI_STAT_INVALID_CMD = 0x01,
- MFI_STAT_INVALID_DCMD = 0x02,
- MFI_STAT_INVALID_PARAMETER = 0x03,
- MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
- MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
- MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
- MFI_STAT_APP_IN_USE = 0x07,
- MFI_STAT_APP_NOT_INITIALIZED = 0x08,
- MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
- MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
- MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
- MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
- MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
- MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
- MFI_STAT_FLASH_BUSY = 0x0f,
- MFI_STAT_FLASH_ERROR = 0x10,
- MFI_STAT_FLASH_IMAGE_BAD = 0x11,
- MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
- MFI_STAT_FLASH_NOT_OPEN = 0x13,
- MFI_STAT_FLASH_NOT_STARTED = 0x14,
- MFI_STAT_FLUSH_FAILED = 0x15,
- MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
- MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
- MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
- MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
- MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
- MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
- MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
- MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
- MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
- MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
- MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
- MFI_STAT_MFC_HW_ERROR = 0x21,
- MFI_STAT_NO_HW_PRESENT = 0x22,
- MFI_STAT_NOT_FOUND = 0x23,
- MFI_STAT_NOT_IN_ENCL = 0x24,
- MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
- MFI_STAT_PD_TYPE_WRONG = 0x26,
- MFI_STAT_PR_DISABLED = 0x27,
- MFI_STAT_ROW_INDEX_INVALID = 0x28,
- MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
- MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
- MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
- MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
- MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
- MFI_STAT_SCSI_IO_FAILED = 0x2e,
- MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
- MFI_STAT_SHUTDOWN_FAILED = 0x30,
- MFI_STAT_TIME_NOT_SET = 0x31,
- MFI_STAT_WRONG_STATE = 0x32,
- MFI_STAT_LD_OFFLINE = 0x33,
- MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
- MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
- MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
- MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
- MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
- MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
-
- MFI_STAT_INVALID_STATUS = 0xFF
+ MFI_STAT_OK = 0x00,
+ MFI_STAT_INVALID_CMD = 0x01,
+ MFI_STAT_INVALID_DCMD = 0x02,
+ MFI_STAT_INVALID_PARAMETER = 0x03,
+ MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
+ MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
+ MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
+ MFI_STAT_APP_IN_USE = 0x07,
+ MFI_STAT_APP_NOT_INITIALIZED = 0x08,
+ MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
+ MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
+ MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
+ MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
+ MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
+ MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
+ MFI_STAT_FLASH_BUSY = 0x0f,
+ MFI_STAT_FLASH_ERROR = 0x10,
+ MFI_STAT_FLASH_IMAGE_BAD = 0x11,
+ MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
+ MFI_STAT_FLASH_NOT_OPEN = 0x13,
+ MFI_STAT_FLASH_NOT_STARTED = 0x14,
+ MFI_STAT_FLUSH_FAILED = 0x15,
+ MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
+ MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
+ MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
+ MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
+ MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
+ MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
+ MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
+ MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
+ MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
+ MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
+ MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
+ MFI_STAT_MFC_HW_ERROR = 0x21,
+ MFI_STAT_NO_HW_PRESENT = 0x22,
+ MFI_STAT_NOT_FOUND = 0x23,
+ MFI_STAT_NOT_IN_ENCL = 0x24,
+ MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
+ MFI_STAT_PD_TYPE_WRONG = 0x26,
+ MFI_STAT_PR_DISABLED = 0x27,
+ MFI_STAT_ROW_INDEX_INVALID = 0x28,
+ MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
+ MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
+ MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
+ MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
+ MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
+ MFI_STAT_SCSI_IO_FAILED = 0x2e,
+ MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
+ MFI_STAT_SHUTDOWN_FAILED = 0x30,
+ MFI_STAT_TIME_NOT_SET = 0x31,
+ MFI_STAT_WRONG_STATE = 0x32,
+ MFI_STAT_LD_OFFLINE = 0x33,
+ MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
+ MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
+ MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
+ MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
+ MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
+ MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
+
+ MFI_STAT_INVALID_STATUS = 0xFF
};
/*
* Number of mailbox bytes in DCMD message frame
*/
-#define MFI_MBOX_SIZE 12
+#define MFI_MBOX_SIZE 12
enum MR_EVT_CLASS {
- MR_EVT_CLASS_DEBUG = -2,
- MR_EVT_CLASS_PROGRESS = -1,
- MR_EVT_CLASS_INFO = 0,
- MR_EVT_CLASS_WARNING = 1,
- MR_EVT_CLASS_CRITICAL = 2,
- MR_EVT_CLASS_FATAL = 3,
- MR_EVT_CLASS_DEAD = 4,
+ MR_EVT_CLASS_DEBUG = -2,
+ MR_EVT_CLASS_PROGRESS = -1,
+ MR_EVT_CLASS_INFO = 0,
+ MR_EVT_CLASS_WARNING = 1,
+ MR_EVT_CLASS_CRITICAL = 2,
+ MR_EVT_CLASS_FATAL = 3,
+ MR_EVT_CLASS_DEAD = 4,
};
enum MR_EVT_LOCALE {
- MR_EVT_LOCALE_LD = 0x0001,
- MR_EVT_LOCALE_PD = 0x0002,
- MR_EVT_LOCALE_ENCL = 0x0004,
- MR_EVT_LOCALE_BBU = 0x0008,
- MR_EVT_LOCALE_SAS = 0x0010,
- MR_EVT_LOCALE_CTRL = 0x0020,
- MR_EVT_LOCALE_CONFIG = 0x0040,
- MR_EVT_LOCALE_CLUSTER = 0x0080,
- MR_EVT_LOCALE_ALL = 0xffff,
+ MR_EVT_LOCALE_LD = 0x0001,
+ MR_EVT_LOCALE_PD = 0x0002,
+ MR_EVT_LOCALE_ENCL = 0x0004,
+ MR_EVT_LOCALE_BBU = 0x0008,
+ MR_EVT_LOCALE_SAS = 0x0010,
+ MR_EVT_LOCALE_CTRL = 0x0020,
+ MR_EVT_LOCALE_CONFIG = 0x0040,
+ MR_EVT_LOCALE_CLUSTER = 0x0080,
+ MR_EVT_LOCALE_ALL = 0xffff,
};
enum MR_EVT_ARGS {
- MR_EVT_ARGS_NONE,
- MR_EVT_ARGS_CDB_SENSE,
- MR_EVT_ARGS_LD,
- MR_EVT_ARGS_LD_COUNT,
- MR_EVT_ARGS_LD_LBA,
- MR_EVT_ARGS_LD_OWNER,
- MR_EVT_ARGS_LD_LBA_PD_LBA,
- MR_EVT_ARGS_LD_PROG,
- MR_EVT_ARGS_LD_STATE,
- MR_EVT_ARGS_LD_STRIP,
- MR_EVT_ARGS_PD,
- MR_EVT_ARGS_PD_ERR,
- MR_EVT_ARGS_PD_LBA,
- MR_EVT_ARGS_PD_LBA_LD,
- MR_EVT_ARGS_PD_PROG,
- MR_EVT_ARGS_PD_STATE,
- MR_EVT_ARGS_PCI,
- MR_EVT_ARGS_RATE,
- MR_EVT_ARGS_STR,
- MR_EVT_ARGS_TIME,
- MR_EVT_ARGS_ECC,
- MR_EVT_ARGS_LD_PROP,
- MR_EVT_ARGS_PD_SPARE,
- MR_EVT_ARGS_PD_INDEX,
- MR_EVT_ARGS_DIAG_PASS,
- MR_EVT_ARGS_DIAG_FAIL,
- MR_EVT_ARGS_PD_LBA_LBA,
- MR_EVT_ARGS_PORT_PHY,
- MR_EVT_ARGS_PD_MISSING,
- MR_EVT_ARGS_PD_ADDRESS,
- MR_EVT_ARGS_BITMAP,
- MR_EVT_ARGS_CONNECTOR,
- MR_EVT_ARGS_PD_PD,
- MR_EVT_ARGS_PD_FRU,
- MR_EVT_ARGS_PD_PATHINFO,
- MR_EVT_ARGS_PD_POWER_STATE,
- MR_EVT_ARGS_GENERIC,
+ MR_EVT_ARGS_NONE,
+ MR_EVT_ARGS_CDB_SENSE,
+ MR_EVT_ARGS_LD,
+ MR_EVT_ARGS_LD_COUNT,
+ MR_EVT_ARGS_LD_LBA,
+ MR_EVT_ARGS_LD_OWNER,
+ MR_EVT_ARGS_LD_LBA_PD_LBA,
+ MR_EVT_ARGS_LD_PROG,
+ MR_EVT_ARGS_LD_STATE,
+ MR_EVT_ARGS_LD_STRIP,
+ MR_EVT_ARGS_PD,
+ MR_EVT_ARGS_PD_ERR,
+ MR_EVT_ARGS_PD_LBA,
+ MR_EVT_ARGS_PD_LBA_LD,
+ MR_EVT_ARGS_PD_PROG,
+ MR_EVT_ARGS_PD_STATE,
+ MR_EVT_ARGS_PCI,
+ MR_EVT_ARGS_RATE,
+ MR_EVT_ARGS_STR,
+ MR_EVT_ARGS_TIME,
+ MR_EVT_ARGS_ECC,
+ MR_EVT_ARGS_LD_PROP,
+ MR_EVT_ARGS_PD_SPARE,
+ MR_EVT_ARGS_PD_INDEX,
+ MR_EVT_ARGS_DIAG_PASS,
+ MR_EVT_ARGS_DIAG_FAIL,
+ MR_EVT_ARGS_PD_LBA_LBA,
+ MR_EVT_ARGS_PORT_PHY,
+ MR_EVT_ARGS_PD_MISSING,
+ MR_EVT_ARGS_PD_ADDRESS,
+ MR_EVT_ARGS_BITMAP,
+ MR_EVT_ARGS_CONNECTOR,
+ MR_EVT_ARGS_PD_PD,
+ MR_EVT_ARGS_PD_FRU,
+ MR_EVT_ARGS_PD_PATHINFO,
+ MR_EVT_ARGS_PD_POWER_STATE,
+ MR_EVT_ARGS_GENERIC,
};
/*
- * Thunderbolt (and later) Defines
+ * Thunderbolt (and later) Defines
*/
-#define MRSAS_MAX_SZ_CHAIN_FRAME 1024
-#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
-#define MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
-#define MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
-#define MRSAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
-#define MRSAS_LOAD_BALANCE_FLAG 0x1
-#define MRSAS_DCMD_MBOX_PEND_FLAG 0x1
-#define HOST_DIAG_WRITE_ENABLE 0x80
-#define HOST_DIAG_RESET_ADAPTER 0x4
-#define MRSAS_TBOLT_MAX_RESET_TRIES 3
-#define MRSAS_MAX_MFI_CMDS 32
+#define MRSAS_MAX_SZ_CHAIN_FRAME 1024
+#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
+#define MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
+#define MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
+#define MRSAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
+#define MRSAS_LOAD_BALANCE_FLAG 0x1
+#define MRSAS_DCMD_MBOX_PEND_FLAG 0x1
+#define HOST_DIAG_WRITE_ENABLE 0x80
+#define HOST_DIAG_RESET_ADAPTER 0x4
+#define MRSAS_TBOLT_MAX_RESET_TRIES 3
+#define MRSAS_MAX_MFI_CMDS 32
/*
- * Invader Defines
+ * Invader Defines
*/
-#define MPI2_TYPE_CUDA 0x2
-#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
-#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
-#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
-#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
-#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
-
-/*
- * T10 PI defines
+#define MPI2_TYPE_CUDA 0x2
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
+#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
+#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
+
+/*
+ * T10 PI defines
*/
-#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
-#define MRSAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
-#define MRSAS_SCSI_SERVICE_ACTION_READ32 0x9
-#define MRSAS_SCSI_SERVICE_ACTION_WRITE32 0xB
-#define MRSAS_SCSI_ADDL_CDB_LEN 0x18
-#define MRSAS_RD_WR_PROTECT_CHECK_ALL 0x20
-#define MRSAS_RD_WR_PROTECT_CHECK_NONE 0x60
-#define MRSAS_SCSIBLOCKSIZE 512
+#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
+#define MRSAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
+#define MRSAS_SCSI_SERVICE_ACTION_READ32 0x9
+#define MRSAS_SCSI_SERVICE_ACTION_WRITE32 0xB
+#define MRSAS_SCSI_ADDL_CDB_LEN 0x18
+#define MRSAS_RD_WR_PROTECT_CHECK_ALL 0x20
+#define MRSAS_RD_WR_PROTECT_CHECK_NONE 0x60
+#define MRSAS_SCSIBLOCKSIZE 512
/*
* Raid context flags
*/
-#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
-#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
typedef enum MR_RAID_FLAGS_IO_SUB_TYPE {
- MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
- MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
-} MR_RAID_FLAGS_IO_SUB_TYPE;
+ MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+} MR_RAID_FLAGS_IO_SUB_TYPE;
/*
* Request descriptor types
*/
-#define MRSAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
-#define MRSAS_REQ_DESCRIPT_FLAGS_MFA 0x1
-#define MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
-#define MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
-#define MRSAS_FP_CMD_LEN 16
-#define MRSAS_FUSION_IN_RESET 0
-
-#define RAID_CTX_SPANARM_ARM_SHIFT (0)
-#define RAID_CTX_SPANARM_ARM_MASK (0x1f)
-#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
-#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
-
-/*
+#define MRSAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
+#define MRSAS_REQ_DESCRIPT_FLAGS_MFA 0x1
+#define MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
+#define MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
+#define MRSAS_FP_CMD_LEN 16
+#define MRSAS_FUSION_IN_RESET 0
+
+#define RAID_CTX_SPANARM_ARM_SHIFT (0)
+#define RAID_CTX_SPANARM_ARM_MASK (0x1f)
+#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
+#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
+
+/*
* Define region lock types
*/
-typedef enum _REGION_TYPE {
- REGION_TYPE_UNUSED = 0, // lock is currently not active
- REGION_TYPE_SHARED_READ = 1, // shared lock (for reads)
- REGION_TYPE_SHARED_WRITE = 2,
- REGION_TYPE_EXCLUSIVE = 3, // exclusive lock (for writes)
-} REGION_TYPE;
-
-/*
- * MR private defines
- */
-#define MR_PD_INVALID 0xFFFF
-#define MAX_SPAN_DEPTH 8
-#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
-#define MAX_ROW_SIZE 32
-#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
-#define MAX_LOGICAL_DRIVES 64
-#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
-#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
-#define MAX_ARRAYS 128
-#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
-#define MAX_PHYSICAL_DEVICES 256
-#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
-#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+typedef enum _REGION_TYPE {
+ REGION_TYPE_UNUSED = 0,
+ REGION_TYPE_SHARED_READ = 1,
+ REGION_TYPE_SHARED_WRITE = 2,
+ REGION_TYPE_EXCLUSIVE = 3,
+} REGION_TYPE;
+
/*
- * SCSI-CAM Related Defines
+ * SCSI-CAM Related Defines
*/
-#define MRSAS_SCSI_MAX_LUNS 0 //zero for now
-#define MRSAS_SCSI_INITIATOR_ID 255
-#define MRSAS_SCSI_MAX_CMDS 8
-#define MRSAS_SCSI_MAX_CDB_LEN 16
-#define MRSAS_SCSI_SENSE_BUFFERSIZE 96
-#define MRSAS_MAX_SGL 70
-#define MRSAS_MAX_IO_SIZE (256 * 1024)
-#define MRSAS_INTERNAL_CMDS 32
+#define MRSAS_SCSI_MAX_LUNS 0
+#define MRSAS_SCSI_INITIATOR_ID 255
+#define MRSAS_SCSI_MAX_CMDS 8
+#define MRSAS_SCSI_MAX_CDB_LEN 16
+#define MRSAS_SCSI_SENSE_BUFFERSIZE 96
+#define MRSAS_MAX_SGL 70
+#define MRSAS_MAX_IO_SIZE (256 * 1024)
+#define MRSAS_INTERNAL_CMDS 32
/* Request types */
-#define MRSAS_REQ_TYPE_INTERNAL_CMD 0x0
-#define MRSAS_REQ_TYPE_AEN_FETCH 0x1
-#define MRSAS_REQ_TYPE_PASSTHRU 0x2
-#define MRSAS_REQ_TYPE_GETSET_PARAM 0x3
-#define MRSAS_REQ_TYPE_SCSI_IO 0x4
+#define MRSAS_REQ_TYPE_INTERNAL_CMD 0x0
+#define MRSAS_REQ_TYPE_AEN_FETCH 0x1
+#define MRSAS_REQ_TYPE_PASSTHRU 0x2
+#define MRSAS_REQ_TYPE_GETSET_PARAM 0x3
+#define MRSAS_REQ_TYPE_SCSI_IO 0x4
/* Request states */
-#define MRSAS_REQ_STATE_FREE 0
-#define MRSAS_REQ_STATE_BUSY 1
-#define MRSAS_REQ_STATE_TRAN 2
-#define MRSAS_REQ_STATE_COMPLETE 3
+#define MRSAS_REQ_STATE_FREE 0
+#define MRSAS_REQ_STATE_BUSY 1
+#define MRSAS_REQ_STATE_TRAN 2
+#define MRSAS_REQ_STATE_COMPLETE 3
enum mrsas_req_flags {
- MRSAS_DIR_UNKNOWN = 0x1,
- MRSAS_DIR_IN = 0x2,
- MRSAS_DIR_OUT = 0x4,
- MRSAS_DIR_NONE = 0x8,
+ MRSAS_DIR_UNKNOWN = 0x1,
+ MRSAS_DIR_IN = 0x2,
+ MRSAS_DIR_OUT = 0x4,
+ MRSAS_DIR_NONE = 0x8,
};
-/*
- * Adapter Reset States
+/*
+ * Adapter Reset States
*/
enum {
- MRSAS_HBA_OPERATIONAL = 0,
- MRSAS_ADPRESET_SM_INFAULT = 1,
- MRSAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
- MRSAS_ADPRESET_SM_OPERATIONAL = 3,
- MRSAS_HW_CRITICAL_ERROR = 4,
- MRSAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
+ MRSAS_HBA_OPERATIONAL = 0,
+ MRSAS_ADPRESET_SM_INFAULT = 1,
+ MRSAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
+ MRSAS_ADPRESET_SM_OPERATIONAL = 3,
+ MRSAS_HW_CRITICAL_ERROR = 4,
+ MRSAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
};
-/*
- * MPT Command Structure
+/*
+ * MPT Command Structure
*/
struct mrsas_mpt_cmd {
- MRSAS_RAID_SCSI_IO_REQUEST *io_request;
- bus_addr_t io_request_phys_addr;
- MPI2_SGE_IO_UNION *chain_frame;
- bus_addr_t chain_frame_phys_addr;
- u_int32_t sge_count;
- u_int8_t *sense;
- bus_addr_t sense_phys_addr;
- u_int8_t retry_for_fw_reset;
- MRSAS_REQUEST_DESCRIPTOR_UNION *request_desc;
- u_int32_t sync_cmd_idx; //For getting MFI cmd from list when complete
- u_int32_t index;
- u_int8_t flags;
- u_int8_t load_balance;
- bus_size_t length; // request length
- u_int32_t error_code; // error during request dmamap load
- bus_dmamap_t data_dmamap;
- void *data;
- union ccb *ccb_ptr; // pointer to ccb
- struct callout cm_callout;
- struct mrsas_softc *sc;
- TAILQ_ENTRY(mrsas_mpt_cmd) next;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+ bus_addr_t io_request_phys_addr;
+ MPI2_SGE_IO_UNION *chain_frame;
+ bus_addr_t chain_frame_phys_addr;
+ u_int32_t sge_count;
+ u_int8_t *sense;
+ bus_addr_t sense_phys_addr;
+ u_int8_t retry_for_fw_reset;
+ MRSAS_REQUEST_DESCRIPTOR_UNION *request_desc;
+ u_int32_t sync_cmd_idx;
+ u_int32_t index;
+ u_int8_t flags;
+ u_int8_t load_balance;
+ bus_size_t length;
+ u_int32_t error_code;
+ bus_dmamap_t data_dmamap;
+ void *data;
+ union ccb *ccb_ptr;
+ struct callout cm_callout;
+ struct mrsas_softc *sc;
+ TAILQ_ENTRY(mrsas_mpt_cmd) next;
};
-/*
- * MFI Command Structure
+/*
+ * MFI Command Structure
*/
struct mrsas_mfi_cmd {
- union mrsas_frame *frame;
- bus_dmamap_t frame_dmamap; // mfi frame dmamap
- void *frame_mem; // mfi frame virtual addr
- bus_addr_t frame_phys_addr; // mfi frame physical addr
- u_int8_t *sense;
- bus_dmamap_t sense_dmamap; // mfi sense dmamap
- void *sense_mem; // mfi sense virtual addr
- bus_addr_t sense_phys_addr;
- u_int32_t index;
- u_int8_t sync_cmd;
- u_int8_t cmd_status;
- u_int8_t abort_aen;
- u_int8_t retry_for_fw_reset;
- struct mrsas_softc *sc;
- union ccb *ccb_ptr;
- union {
- struct {
- u_int16_t smid;
- u_int16_t resvd;
- } context;
- u_int32_t frame_count;
- } cmd_id;
- TAILQ_ENTRY(mrsas_mfi_cmd) next;
+ union mrsas_frame *frame;
+ bus_dmamap_t frame_dmamap;
+ void *frame_mem;
+ bus_addr_t frame_phys_addr;
+ u_int8_t *sense;
+ bus_dmamap_t sense_dmamap;
+ void *sense_mem;
+ bus_addr_t sense_phys_addr;
+ u_int32_t index;
+ u_int8_t sync_cmd;
+ u_int8_t cmd_status;
+ u_int8_t abort_aen;
+ u_int8_t retry_for_fw_reset;
+ struct mrsas_softc *sc;
+ union ccb *ccb_ptr;
+ union {
+ struct {
+ u_int16_t smid;
+ u_int16_t resvd;
+ } context;
+ u_int32_t frame_count;
+ } cmd_id;
+ TAILQ_ENTRY(mrsas_mfi_cmd) next;
};
@@ -1322,61 +1394,62 @@ struct mrsas_mfi_cmd {
* define constants for device list query options
*/
enum MR_PD_QUERY_TYPE {
- MR_PD_QUERY_TYPE_ALL = 0,
- MR_PD_QUERY_TYPE_STATE = 1,
- MR_PD_QUERY_TYPE_POWER_STATE = 2,
- MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
- MR_PD_QUERY_TYPE_SPEED = 4,
- MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
+ MR_PD_QUERY_TYPE_ALL = 0,
+ MR_PD_QUERY_TYPE_STATE = 1,
+ MR_PD_QUERY_TYPE_POWER_STATE = 2,
+ MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
+ MR_PD_QUERY_TYPE_SPEED = 4,
+ MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
};
-#define MR_EVT_CFG_CLEARED 0x0004
-#define MR_EVT_LD_STATE_CHANGE 0x0051
-#define MR_EVT_PD_INSERTED 0x005b
-#define MR_EVT_PD_REMOVED 0x0070
-#define MR_EVT_LD_CREATED 0x008a
-#define MR_EVT_LD_DELETED 0x008b
-#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
-#define MR_EVT_LD_OFFLINE 0x00fc
-#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+#define MR_EVT_CFG_CLEARED 0x0004
+#define MR_EVT_LD_STATE_CHANGE 0x0051
+#define MR_EVT_PD_INSERTED 0x005b
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_LD_CREATED 0x008a
+#define MR_EVT_LD_DELETED 0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
+#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
enum MR_PD_STATE {
- MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
- MR_PD_STATE_UNCONFIGURED_BAD = 0x01,
- MR_PD_STATE_HOT_SPARE = 0x02,
- MR_PD_STATE_OFFLINE = 0x10,
- MR_PD_STATE_FAILED = 0x11,
- MR_PD_STATE_REBUILD = 0x14,
- MR_PD_STATE_ONLINE = 0x18,
- MR_PD_STATE_COPYBACK = 0x20,
- MR_PD_STATE_SYSTEM = 0x40,
- };
-
- /*
+ MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
+ MR_PD_STATE_UNCONFIGURED_BAD = 0x01,
+ MR_PD_STATE_HOT_SPARE = 0x02,
+ MR_PD_STATE_OFFLINE = 0x10,
+ MR_PD_STATE_FAILED = 0x11,
+ MR_PD_STATE_REBUILD = 0x14,
+ MR_PD_STATE_ONLINE = 0x18,
+ MR_PD_STATE_COPYBACK = 0x20,
+ MR_PD_STATE_SYSTEM = 0x40,
+};
+
+/*
* defines the physical drive address structure
*/
#pragma pack(1)
struct MR_PD_ADDRESS {
- u_int16_t deviceId;
- u_int16_t enclDeviceId;
-
- union {
- struct {
- u_int8_t enclIndex;
- u_int8_t slotNumber;
- } mrPdAddress;
- struct {
- u_int8_t enclPosition;
- u_int8_t enclConnectorIndex;
- } mrEnclAddress;
- } u1;
- u_int8_t scsiDevType;
- union {
- u_int8_t connectedPortBitmap;
- u_int8_t connectedPortNumbers;
- } u2;
- u_int64_t sasAddr[2];
+ u_int16_t deviceId;
+ u_int16_t enclDeviceId;
+
+ union {
+ struct {
+ u_int8_t enclIndex;
+ u_int8_t slotNumber;
+ } mrPdAddress;
+ struct {
+ u_int8_t enclPosition;
+ u_int8_t enclConnectorIndex;
+ } mrEnclAddress;
+ } u1;
+ u_int8_t scsiDevType;
+ union {
+ u_int8_t connectedPortBitmap;
+ u_int8_t connectedPortNumbers;
+ } u2;
+ u_int64_t sasAddr[2];
};
+
#pragma pack()
/*
@@ -1384,31 +1457,33 @@ struct MR_PD_ADDRESS {
*/
#pragma pack(1)
struct MR_PD_LIST {
- u_int32_t size;
- u_int32_t count;
- struct MR_PD_ADDRESS addr[1];
+ u_int32_t size;
+ u_int32_t count;
+ struct MR_PD_ADDRESS addr[1];
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_pd_list {
- u_int16_t tid;
- u_int8_t driveType;
- u_int8_t driveState;
+ u_int16_t tid;
+ u_int8_t driveType;
+ u_int8_t driveState;
};
+
#pragma pack()
- /*
+/*
* defines the logical drive reference structure
*/
-typedef union _MR_LD_REF { // LD reference structure
- struct {
- u_int8_t targetId; // LD target id (0 to MAX_TARGET_ID)
- u_int8_t reserved; // reserved to make in line with MR_PD_REF
- u_int16_t seqNum; // Sequence Number
- } ld_context;
- u_int32_t ref; // shorthand reference to full 32-bits
-} MR_LD_REF; // 4 bytes
+typedef union _MR_LD_REF {
+ struct {
+ u_int8_t targetId;
+ u_int8_t reserved;
+ u_int16_t seqNum;
+ } ld_context;
+ u_int32_t ref;
+} MR_LD_REF;
/*
@@ -1416,15 +1491,16 @@ typedef union _MR_LD_REF { // LD reference structure
*/
#pragma pack(1)
struct MR_LD_LIST {
- u_int32_t ldCount; // number of LDs
- u_int32_t reserved; // pad to 8-byte boundary
- struct {
- MR_LD_REF ref; // LD reference
- u_int8_t state; // current LD state (MR_LD_STATE)
- u_int8_t reserved[3]; // pad to 8-byte boundary
- u_int64_t size; // LD size
- } ldList[MAX_LOGICAL_DRIVES];
-};
+ u_int32_t ldCount;
+ u_int32_t reserved;
+ struct {
+ MR_LD_REF ref;
+ u_int8_t state;
+ u_int8_t reserved[3];
+ u_int64_t size;
+ } ldList[MAX_LOGICAL_DRIVES_EXT];
+};
+
#pragma pack()
/*
@@ -1432,412 +1508,401 @@ struct MR_LD_LIST {
*/
#pragma pack(1)
struct mrsas_ctrl_prop {
- u_int16_t seq_num;
- u_int16_t pred_fail_poll_interval;
- u_int16_t intr_throttle_count;
- u_int16_t intr_throttle_timeouts;
- u_int8_t rebuild_rate;
- u_int8_t patrol_read_rate;
- u_int8_t bgi_rate;
- u_int8_t cc_rate;
- u_int8_t recon_rate;
- u_int8_t cache_flush_interval;
- u_int8_t spinup_drv_count;
- u_int8_t spinup_delay;
- u_int8_t cluster_enable;
- u_int8_t coercion_mode;
- u_int8_t alarm_enable;
- u_int8_t disable_auto_rebuild;
- u_int8_t disable_battery_warn;
- u_int8_t ecc_bucket_size;
- u_int16_t ecc_bucket_leak_rate;
- u_int8_t restore_hotspare_on_insertion;
- u_int8_t expose_encl_devices;
- u_int8_t maintainPdFailHistory;
- u_int8_t disallowHostRequestReordering;
- u_int8_t abortCCOnError; // set TRUE to abort CC on detecting an inconsistency
- u_int8_t loadBalanceMode; // load balance mode (MR_LOAD_BALANCE_MODE)
- u_int8_t disableAutoDetectBackplane; // 0 - use auto detect logic of backplanes
- // like SGPIO, i2c SEP using h/w mechansim
- // like GPIO pins.
- // 1 - disable auto detect SGPIO,
- // 2 - disable i2c SEP auto detect
- // 3 - disable both auto detect
- u_int8_t snapVDSpace; // % of source LD to be reserved for a VDs snapshot in
- // snapshot repository, for metadata and user data.
- // 1=5%, 2=10%, 3=15% and so on.
- /*
- * Add properties that can be controlled by a bit in the following structure.
- */
- struct {
- u_int32_t copyBackDisabled : 1; // set TRUE to disable copyBack
- // (0=copback enabled)
- u_int32_t SMARTerEnabled : 1;
- u_int32_t prCorrectUnconfiguredAreas : 1;
- u_int32_t useFdeOnly : 1;
- u_int32_t disableNCQ : 1;
- u_int32_t SSDSMARTerEnabled : 1;
- u_int32_t SSDPatrolReadEnabled : 1;
- u_int32_t enableSpinDownUnconfigured : 1;
- u_int32_t autoEnhancedImport : 1;
- u_int32_t enableSecretKeyControl : 1;
- u_int32_t disableOnlineCtrlReset : 1;
- u_int32_t allowBootWithPinnedCache : 1;
- u_int32_t disableSpinDownHS : 1;
- u_int32_t enableJBOD : 1;
- u_int32_t reserved :18;
- } OnOffProperties;
- u_int8_t autoSnapVDSpace; // % of source LD to be reserved for auto
- // snapshot in snapshot repository, for
- // metadata and user data.
- // 1=5%, 2=10%, 3=15% and so on.
- u_int8_t viewSpace; // snapshot writeable VIEWs capacity as a %
- // of source LD capacity. 0=READ only.
- // 1=5%, 2=10%, 3=15% and so on
- u_int16_t spinDownTime; // # of idle minutes before device is spun
- // down (0=use FW defaults).
- u_int8_t reserved[24];
-
-};
+ u_int16_t seq_num;
+ u_int16_t pred_fail_poll_interval;
+ u_int16_t intr_throttle_count;
+ u_int16_t intr_throttle_timeouts;
+ u_int8_t rebuild_rate;
+ u_int8_t patrol_read_rate;
+ u_int8_t bgi_rate;
+ u_int8_t cc_rate;
+ u_int8_t recon_rate;
+ u_int8_t cache_flush_interval;
+ u_int8_t spinup_drv_count;
+ u_int8_t spinup_delay;
+ u_int8_t cluster_enable;
+ u_int8_t coercion_mode;
+ u_int8_t alarm_enable;
+ u_int8_t disable_auto_rebuild;
+ u_int8_t disable_battery_warn;
+ u_int8_t ecc_bucket_size;
+ u_int16_t ecc_bucket_leak_rate;
+ u_int8_t restore_hotspare_on_insertion;
+ u_int8_t expose_encl_devices;
+ u_int8_t maintainPdFailHistory;
+ u_int8_t disallowHostRequestReordering;
+ u_int8_t abortCCOnError;
+ u_int8_t loadBalanceMode;
+ u_int8_t disableAutoDetectBackplane;
+ u_int8_t snapVDSpace;
+ /*
+ * Add properties that can be controlled by a bit in the following
+ * structure.
+ */
+ struct {
+ u_int32_t copyBackDisabled:1;
+ u_int32_t SMARTerEnabled:1;
+ u_int32_t prCorrectUnconfiguredAreas:1;
+ u_int32_t useFdeOnly:1;
+ u_int32_t disableNCQ:1;
+ u_int32_t SSDSMARTerEnabled:1;
+ u_int32_t SSDPatrolReadEnabled:1;
+ u_int32_t enableSpinDownUnconfigured:1;
+ u_int32_t autoEnhancedImport:1;
+ u_int32_t enableSecretKeyControl:1;
+ u_int32_t disableOnlineCtrlReset:1;
+ u_int32_t allowBootWithPinnedCache:1;
+ u_int32_t disableSpinDownHS:1;
+ u_int32_t enableJBOD:1;
+ u_int32_t disableCacheBypass:1;
+ u_int32_t useDiskActivityForLocate:1;
+ u_int32_t enablePI:1;
+ u_int32_t preventPIImport:1;
+ u_int32_t useGlobalSparesForEmergency:1;
+ u_int32_t useUnconfGoodForEmergency:1;
+ u_int32_t useEmergencySparesforSMARTer:1;
+ u_int32_t forceSGPIOForQuadOnly:1;
+ u_int32_t enableConfigAutoBalance:1;
+ u_int32_t enableVirtualCache:1;
+ u_int32_t enableAutoLockRecovery:1;
+ u_int32_t disableImmediateIO:1;
+ u_int32_t disableT10RebuildAssist:1;
+ u_int32_t ignore64ldRestriction:1;
+ u_int32_t enableSwZone:1;
+ u_int32_t limitMaxRateSATA3G:1;
+ u_int32_t reserved:2;
+ } OnOffProperties;
+ u_int8_t autoSnapVDSpace;
+ u_int8_t viewSpace;
+ u_int16_t spinDownTime;
+ u_int8_t reserved[24];
+
+};
+
#pragma pack()
/*
* SAS controller information
*/
-//#pragma pack(1)
struct mrsas_ctrl_info {
- /*
- * PCI device information
- */
- struct {
- u_int16_t vendor_id;
- u_int16_t device_id;
- u_int16_t sub_vendor_id;
- u_int16_t sub_device_id;
- u_int8_t reserved[24];
- } __packed pci;
- /*
- * Host interface information
- */
- struct {
- u_int8_t PCIX:1;
- u_int8_t PCIE:1;
- u_int8_t iSCSI:1;
- u_int8_t SAS_3G:1;
- u_int8_t reserved_0:4;
- u_int8_t reserved_1[6];
- u_int8_t port_count;
- u_int64_t port_addr[8];
- } __packed host_interface;
- /*
- * Device (backend) interface information
- */
- struct {
- u_int8_t SPI:1;
- u_int8_t SAS_3G:1;
- u_int8_t SATA_1_5G:1;
- u_int8_t SATA_3G:1;
- u_int8_t reserved_0:4;
- u_int8_t reserved_1[6];
- u_int8_t port_count;
- u_int64_t port_addr[8];
- } __packed device_interface;
-
- /*
- * List of components residing in flash. All str are null terminated
- */
- u_int32_t image_check_word;
- u_int32_t image_component_count;
-
- struct {
- char name[8];
- char version[32];
- char build_date[16];
- char built_time[16];
- } __packed image_component[8];
- /*
- * List of flash components that have been flashed on the card, but
- * are not in use, pending reset of the adapter. This list will be
- * empty if a flash operation has not occurred. All stings are null
- * terminated
- */
- u_int32_t pending_image_component_count;
-
- struct {
- char name[8];
- char version[32];
- char build_date[16];
- char build_time[16];
- } __packed pending_image_component[8];
-
- u_int8_t max_arms;
- u_int8_t max_spans;
- u_int8_t max_arrays;
- u_int8_t max_lds;
- char product_name[80];
- char serial_no[32];
-
- /*
- * Other physical/controller/operation information. Indicates the
- * presence of the hardware
- */
- struct {
- u_int32_t bbu:1;
- u_int32_t alarm:1;
- u_int32_t nvram:1;
- u_int32_t uart:1;
- u_int32_t reserved:28;
- } __packed hw_present;
-
- u_int32_t current_fw_time;
-
- /*
- * Maximum data transfer sizes
- */
- u_int16_t max_concurrent_cmds;
- u_int16_t max_sge_count;
- u_int32_t max_request_size;
-
- /*
- * Logical and physical device counts
- */
- u_int16_t ld_present_count;
- u_int16_t ld_degraded_count;
- u_int16_t ld_offline_count;
-
- u_int16_t pd_present_count;
- u_int16_t pd_disk_present_count;
- u_int16_t pd_disk_pred_failure_count;
- u_int16_t pd_disk_failed_count;
-
- /*
- * Memory size information
- */
- u_int16_t nvram_size;
- u_int16_t memory_size;
- u_int16_t flash_size;
-
- /*
- * Error counters
- */
- u_int16_t mem_correctable_error_count;
- u_int16_t mem_uncorrectable_error_count;
-
- /*
- * Cluster information
- */
- u_int8_t cluster_permitted;
- u_int8_t cluster_active;
-
- /*
- * Additional max data transfer sizes
- */
- u_int16_t max_strips_per_io;
-
- /*
- * Controller capabilities structures
- */
- struct {
- u_int32_t raid_level_0:1;
- u_int32_t raid_level_1:1;
- u_int32_t raid_level_5:1;
- u_int32_t raid_level_1E:1;
- u_int32_t raid_level_6:1;
- u_int32_t reserved:27;
- } __packed raid_levels;
-
- struct {
- u_int32_t rbld_rate:1;
- u_int32_t cc_rate:1;
- u_int32_t bgi_rate:1;
- u_int32_t recon_rate:1;
- u_int32_t patrol_rate:1;
- u_int32_t alarm_control:1;
- u_int32_t cluster_supported:1;
- u_int32_t bbu:1;
- u_int32_t spanning_allowed:1;
- u_int32_t dedicated_hotspares:1;
- u_int32_t revertible_hotspares:1;
- u_int32_t foreign_config_import:1;
- u_int32_t self_diagnostic:1;
- u_int32_t mixed_redundancy_arr:1;
- u_int32_t global_hot_spares:1;
- u_int32_t reserved:17;
- } __packed adapter_operations;
-
- struct {
- u_int32_t read_policy:1;
- u_int32_t write_policy:1;
- u_int32_t io_policy:1;
- u_int32_t access_policy:1;
- u_int32_t disk_cache_policy:1;
- u_int32_t reserved:27;
- } __packed ld_operations;
-
- struct {
- u_int8_t min;
- u_int8_t max;
- u_int8_t reserved[2];
- } __packed stripe_sz_ops;
-
- struct {
- u_int32_t force_online:1;
- u_int32_t force_offline:1;
- u_int32_t force_rebuild:1;
- u_int32_t reserved:29;
- } __packed pd_operations;
-
- struct {
- u_int32_t ctrl_supports_sas:1;
- u_int32_t ctrl_supports_sata:1;
- u_int32_t allow_mix_in_encl:1;
- u_int32_t allow_mix_in_ld:1;
- u_int32_t allow_sata_in_cluster:1;
- u_int32_t reserved:27;
- } __packed pd_mix_support;
-
- /*
- * Define ECC single-bit-error bucket information
- */
- u_int8_t ecc_bucket_count;
- u_int8_t reserved_2[11];
-
- /*
- * Include the controller properties (changeable items)
- */
- struct mrsas_ctrl_prop properties;
-
- /*
- * Define FW pkg version (set in envt v'bles on OEM basis)
- */
- char package_version[0x60];
+ /*
+ * PCI device information
+ */
+ struct {
+ u_int16_t vendor_id;
+ u_int16_t device_id;
+ u_int16_t sub_vendor_id;
+ u_int16_t sub_device_id;
+ u_int8_t reserved[24];
+ } __packed pci;
+ /*
+ * Host interface information
+ */
+ struct {
+ u_int8_t PCIX:1;
+ u_int8_t PCIE:1;
+ u_int8_t iSCSI:1;
+ u_int8_t SAS_3G:1;
+ u_int8_t reserved_0:4;
+ u_int8_t reserved_1[6];
+ u_int8_t port_count;
+ u_int64_t port_addr[8];
+ } __packed host_interface;
+ /*
+ * Device (backend) interface information
+ */
+ struct {
+ u_int8_t SPI:1;
+ u_int8_t SAS_3G:1;
+ u_int8_t SATA_1_5G:1;
+ u_int8_t SATA_3G:1;
+ u_int8_t reserved_0:4;
+ u_int8_t reserved_1[6];
+ u_int8_t port_count;
+ u_int64_t port_addr[8];
+ } __packed device_interface;
+
+ u_int32_t image_check_word;
+ u_int32_t image_component_count;
+
+ struct {
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char built_time[16];
+ } __packed image_component[8];
+
+ u_int32_t pending_image_component_count;
+
+ struct {
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char build_time[16];
+ } __packed pending_image_component[8];
+
+ u_int8_t max_arms;
+ u_int8_t max_spans;
+ u_int8_t max_arrays;
+ u_int8_t max_lds;
+ char product_name[80];
+ char serial_no[32];
/*
- * If adapterOperations.supportMoreThan8Phys is set, and deviceInterface.portCount is greater than 8,
- * SAS Addrs for first 8 ports shall be populated in deviceInterface.portAddr, and the rest shall be
- * populated in deviceInterfacePortAddr2.
- */
- u_int64_t deviceInterfacePortAddr2[8]; //0x6a0
- u_int8_t reserved3[128]; //0x6e0
-
- struct { //0x760
- u_int16_t minPdRaidLevel_0 : 4;
- u_int16_t maxPdRaidLevel_0 : 12;
-
- u_int16_t minPdRaidLevel_1 : 4;
- u_int16_t maxPdRaidLevel_1 : 12;
-
- u_int16_t minPdRaidLevel_5 : 4;
- u_int16_t maxPdRaidLevel_5 : 12;
-
- u_int16_t minPdRaidLevel_1E : 4;
- u_int16_t maxPdRaidLevel_1E : 12;
-
- u_int16_t minPdRaidLevel_6 : 4;
- u_int16_t maxPdRaidLevel_6 : 12;
-
- u_int16_t minPdRaidLevel_10 : 4;
- u_int16_t maxPdRaidLevel_10 : 12;
-
- u_int16_t minPdRaidLevel_50 : 4;
- u_int16_t maxPdRaidLevel_50 : 12;
-
- u_int16_t minPdRaidLevel_60 : 4;
- u_int16_t maxPdRaidLevel_60 : 12;
-
- u_int16_t minPdRaidLevel_1E_RLQ0 : 4;
- u_int16_t maxPdRaidLevel_1E_RLQ0 : 12;
-
- u_int16_t minPdRaidLevel_1E0_RLQ0 : 4;
- u_int16_t maxPdRaidLevel_1E0_RLQ0 : 12;
-
- u_int16_t reserved[6];
- } pdsForRaidLevels;
-
- u_int16_t maxPds; //0x780
- u_int16_t maxDedHSPs; //0x782
- u_int16_t maxGlobalHSPs; //0x784
- u_int16_t ddfSize; //0x786
- u_int8_t maxLdsPerArray; //0x788
- u_int8_t partitionsInDDF; //0x789
- u_int8_t lockKeyBinding; //0x78a
- u_int8_t maxPITsPerLd; //0x78b
- u_int8_t maxViewsPerLd; //0x78c
- u_int8_t maxTargetId; //0x78d
- u_int16_t maxBvlVdSize; //0x78e
-
- u_int16_t maxConfigurableSSCSize; //0x790
- u_int16_t currentSSCsize; //0x792
-
- char expanderFwVersion[12]; //0x794
-
- u_int16_t PFKTrialTimeRemaining; //0x7A0
-
- u_int16_t cacheMemorySize; //0x7A2
-
- struct { //0x7A4
- u_int32_t supportPIcontroller :1;
- u_int32_t supportLdPIType1 :1;
- u_int32_t supportLdPIType2 :1;
- u_int32_t supportLdPIType3 :1;
- u_int32_t supportLdBBMInfo :1;
- u_int32_t supportShieldState :1;
- u_int32_t blockSSDWriteCacheChange :1;
- u_int32_t supportSuspendResumeBGops :1;
- u_int32_t supportEmergencySpares :1;
- u_int32_t supportSetLinkSpeed :1;
- u_int32_t supportBootTimePFKChange :1;
- u_int32_t supportJBOD :1;
- u_int32_t disableOnlinePFKChange :1;
- u_int32_t supportPerfTuning :1;
- u_int32_t supportSSDPatrolRead :1;
- u_int32_t realTimeScheduler :1;
-
- u_int32_t supportResetNow :1;
- u_int32_t supportEmulatedDrives :1;
- u_int32_t headlessMode :1;
- u_int32_t dedicatedHotSparesLimited :1;
-
-
- u_int32_t supportUnevenSpans :1;
- u_int32_t reserved :11;
- } adapterOperations2;
-
- u_int8_t driverVersion[32]; //0x7A8
- u_int8_t maxDAPdCountSpinup60; //0x7C8
- u_int8_t temperatureROC; //0x7C9
- u_int8_t temperatureCtrl; //0x7CA
- u_int8_t reserved4; //0x7CB
- u_int16_t maxConfigurablePds; //0x7CC
-
-
- u_int8_t reserved5[2]; //0x7CD reserved for future use
+ * Other physical/controller/operation information. Indicates the
+ * presence of the hardware
+ */
+ struct {
+ u_int32_t bbu:1;
+ u_int32_t alarm:1;
+ u_int32_t nvram:1;
+ u_int32_t uart:1;
+ u_int32_t reserved:28;
+ } __packed hw_present;
+
+ u_int32_t current_fw_time;
/*
- * HA cluster information
- */
+ * Maximum data transfer sizes
+ */
+ u_int16_t max_concurrent_cmds;
+ u_int16_t max_sge_count;
+ u_int32_t max_request_size;
+
+ /*
+ * Logical and physical device counts
+ */
+ u_int16_t ld_present_count;
+ u_int16_t ld_degraded_count;
+ u_int16_t ld_offline_count;
+
+ u_int16_t pd_present_count;
+ u_int16_t pd_disk_present_count;
+ u_int16_t pd_disk_pred_failure_count;
+ u_int16_t pd_disk_failed_count;
+
+ /*
+ * Memory size information
+ */
+ u_int16_t nvram_size;
+ u_int16_t memory_size;
+ u_int16_t flash_size;
+
+ /*
+ * Error counters
+ */
+ u_int16_t mem_correctable_error_count;
+ u_int16_t mem_uncorrectable_error_count;
+
+ /*
+ * Cluster information
+ */
+ u_int8_t cluster_permitted;
+ u_int8_t cluster_active;
+
+ /*
+ * Additional max data transfer sizes
+ */
+ u_int16_t max_strips_per_io;
+
+ /*
+ * Controller capabilities structures
+ */
struct {
- u_int32_t peerIsPresent :1;
- u_int32_t peerIsIncompatible :1;
-
- u_int32_t hwIncompatible :1;
- u_int32_t fwVersionMismatch :1;
- u_int32_t ctrlPropIncompatible :1;
- u_int32_t premiumFeatureMismatch :1;
- u_int32_t reserved :26;
- } cluster;
-
- char clusterId[16]; //0x7D4
-
- u_int8_t pad[0x800-0x7E4]; //0x7E4
-} __packed;
-
-/*
- * Ld and PD Max Support Defines
- */
-#define MRSAS_MAX_PD 256
-#define MRSAS_MAX_LD 64
+ u_int32_t raid_level_0:1;
+ u_int32_t raid_level_1:1;
+ u_int32_t raid_level_5:1;
+ u_int32_t raid_level_1E:1;
+ u_int32_t raid_level_6:1;
+ u_int32_t reserved:27;
+ } __packed raid_levels;
+
+ struct {
+ u_int32_t rbld_rate:1;
+ u_int32_t cc_rate:1;
+ u_int32_t bgi_rate:1;
+ u_int32_t recon_rate:1;
+ u_int32_t patrol_rate:1;
+ u_int32_t alarm_control:1;
+ u_int32_t cluster_supported:1;
+ u_int32_t bbu:1;
+ u_int32_t spanning_allowed:1;
+ u_int32_t dedicated_hotspares:1;
+ u_int32_t revertible_hotspares:1;
+ u_int32_t foreign_config_import:1;
+ u_int32_t self_diagnostic:1;
+ u_int32_t mixed_redundancy_arr:1;
+ u_int32_t global_hot_spares:1;
+ u_int32_t reserved:17;
+ } __packed adapter_operations;
+
+ struct {
+ u_int32_t read_policy:1;
+ u_int32_t write_policy:1;
+ u_int32_t io_policy:1;
+ u_int32_t access_policy:1;
+ u_int32_t disk_cache_policy:1;
+ u_int32_t reserved:27;
+ } __packed ld_operations;
+
+ struct {
+ u_int8_t min;
+ u_int8_t max;
+ u_int8_t reserved[2];
+ } __packed stripe_sz_ops;
+
+ struct {
+ u_int32_t force_online:1;
+ u_int32_t force_offline:1;
+ u_int32_t force_rebuild:1;
+ u_int32_t reserved:29;
+ } __packed pd_operations;
+
+ struct {
+ u_int32_t ctrl_supports_sas:1;
+ u_int32_t ctrl_supports_sata:1;
+ u_int32_t allow_mix_in_encl:1;
+ u_int32_t allow_mix_in_ld:1;
+ u_int32_t allow_sata_in_cluster:1;
+ u_int32_t reserved:27;
+ } __packed pd_mix_support;
+
+ /*
+ * Define ECC single-bit-error bucket information
+ */
+ u_int8_t ecc_bucket_count;
+ u_int8_t reserved_2[11];
+
+ /*
+ * Include the controller properties (changeable items)
+ */
+ struct mrsas_ctrl_prop properties;
+
+ /*
+ * Define FW pkg version (set in envt v'bles on OEM basis)
+ */
+ char package_version[0x60];
+
+ u_int64_t deviceInterfacePortAddr2[8];
+ u_int8_t reserved3[128];
+
+ struct {
+ u_int16_t minPdRaidLevel_0:4;
+ u_int16_t maxPdRaidLevel_0:12;
+
+ u_int16_t minPdRaidLevel_1:4;
+ u_int16_t maxPdRaidLevel_1:12;
+
+ u_int16_t minPdRaidLevel_5:4;
+ u_int16_t maxPdRaidLevel_5:12;
+
+ u_int16_t minPdRaidLevel_1E:4;
+ u_int16_t maxPdRaidLevel_1E:12;
+
+ u_int16_t minPdRaidLevel_6:4;
+ u_int16_t maxPdRaidLevel_6:12;
+
+ u_int16_t minPdRaidLevel_10:4;
+ u_int16_t maxPdRaidLevel_10:12;
+
+ u_int16_t minPdRaidLevel_50:4;
+ u_int16_t maxPdRaidLevel_50:12;
+
+ u_int16_t minPdRaidLevel_60:4;
+ u_int16_t maxPdRaidLevel_60:12;
+
+ u_int16_t minPdRaidLevel_1E_RLQ0:4;
+ u_int16_t maxPdRaidLevel_1E_RLQ0:12;
+
+ u_int16_t minPdRaidLevel_1E0_RLQ0:4;
+ u_int16_t maxPdRaidLevel_1E0_RLQ0:12;
+
+ u_int16_t reserved[6];
+ } pdsForRaidLevels;
+
+ u_int16_t maxPds; /* 0x780 */
+ u_int16_t maxDedHSPs; /* 0x782 */
+ u_int16_t maxGlobalHSPs; /* 0x784 */
+ u_int16_t ddfSize; /* 0x786 */
+ u_int8_t maxLdsPerArray; /* 0x788 */
+ u_int8_t partitionsInDDF; /* 0x789 */
+ u_int8_t lockKeyBinding; /* 0x78a */
+ u_int8_t maxPITsPerLd; /* 0x78b */
+ u_int8_t maxViewsPerLd; /* 0x78c */
+ u_int8_t maxTargetId; /* 0x78d */
+ u_int16_t maxBvlVdSize; /* 0x78e */
+
+ u_int16_t maxConfigurableSSCSize; /* 0x790 */
+ u_int16_t currentSSCsize; /* 0x792 */
+
+ char expanderFwVersion[12]; /* 0x794 */
+
+ u_int16_t PFKTrialTimeRemaining;/* 0x7A0 */
+
+ u_int16_t cacheMemorySize; /* 0x7A2 */
+
+ struct { /* 0x7A4 */
+ u_int32_t supportPIcontroller:1;
+ u_int32_t supportLdPIType1:1;
+ u_int32_t supportLdPIType2:1;
+ u_int32_t supportLdPIType3:1;
+ u_int32_t supportLdBBMInfo:1;
+ u_int32_t supportShieldState:1;
+ u_int32_t blockSSDWriteCacheChange:1;
+ u_int32_t supportSuspendResumeBGops:1;
+ u_int32_t supportEmergencySpares:1;
+ u_int32_t supportSetLinkSpeed:1;
+ u_int32_t supportBootTimePFKChange:1;
+ u_int32_t supportJBOD:1;
+ u_int32_t disableOnlinePFKChange:1;
+ u_int32_t supportPerfTuning:1;
+ u_int32_t supportSSDPatrolRead:1;
+ u_int32_t realTimeScheduler:1;
+
+ u_int32_t supportResetNow:1;
+ u_int32_t supportEmulatedDrives:1;
+ u_int32_t headlessMode:1;
+ u_int32_t dedicatedHotSparesLimited:1;
+
+
+ u_int32_t supportUnevenSpans:1;
+ u_int32_t reserved:11;
+ } adapterOperations2;
+
+ u_int8_t driverVersion[32]; /* 0x7A8 */
+ u_int8_t maxDAPdCountSpinup60; /* 0x7C8 */
+ u_int8_t temperatureROC; /* 0x7C9 */
+ u_int8_t temperatureCtrl; /* 0x7CA */
+ u_int8_t reserved4; /* 0x7CB */
+ u_int16_t maxConfigurablePds; /* 0x7CC */
+
+
+ u_int8_t reserved5[2]; /* 0x7CD reserved */
+
+ struct {
+ u_int32_t peerIsPresent:1;
+ u_int32_t peerIsIncompatible:1;
+
+ u_int32_t hwIncompatible:1;
+ u_int32_t fwVersionMismatch:1;
+ u_int32_t ctrlPropIncompatible:1;
+ u_int32_t premiumFeatureMismatch:1;
+ u_int32_t reserved:26;
+ } cluster;
+
+ char clusterId[16]; /* 0x7D4 */
+
+ char reserved6[4]; /* 0x7E4 RESERVED FOR IOV */
+
+ struct { /* 0x7E8 */
+ u_int32_t resrved:5;
+ u_int32_t supportMaxExtLDs:1;
+ u_int32_t reserved1:26;
+ } adapterOperations3;
+
+ u_int8_t pad[0x800 - 0x7EC]; /* 0x7EC */
+} __packed;
/*
* When SCSI mid-layer calls driver's reset routine, driver waits for
@@ -1846,304 +1911,334 @@ struct mrsas_ctrl_info {
* it is waiting for the commands to complete, it prints a diagnostic message
* every MRSAS_RESET_NOTICE_INTERVAL seconds
*/
-#define MRSAS_RESET_WAIT_TIME 180
-#define MRSAS_INTERNAL_CMD_WAIT_TIME 180
-#define MRSAS_IOC_INIT_WAIT_TIME 60
-#define MRSAS_RESET_NOTICE_INTERVAL 5
-#define MRSAS_IOCTL_CMD 0
-#define MRSAS_DEFAULT_CMD_TIMEOUT 90
-#define MRSAS_THROTTLE_QUEUE_DEPTH 16
-
-/*
+#define MRSAS_RESET_WAIT_TIME 180
+#define MRSAS_INTERNAL_CMD_WAIT_TIME 180
+#define MRSAS_IOC_INIT_WAIT_TIME 60
+#define MRSAS_RESET_NOTICE_INTERVAL 5
+#define MRSAS_IOCTL_CMD 0
+#define MRSAS_DEFAULT_CMD_TIMEOUT 90
+#define MRSAS_THROTTLE_QUEUE_DEPTH 16
+
+/*
+ * MSI-x regsiters offset defines
+ */
+#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C)
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+#define MR_MAX_REPLY_QUEUES_OFFSET (0x0000001F)
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET (0x003FC000)
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
+#define MR_MAX_MSIX_REG_ARRAY 16
+
+/*
* FW reports the maximum of number of commands that it can accept (maximum
* commands that can be outstanding) at any time. The driver must report a
* lower number to the mid layer because it can issue a few internal commands
* itself (E.g, AEN, abort cmd, IOCTLs etc). The number of commands it needs
* is shown below
*/
-#define MRSAS_INT_CMDS 32
-#define MRSAS_SKINNY_INT_CMDS 5
-#define MRSAS_MAX_MSIX_QUEUES 16
+#define MRSAS_INT_CMDS 32
+#define MRSAS_SKINNY_INT_CMDS 5
+#define MRSAS_MAX_MSIX_QUEUES 128
/*
- * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
- * SGLs based on the size of bus_addr_t
+ * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit SGLs
+ * based on the size of bus_addr_t
*/
-#define IS_DMA64 (sizeof(bus_addr_t) == 8)
-
-#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001 // MFI state change interrupt
-#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001
-#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002
-#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004 //MFI state change interrupt
-
-#define MFI_OB_INTR_STATUS_MASK 0x00000002
-#define MFI_POLL_TIMEOUT_SECS 60
-
-#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
-#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
-#define MFI_GEN2_ENABLE_INTERRUPT_MASK 0x00000001
-#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
-#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
-#define MFI_1068_PCSR_OFFSET 0x84
-#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64
-#define MFI_1068_FW_READY 0xDDDD0000
+#define IS_DMA64 (sizeof(bus_addr_t) == 8)
+
+#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001
+#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001
+#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002
+#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004
+
+#define MFI_OB_INTR_STATUS_MASK 0x00000002
+#define MFI_POLL_TIMEOUT_SECS 60
+
+#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
+#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
+#define MFI_GEN2_ENABLE_INTERRUPT_MASK 0x00000001
+#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
+#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
+#define MFI_1068_PCSR_OFFSET 0x84
+#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64
+#define MFI_1068_FW_READY 0xDDDD0000
+
+typedef union _MFI_CAPABILITIES {
+ struct {
+ u_int32_t support_fp_remote_lun:1;
+ u_int32_t support_additional_msix:1;
+ u_int32_t support_fastpath_wb:1;
+ u_int32_t support_max_255lds:1;
+ u_int32_t reserved:28;
+ } mfi_capabilities;
+ u_int32_t reg;
+} MFI_CAPABILITIES;
#pragma pack(1)
struct mrsas_sge32 {
- u_int32_t phys_addr;
- u_int32_t length;
+ u_int32_t phys_addr;
+ u_int32_t length;
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_sge64 {
- u_int64_t phys_addr;
- u_int32_t length;
+ u_int64_t phys_addr;
+ u_int32_t length;
};
+
#pragma pack()
#pragma pack()
union mrsas_sgl {
- struct mrsas_sge32 sge32[1];
- struct mrsas_sge64 sge64[1];
+ struct mrsas_sge32 sge32[1];
+ struct mrsas_sge64 sge64[1];
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_header {
- u_int8_t cmd; /*00e */
- u_int8_t sense_len; /*01h */
- u_int8_t cmd_status; /*02h */
- u_int8_t scsi_status; /*03h */
-
- u_int8_t target_id; /*04h */
- u_int8_t lun; /*05h */
- u_int8_t cdb_len; /*06h */
- u_int8_t sge_count; /*07h */
-
- u_int32_t context; /*08h */
- u_int32_t pad_0; /*0Ch */
-
- u_int16_t flags; /*10h */
- u_int16_t timeout; /*12h */
- u_int32_t data_xferlen; /*14h */
+ u_int8_t cmd; /* 00e */
+ u_int8_t sense_len; /* 01h */
+ u_int8_t cmd_status; /* 02h */
+ u_int8_t scsi_status; /* 03h */
+
+ u_int8_t target_id; /* 04h */
+ u_int8_t lun; /* 05h */
+ u_int8_t cdb_len; /* 06h */
+ u_int8_t sge_count; /* 07h */
+
+ u_int32_t context; /* 08h */
+ u_int32_t pad_0; /* 0Ch */
+
+ u_int16_t flags; /* 10h */
+ u_int16_t timeout; /* 12h */
+ u_int32_t data_xferlen; /* 14h */
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_init_frame {
- u_int8_t cmd; /*00h */
- u_int8_t reserved_0; /*01h */
- u_int8_t cmd_status; /*02h */
-
- u_int8_t reserved_1; /*03h */
- u_int32_t reserved_2; /*04h */
-
- u_int32_t context; /*08h */
- u_int32_t pad_0; /*0Ch */
-
- u_int16_t flags; /*10h */
- u_int16_t reserved_3; /*12h */
- u_int32_t data_xfer_len; /*14h */
-
- u_int32_t queue_info_new_phys_addr_lo; /*18h */
- u_int32_t queue_info_new_phys_addr_hi; /*1Ch */
- u_int32_t queue_info_old_phys_addr_lo; /*20h */
- u_int32_t queue_info_old_phys_addr_hi; /*24h */
- u_int32_t driver_ver_lo; /*28h */
- u_int32_t driver_ver_hi; /*2Ch */
- u_int32_t reserved_4[4]; /*30h */
+ u_int8_t cmd; /* 00h */
+ u_int8_t reserved_0; /* 01h */
+ u_int8_t cmd_status; /* 02h */
+
+ u_int8_t reserved_1; /* 03h */
+ MFI_CAPABILITIES driver_operations; /* 04h */
+ u_int32_t context; /* 08h */
+ u_int32_t pad_0; /* 0Ch */
+
+ u_int16_t flags; /* 10h */
+ u_int16_t reserved_3; /* 12h */
+ u_int32_t data_xfer_len; /* 14h */
+
+ u_int32_t queue_info_new_phys_addr_lo; /* 18h */
+ u_int32_t queue_info_new_phys_addr_hi; /* 1Ch */
+ u_int32_t queue_info_old_phys_addr_lo; /* 20h */
+ u_int32_t queue_info_old_phys_addr_hi; /* 24h */
+ u_int32_t driver_ver_lo; /* 28h */
+ u_int32_t driver_ver_hi; /* 2Ch */
+ u_int32_t reserved_4[4]; /* 30h */
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_io_frame {
- u_int8_t cmd; /*00h */
- u_int8_t sense_len; /*01h */
- u_int8_t cmd_status; /*02h */
- u_int8_t scsi_status; /*03h */
+ u_int8_t cmd; /* 00h */
+ u_int8_t sense_len; /* 01h */
+ u_int8_t cmd_status; /* 02h */
+ u_int8_t scsi_status; /* 03h */
- u_int8_t target_id; /*04h */
- u_int8_t access_byte; /*05h */
- u_int8_t reserved_0; /*06h */
- u_int8_t sge_count; /*07h */
+ u_int8_t target_id; /* 04h */
+ u_int8_t access_byte; /* 05h */
+ u_int8_t reserved_0; /* 06h */
+ u_int8_t sge_count; /* 07h */
- u_int32_t context; /*08h */
- u_int32_t pad_0; /*0Ch */
+ u_int32_t context; /* 08h */
+ u_int32_t pad_0; /* 0Ch */
- u_int16_t flags; /*10h */
- u_int16_t timeout; /*12h */
- u_int32_t lba_count; /*14h */
+ u_int16_t flags; /* 10h */
+ u_int16_t timeout; /* 12h */
+ u_int32_t lba_count; /* 14h */
- u_int32_t sense_buf_phys_addr_lo; /*18h */
- u_int32_t sense_buf_phys_addr_hi; /*1Ch */
+ u_int32_t sense_buf_phys_addr_lo; /* 18h */
+ u_int32_t sense_buf_phys_addr_hi; /* 1Ch */
- u_int32_t start_lba_lo; /*20h */
- u_int32_t start_lba_hi; /*24h */
+ u_int32_t start_lba_lo; /* 20h */
+ u_int32_t start_lba_hi; /* 24h */
- union mrsas_sgl sgl; /*28h */
+ union mrsas_sgl sgl; /* 28h */
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_pthru_frame {
- u_int8_t cmd; /*00h */
- u_int8_t sense_len; /*01h */
- u_int8_t cmd_status; /*02h */
- u_int8_t scsi_status; /*03h */
+ u_int8_t cmd; /* 00h */
+ u_int8_t sense_len; /* 01h */
+ u_int8_t cmd_status; /* 02h */
+ u_int8_t scsi_status; /* 03h */
- u_int8_t target_id; /*04h */
- u_int8_t lun; /*05h */
- u_int8_t cdb_len; /*06h */
- u_int8_t sge_count; /*07h */
+ u_int8_t target_id; /* 04h */
+ u_int8_t lun; /* 05h */
+ u_int8_t cdb_len; /* 06h */
+ u_int8_t sge_count; /* 07h */
- u_int32_t context; /*08h */
- u_int32_t pad_0; /*0Ch */
+ u_int32_t context; /* 08h */
+ u_int32_t pad_0; /* 0Ch */
- u_int16_t flags; /*10h */
- u_int16_t timeout; /*12h */
- u_int32_t data_xfer_len; /*14h */
+ u_int16_t flags; /* 10h */
+ u_int16_t timeout; /* 12h */
+ u_int32_t data_xfer_len; /* 14h */
- u_int32_t sense_buf_phys_addr_lo; /*18h */
- u_int32_t sense_buf_phys_addr_hi; /*1Ch */
+ u_int32_t sense_buf_phys_addr_lo; /* 18h */
+ u_int32_t sense_buf_phys_addr_hi; /* 1Ch */
- u_int8_t cdb[16]; /*20h */
- union mrsas_sgl sgl; /*30h */
+ u_int8_t cdb[16]; /* 20h */
+ union mrsas_sgl sgl; /* 30h */
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_dcmd_frame {
- u_int8_t cmd; /*00h */
- u_int8_t reserved_0; /*01h */
- u_int8_t cmd_status; /*02h */
- u_int8_t reserved_1[4]; /*03h */
- u_int8_t sge_count; /*07h */
+ u_int8_t cmd; /* 00h */
+ u_int8_t reserved_0; /* 01h */
+ u_int8_t cmd_status; /* 02h */
+ u_int8_t reserved_1[4]; /* 03h */
+ u_int8_t sge_count; /* 07h */
- u_int32_t context; /*08h */
- u_int32_t pad_0; /*0Ch */
+ u_int32_t context; /* 08h */
+ u_int32_t pad_0; /* 0Ch */
- u_int16_t flags; /*10h */
- u_int16_t timeout; /*12h */
+ u_int16_t flags; /* 10h */
+ u_int16_t timeout; /* 12h */
- u_int32_t data_xfer_len; /*14h */
- u_int32_t opcode; /*18h */
+ u_int32_t data_xfer_len; /* 14h */
+ u_int32_t opcode; /* 18h */
- union { /*1Ch */
- u_int8_t b[12];
- u_int16_t s[6];
- u_int32_t w[3];
- } mbox;
+ union { /* 1Ch */
+ u_int8_t b[12];
+ u_int16_t s[6];
+ u_int32_t w[3];
+ } mbox;
- union mrsas_sgl sgl; /*28h */
+ union mrsas_sgl sgl; /* 28h */
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_abort_frame {
- u_int8_t cmd; /*00h */
- u_int8_t reserved_0; /*01h */
- u_int8_t cmd_status; /*02h */
-
- u_int8_t reserved_1; /*03h */
- u_int32_t reserved_2; /*04h */
+ u_int8_t cmd; /* 00h */
+ u_int8_t reserved_0; /* 01h */
+ u_int8_t cmd_status; /* 02h */
- u_int32_t context; /*08h */
- u_int32_t pad_0; /*0Ch */
+ u_int8_t reserved_1; /* 03h */
+ MFI_CAPABILITIES driver_operations; /* 04h */
+ u_int32_t context; /* 08h */
+ u_int32_t pad_0; /* 0Ch */
- u_int16_t flags; /*10h */
- u_int16_t reserved_3; /*12h */
- u_int32_t reserved_4; /*14h */
+ u_int16_t flags; /* 10h */
+ u_int16_t reserved_3; /* 12h */
+ u_int32_t reserved_4; /* 14h */
- u_int32_t abort_context; /*18h */
- u_int32_t pad_1; /*1Ch */
+ u_int32_t abort_context; /* 18h */
+ u_int32_t pad_1; /* 1Ch */
- u_int32_t abort_mfi_phys_addr_lo; /*20h */
- u_int32_t abort_mfi_phys_addr_hi; /*24h */
+ u_int32_t abort_mfi_phys_addr_lo; /* 20h */
+ u_int32_t abort_mfi_phys_addr_hi; /* 24h */
- u_int32_t reserved_5[6]; /*28h */
+ u_int32_t reserved_5[6]; /* 28h */
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_smp_frame {
- u_int8_t cmd; /*00h */
- u_int8_t reserved_1; /*01h */
- u_int8_t cmd_status; /*02h */
- u_int8_t connection_status; /*03h */
+ u_int8_t cmd; /* 00h */
+ u_int8_t reserved_1; /* 01h */
+ u_int8_t cmd_status; /* 02h */
+ u_int8_t connection_status; /* 03h */
- u_int8_t reserved_2[3]; /*04h */
- u_int8_t sge_count; /*07h */
+ u_int8_t reserved_2[3]; /* 04h */
+ u_int8_t sge_count; /* 07h */
- u_int32_t context; /*08h */
- u_int32_t pad_0; /*0Ch */
+ u_int32_t context; /* 08h */
+ u_int32_t pad_0; /* 0Ch */
- u_int16_t flags; /*10h */
- u_int16_t timeout; /*12h */
+ u_int16_t flags; /* 10h */
+ u_int16_t timeout; /* 12h */
- u_int32_t data_xfer_len; /*14h */
- u_int64_t sas_addr; /*18h */
+ u_int32_t data_xfer_len; /* 14h */
+ u_int64_t sas_addr; /* 18h */
- union {
- struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: req */
- struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: req */
- } sgl;
+ union {
+ struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: req */
+ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: req */
+ } sgl;
};
+
#pragma pack()
#pragma pack(1)
struct mrsas_stp_frame {
- u_int8_t cmd; /*00h */
- u_int8_t reserved_1; /*01h */
- u_int8_t cmd_status; /*02h */
- u_int8_t reserved_2; /*03h */
+ u_int8_t cmd; /* 00h */
+ u_int8_t reserved_1; /* 01h */
+ u_int8_t cmd_status; /* 02h */
+ u_int8_t reserved_2; /* 03h */
- u_int8_t target_id; /*04h */
- u_int8_t reserved_3[2]; /*05h */
- u_int8_t sge_count; /*07h */
+ u_int8_t target_id; /* 04h */
+ u_int8_t reserved_3[2]; /* 05h */
+ u_int8_t sge_count; /* 07h */
- u_int32_t context; /*08h */
- u_int32_t pad_0; /*0Ch */
+ u_int32_t context; /* 08h */
+ u_int32_t pad_0; /* 0Ch */
- u_int16_t flags; /*10h */
- u_int16_t timeout; /*12h */
+ u_int16_t flags; /* 10h */
+ u_int16_t timeout; /* 12h */
- u_int32_t data_xfer_len; /*14h */
+ u_int32_t data_xfer_len; /* 14h */
- u_int16_t fis[10]; /*18h */
- u_int32_t stp_flags;
+ u_int16_t fis[10]; /* 18h */
+ u_int32_t stp_flags;
- union {
- struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: data */
- struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: data */
- } sgl;
+ union {
+ struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: data */
+ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: data */
+ } sgl;
};
+
#pragma pack()
union mrsas_frame {
- struct mrsas_header hdr;
- struct mrsas_init_frame init;
- struct mrsas_io_frame io;
- struct mrsas_pthru_frame pthru;
- struct mrsas_dcmd_frame dcmd;
- struct mrsas_abort_frame abort;
- struct mrsas_smp_frame smp;
- struct mrsas_stp_frame stp;
- u_int8_t raw_bytes[64];
+ struct mrsas_header hdr;
+ struct mrsas_init_frame init;
+ struct mrsas_io_frame io;
+ struct mrsas_pthru_frame pthru;
+ struct mrsas_dcmd_frame dcmd;
+ struct mrsas_abort_frame abort;
+ struct mrsas_smp_frame smp;
+ struct mrsas_stp_frame stp;
+ u_int8_t raw_bytes[64];
};
#pragma pack(1)
union mrsas_evt_class_locale {
- struct {
- u_int16_t locale;
- u_int8_t reserved;
- int8_t class;
- } __packed members;
-
- u_int32_t word;
-
+ struct {
+ u_int16_t locale;
+ u_int8_t reserved;
+ int8_t class;
+ } __packed members;
+
+ u_int32_t word;
+
} __packed;
#pragma pack()
@@ -2151,12 +2246,12 @@ union mrsas_evt_class_locale {
#pragma pack(1)
struct mrsas_evt_log_info {
- u_int32_t newest_seq_num;
- u_int32_t oldest_seq_num;
- u_int32_t clear_seq_num;
- u_int32_t shutdown_seq_num;
- u_int32_t boot_seq_num;
-
+ u_int32_t newest_seq_num;
+ u_int32_t oldest_seq_num;
+ u_int32_t clear_seq_num;
+ u_int32_t shutdown_seq_num;
+ u_int32_t boot_seq_num;
+
} __packed;
#pragma pack()
@@ -2280,7 +2375,7 @@ struct mrsas_evt_detail {
} __packed pci;
u_int32_t rate;
- char str[96];
+ char str[96];
struct {
u_int32_t rtc;
@@ -2290,175 +2385,213 @@ struct mrsas_evt_detail {
struct {
u_int32_t ecar;
u_int32_t elog;
- char str[64];
+ char str[64];
} __packed ecc;
u_int8_t b[96];
u_int16_t s[48];
u_int32_t w[24];
u_int64_t d[12];
- } args;
+ } args;
- char description[128];
+ char description[128];
} __packed;
+struct mrsas_irq_context {
+ struct mrsas_softc *sc;
+ uint32_t MSIxIndex;
+};
+
+/* Controller management info added to support Linux Emulator */
+#define MAX_MGMT_ADAPTERS 1024
+
+struct mrsas_mgmt_info {
+ u_int16_t count;
+ struct mrsas_softc *sc_ptr[MAX_MGMT_ADAPTERS];
+ int max_index;
+};
/*******************************************************************
* per-instance data
********************************************************************/
struct mrsas_softc {
- device_t mrsas_dev; // bus device
- struct cdev *mrsas_cdev; // controller device
- uint16_t device_id; // pci device
- struct resource *reg_res; // register interface window
- int reg_res_id; // register resource id
- bus_space_tag_t bus_tag; // bus space tag
- bus_space_handle_t bus_handle; // bus space handle
- bus_dma_tag_t mrsas_parent_tag; // bus dma parent tag
- bus_dma_tag_t verbuf_tag; // verbuf tag
- bus_dmamap_t verbuf_dmamap; // verbuf dmamap
- void *verbuf_mem; // verbuf mem
- bus_addr_t verbuf_phys_addr; // verbuf physical addr
- bus_dma_tag_t sense_tag; // bus dma verbuf tag
- bus_dmamap_t sense_dmamap; // bus dma verbuf dmamap
- void *sense_mem; // pointer to sense buf
- bus_addr_t sense_phys_addr; // bus dma verbuf mem
- bus_dma_tag_t io_request_tag; // bus dma io request tag
- bus_dmamap_t io_request_dmamap; // bus dma io request dmamap
- void *io_request_mem; // bus dma io request mem
- bus_addr_t io_request_phys_addr; // io request physical address
- bus_dma_tag_t chain_frame_tag; // bus dma chain frame tag
- bus_dmamap_t chain_frame_dmamap; // bus dma chain frame dmamap
- void *chain_frame_mem; // bus dma chain frame mem
- bus_addr_t chain_frame_phys_addr; // chain frame phys address
- bus_dma_tag_t reply_desc_tag; // bus dma io request tag
- bus_dmamap_t reply_desc_dmamap; // bus dma io request dmamap
- void *reply_desc_mem; // bus dma io request mem
- bus_addr_t reply_desc_phys_addr; // bus dma io request mem
- bus_dma_tag_t ioc_init_tag; // bus dma io request tag
- bus_dmamap_t ioc_init_dmamap; // bus dma io request dmamap
- void *ioc_init_mem; // bus dma io request mem
- bus_addr_t ioc_init_phys_mem; // io request physical address
- bus_dma_tag_t data_tag; // bus dma data from OS tag
- struct cam_sim *sim_0; // SIM pointer
- struct cam_sim *sim_1; // SIM pointer
- struct cam_path *path_0; // ldio path pointer to CAM
- struct cam_path *path_1; // syspd path pointer to CAM
- struct mtx sim_lock; // sim lock
- struct mtx pci_lock; // serialize pci access
- struct mtx io_lock; // IO lock
- struct mtx ioctl_lock; // IOCTL lock
- struct mtx mpt_cmd_pool_lock; // lock for cmd pool linked list
- struct mtx mfi_cmd_pool_lock; // lock for cmd pool linked list
- struct mtx raidmap_lock; // lock for raid map access/update
- struct mtx aen_lock; // aen lock
- uint32_t max_fw_cmds; // Max commands from FW
- uint32_t max_num_sge; // Max number of SGEs
- struct resource *mrsas_irq; // interrupt interface window
- void *intr_handle; // handle
- int irq_id; // intr resource id
- struct mrsas_mpt_cmd **mpt_cmd_list;
- struct mrsas_mfi_cmd **mfi_cmd_list;
- TAILQ_HEAD(, mrsas_mpt_cmd) mrsas_mpt_cmd_list_head;
- TAILQ_HEAD(, mrsas_mfi_cmd) mrsas_mfi_cmd_list_head;
- bus_addr_t req_frames_desc_phys;
- u_int8_t *req_frames_desc;
- u_int8_t *req_desc;
- bus_addr_t io_request_frames_phys;
- u_int8_t *io_request_frames;
- bus_addr_t reply_frames_desc_phys;
- u_int16_t last_reply_idx;
- u_int32_t reply_q_depth;
- u_int32_t request_alloc_sz;
- u_int32_t reply_alloc_sz;
- u_int32_t io_frames_alloc_sz;
- u_int32_t chain_frames_alloc_sz;
- u_int16_t max_sge_in_main_msg;
- u_int16_t max_sge_in_chain;
- u_int8_t chain_offset_io_request;
- u_int8_t chain_offset_mfi_pthru;
- u_int32_t map_sz;
- u_int64_t map_id;
- struct mrsas_mfi_cmd *map_update_cmd;
- struct mrsas_mfi_cmd *aen_cmd;
- u_int8_t fast_path_io;
- void* chan;
- void* ocr_chan;
- u_int8_t adprecovery;
- u_int8_t remove_in_progress;
- u_int8_t ocr_thread_active;
- u_int8_t do_timedout_reset;
- u_int32_t reset_in_progress;
- u_int32_t reset_count;
- bus_dma_tag_t raidmap_tag[2]; // bus dma tag for RAID map
- bus_dmamap_t raidmap_dmamap[2]; // bus dma dmamap RAID map
- void *raidmap_mem[2]; // bus dma mem RAID map
- bus_addr_t raidmap_phys_addr[2]; // RAID map physical address
- bus_dma_tag_t mficmd_frame_tag; // tag for mfi frame
- bus_dma_tag_t mficmd_sense_tag; // tag for mfi sense
- bus_dma_tag_t evt_detail_tag; // event detail tag
- bus_dmamap_t evt_detail_dmamap; // event detail dmamap
- struct mrsas_evt_detail *evt_detail_mem; // event detail mem
- bus_addr_t evt_detail_phys_addr; // event detail physical addr
- bus_dma_tag_t ctlr_info_tag; // tag for get ctlr info cmd
- bus_dmamap_t ctlr_info_dmamap; // get ctlr info cmd dmamap
- void *ctlr_info_mem; // get ctlr info cmd virtual addr
- bus_addr_t ctlr_info_phys_addr; //get ctlr info cmd physical addr
- u_int32_t max_sectors_per_req;
- u_int8_t disableOnlineCtrlReset;
- atomic_t fw_outstanding;
- u_int32_t mrsas_debug;
- u_int32_t mrsas_io_timeout;
- u_int32_t mrsas_fw_fault_check_delay;
- u_int32_t io_cmds_highwater;
- u_int8_t UnevenSpanSupport;
- struct sysctl_ctx_list sysctl_ctx;
- struct sysctl_oid *sysctl_tree;
- struct proc *ocr_thread;
- u_int32_t last_seq_num;
- bus_dma_tag_t el_info_tag; // tag for get event log info cmd
- bus_dmamap_t el_info_dmamap; // get event log info cmd dmamap
- void *el_info_mem; // get event log info cmd virtual addr
- bus_addr_t el_info_phys_addr; //get event log info cmd physical addr
- struct mrsas_pd_list pd_list[MRSAS_MAX_PD];
- struct mrsas_pd_list local_pd_list[MRSAS_MAX_PD];
- u_int8_t ld_ids[MRSAS_MAX_LD];
- struct taskqueue *ev_tq; //taskqueue for events
- struct task ev_task;
- u_int32_t CurLdCount;
- u_int64_t reset_flags;
- LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
- LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
+ device_t mrsas_dev;
+ struct cdev *mrsas_cdev;
+ uint16_t device_id;
+ struct resource *reg_res;
+ int reg_res_id;
+ bus_space_tag_t bus_tag;
+ bus_space_handle_t bus_handle;
+ bus_dma_tag_t mrsas_parent_tag;
+ bus_dma_tag_t verbuf_tag;
+ bus_dmamap_t verbuf_dmamap;
+ void *verbuf_mem;
+ bus_addr_t verbuf_phys_addr;
+ bus_dma_tag_t sense_tag;
+ bus_dmamap_t sense_dmamap;
+ void *sense_mem;
+ bus_addr_t sense_phys_addr;
+ bus_dma_tag_t io_request_tag;
+ bus_dmamap_t io_request_dmamap;
+ void *io_request_mem;
+ bus_addr_t io_request_phys_addr;
+ bus_dma_tag_t chain_frame_tag;
+ bus_dmamap_t chain_frame_dmamap;
+ void *chain_frame_mem;
+ bus_addr_t chain_frame_phys_addr;
+ bus_dma_tag_t reply_desc_tag;
+ bus_dmamap_t reply_desc_dmamap;
+ void *reply_desc_mem;
+ bus_addr_t reply_desc_phys_addr;
+ bus_dma_tag_t ioc_init_tag;
+ bus_dmamap_t ioc_init_dmamap;
+ void *ioc_init_mem;
+ bus_addr_t ioc_init_phys_mem;
+ bus_dma_tag_t data_tag;
+ struct cam_sim *sim_0;
+ struct cam_sim *sim_1;
+ struct cam_path *path_0;
+ struct cam_path *path_1;
+ struct mtx sim_lock;
+ struct mtx pci_lock;
+ struct mtx io_lock;
+ struct mtx ioctl_lock;
+ struct mtx mpt_cmd_pool_lock;
+ struct mtx mfi_cmd_pool_lock;
+ struct mtx raidmap_lock;
+ struct mtx aen_lock;
+ struct selinfo mrsas_select;
+ uint32_t mrsas_aen_triggered;
+ uint32_t mrsas_poll_waiting;
+
+ struct sema ioctl_count_sema;
+ uint32_t max_fw_cmds;
+ uint32_t max_num_sge;
+ struct resource *mrsas_irq[MAX_MSIX_COUNT];
+ void *intr_handle[MAX_MSIX_COUNT];
+ int irq_id[MAX_MSIX_COUNT];
+ struct mrsas_irq_context irq_context[MAX_MSIX_COUNT];
+ int msix_vectors;
+ int msix_enable;
+ uint32_t msix_reg_offset[16];
+ struct mrsas_mpt_cmd **mpt_cmd_list;
+ struct mrsas_mfi_cmd **mfi_cmd_list;
+ TAILQ_HEAD(, mrsas_mpt_cmd) mrsas_mpt_cmd_list_head;
+ TAILQ_HEAD(, mrsas_mfi_cmd) mrsas_mfi_cmd_list_head;
+ bus_addr_t req_frames_desc_phys;
+ u_int8_t *req_frames_desc;
+ u_int8_t *req_desc;
+ bus_addr_t io_request_frames_phys;
+ u_int8_t *io_request_frames;
+ bus_addr_t reply_frames_desc_phys;
+ u_int16_t last_reply_idx[MAX_MSIX_COUNT];
+ u_int32_t reply_q_depth;
+ u_int32_t request_alloc_sz;
+ u_int32_t reply_alloc_sz;
+ u_int32_t io_frames_alloc_sz;
+ u_int32_t chain_frames_alloc_sz;
+ u_int16_t max_sge_in_main_msg;
+ u_int16_t max_sge_in_chain;
+ u_int8_t chain_offset_io_request;
+ u_int8_t chain_offset_mfi_pthru;
+ u_int32_t map_sz;
+ u_int64_t map_id;
+ struct mrsas_mfi_cmd *map_update_cmd;
+ struct mrsas_mfi_cmd *aen_cmd;
+ u_int8_t fast_path_io;
+ void *chan;
+ void *ocr_chan;
+ u_int8_t adprecovery;
+ u_int8_t remove_in_progress;
+ u_int8_t ocr_thread_active;
+ u_int8_t do_timedout_reset;
+ u_int32_t reset_in_progress;
+ u_int32_t reset_count;
+ bus_dma_tag_t raidmap_tag[2];
+ bus_dmamap_t raidmap_dmamap[2];
+ void *raidmap_mem[2];
+ bus_addr_t raidmap_phys_addr[2];
+ bus_dma_tag_t mficmd_frame_tag;
+ bus_dma_tag_t mficmd_sense_tag;
+ bus_dma_tag_t evt_detail_tag;
+ bus_dmamap_t evt_detail_dmamap;
+ struct mrsas_evt_detail *evt_detail_mem;
+ bus_addr_t evt_detail_phys_addr;
+ bus_dma_tag_t ctlr_info_tag;
+ bus_dmamap_t ctlr_info_dmamap;
+ void *ctlr_info_mem;
+ bus_addr_t ctlr_info_phys_addr;
+ u_int32_t max_sectors_per_req;
+ u_int8_t disableOnlineCtrlReset;
+ atomic_t fw_outstanding;
+ u_int32_t mrsas_debug;
+ u_int32_t mrsas_io_timeout;
+ u_int32_t mrsas_fw_fault_check_delay;
+ u_int32_t io_cmds_highwater;
+ u_int8_t UnevenSpanSupport;
+ struct sysctl_ctx_list sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+ struct proc *ocr_thread;
+ u_int32_t last_seq_num;
+ bus_dma_tag_t el_info_tag;
+ bus_dmamap_t el_info_dmamap;
+ void *el_info_mem;
+ bus_addr_t el_info_phys_addr;
+ struct mrsas_pd_list pd_list[MRSAS_MAX_PD];
+ struct mrsas_pd_list local_pd_list[MRSAS_MAX_PD];
+ u_int8_t ld_ids[MRSAS_MAX_LD_IDS];
+ struct taskqueue *ev_tq;
+ struct task ev_task;
+ u_int32_t CurLdCount;
+ u_int64_t reset_flags;
+ LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
+ LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
+
+ u_int8_t max256vdSupport;
+ u_int16_t fw_supported_vd_count;
+ u_int16_t fw_supported_pd_count;
+
+ u_int16_t drv_supported_vd_count;
+ u_int16_t drv_supported_pd_count;
+
+ u_int32_t max_map_sz;
+ u_int32_t current_map_sz;
+ u_int32_t old_map_sz;
+ u_int32_t new_map_sz;
+ u_int32_t drv_map_sz;
+
+ /* Non dma-able memory. Driver local copy. */
+ MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
};
/* Compatibility shims for different OS versions */
#if __FreeBSD_version >= 800001
-#define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+#define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
-#define mrsas_kproc_exit(arg) kproc_exit(arg)
+#define mrsas_kproc_exit(arg) kproc_exit(arg)
#else
-#define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+#define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
-#define mrsas_kproc_exit(arg) kthread_exit(arg)
+#define mrsas_kproc_exit(arg) kthread_exit(arg)
#endif
static __inline void
clear_bit(int b, volatile void *p)
{
- atomic_clear_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
+ atomic_clear_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
}
static __inline void
set_bit(int b, volatile void *p)
{
- atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
+ atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
}
static __inline int
test_bit(int b, volatile void *p)
{
- return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f));
+ return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f));
}
-#endif /* MRSAS_H */
+#endif /* MRSAS_H */
diff --git a/sys/dev/mrsas/mrsas_cam.c b/sys/dev/mrsas/mrsas_cam.c
index bde974a..c454f7b 100644
--- a/sys/dev/mrsas/mrsas_cam.c
+++ b/sys/dev/mrsas/mrsas_cam.c
@@ -1,42 +1,38 @@
/*
- * Copyright (c) 2014, LSI Corp.
- * All rights reserved.
- * Author: Marian Choy
+ * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
* Support: freebsdraid@lsi.com
*
* Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. Neither the name of the <ORGANIZATION> nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer. 2. Redistributions
+ * in binary form must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution. 3. Neither the name of the
+ * <ORGANIZATION> nor the names of its contributors may be used to endorse or
+ * promote products derived from this software without specific prior written
+ * permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
-*/
+ */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include "dev/mrsas/mrsas.h"
+#include "dev/mrsas/mrsas.h"
#include <cam/cam.h>
#include <cam/cam_ccb.h>
@@ -49,725 +45,735 @@ __FBSDID("$FreeBSD$");
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <sys/taskqueue.h>
+#include <sys/kernel.h>
+
+
+#include <sys/time.h> /* XXX for pcpu.h */
+#include <sys/pcpu.h> /* XXX for PCPU_GET */
+#define smp_processor_id() PCPU_GET(cpuid)
/*
* Function prototypes
*/
-int mrsas_cam_attach(struct mrsas_softc *sc);
-//int mrsas_ldio_inq(union ccb *ccb);
-int mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb);
-int mrsas_bus_scan(struct mrsas_softc *sc);
-int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
-int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
-int mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb);
-int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb, struct cam_sim *sim);
-int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb, u_int32_t device_id,
- MRSAS_RAID_SCSI_IO_REQUEST *io_request);
-void mrsas_xpt_freeze(struct mrsas_softc *sc);
-void mrsas_xpt_release(struct mrsas_softc *sc);
-void mrsas_cam_detach(struct mrsas_softc *sc);
-void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
-void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
-void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
-void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
- u_int32_t req_desc_hi);
-void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
- struct IO_REQUEST_INFO *io_info, union ccb *ccb,
- MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
- u_int32_t ld_block_size);
+int mrsas_cam_attach(struct mrsas_softc *sc);
+int mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb);
+int mrsas_bus_scan(struct mrsas_softc *sc);
+int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
+int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+int
+mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb);
+int
+mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim);
+int
+mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, u_int32_t device_id,
+ MRSAS_RAID_SCSI_IO_REQUEST * io_request);
+void mrsas_xpt_freeze(struct mrsas_softc *sc);
+void mrsas_xpt_release(struct mrsas_softc *sc);
+void mrsas_cam_detach(struct mrsas_softc *sc);
+void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
+void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+void
+mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
+ u_int32_t req_desc_hi);
+void
+mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
+ u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
+ MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
+ u_int32_t ld_block_size);
static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
-static void mrsas_poll(struct cam_sim *sim);
+static void mrsas_cam_poll(struct cam_sim *sim);
static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
static void mrsas_scsiio_timeout(void *data);
-static void mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
- int nseg, int error);
-static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
- union ccb *ccb);
-struct mrsas_mpt_cmd * mrsas_get_mpt_cmd(struct mrsas_softc *sc);
-MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
- u_int16_t index);
-
-extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
-extern u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map,
+static void
+mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
+ int nseg, int error);
+static int32_t
+mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
+ union ccb *ccb);
+struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
+MRSAS_REQUEST_DESCRIPTOR_UNION *
+ mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
+
+extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
+extern u_int32_t
+MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map,
struct mrsas_softc *sc);
extern void mrsas_isr(void *arg);
extern void mrsas_aen_handler(struct mrsas_softc *sc);
-extern u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
- struct IO_REQUEST_INFO *io_info,RAID_CONTEXT *pRAID_Context,
- MR_FW_RAID_MAP_ALL *map);
-extern u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
- MR_FW_RAID_MAP_ALL *map);
-extern u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
- struct IO_REQUEST_INFO *io_info);
-extern u_int8_t megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
- u_int64_t block, u_int32_t count);
-
-
-/**
- * mrsas_cam_attach: Main entry to CAM subsystem
- * input: Adapter instance soft state
+extern u_int8_t
+MR_BuildRaidContext(struct mrsas_softc *sc,
+ struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
+ MR_DRV_RAID_MAP_ALL * map);
+extern u_int16_t
+MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
+ MR_DRV_RAID_MAP_ALL * map);
+extern u_int16_t
+mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+ struct IO_REQUEST_INFO *io_info);
+extern u_int8_t
+megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
+ u_int64_t block, u_int32_t count);
+
+
+/*
+ * mrsas_cam_attach: Main entry to CAM subsystem
+ * input: Adapter instance soft state
*
- * This function is called from mrsas_attach() during initialization
- * to perform SIM allocations and XPT bus registration. If the kernel
- * version is 7.4 or earlier, it would also initiate a bus scan.
+ * This function is called from mrsas_attach() during initialization to perform
+ * SIM allocations and XPT bus registration. If the kernel version is 7.4 or
+ * earlier, it would also initiate a bus scan.
*/
-int mrsas_cam_attach(struct mrsas_softc *sc)
+int
+mrsas_cam_attach(struct mrsas_softc *sc)
{
- struct cam_devq *devq;
- int mrsas_cam_depth;
-
- mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;
-
- if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
- device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
- return(ENOMEM);
- }
-
-
- /*
- * Create SIM for bus 0 and register, also create path
- */
- sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
- device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
- mrsas_cam_depth, devq);
- if (sc->sim_0 == NULL){
- cam_simq_free(devq);
- device_printf(sc->mrsas_dev, "Cannot register SIM\n");
- return(ENXIO);
- }
- /* Initialize taskqueue for Event Handling */
- TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
- sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
- taskqueue_thread_enqueue, &sc->ev_tq);
-
- /* Run the task queue with lowest priority */
- taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
- device_get_nameunit(sc->mrsas_dev));
- mtx_lock(&sc->sim_lock);
- if (xpt_bus_register(sc->sim_0, sc->mrsas_dev,0) != CAM_SUCCESS)
- {
- cam_sim_free(sc->sim_0, TRUE); // passing true frees the devq
- mtx_unlock(&sc->sim_lock);
- return(ENXIO);
- }
- if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
- CAM_TARGET_WILDCARD,
- CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
- xpt_bus_deregister(cam_sim_path(sc->sim_0));
- cam_sim_free(sc->sim_0, TRUE); // passing true will free the devq
- mtx_unlock(&sc->sim_lock);
- return(ENXIO);
- }
- mtx_unlock(&sc->sim_lock);
-
- /*
- * Create SIM for bus 1 and register, also create path
- */
- sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
- device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
- mrsas_cam_depth, devq);
- if (sc->sim_1 == NULL){
- cam_simq_free(devq);
- device_printf(sc->mrsas_dev, "Cannot register SIM\n");
- return(ENXIO);
- }
-
- mtx_lock(&sc->sim_lock);
- if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS){
- cam_sim_free(sc->sim_1, TRUE); // passing true frees the devq
- mtx_unlock(&sc->sim_lock);
- return(ENXIO);
- }
- if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
- CAM_TARGET_WILDCARD,
- CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
- xpt_bus_deregister(cam_sim_path(sc->sim_1));
- cam_sim_free(sc->sim_1, TRUE);
- mtx_unlock(&sc->sim_lock);
- return(ENXIO);
- }
- mtx_unlock(&sc->sim_lock);
+ struct cam_devq *devq;
+ int mrsas_cam_depth;
+
+ mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;
+
+ if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
+ device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
+ return (ENOMEM);
+ }
+ /*
+ * Create SIM for bus 0 and register, also create path
+ */
+ sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
+ device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
+ mrsas_cam_depth, devq);
+ if (sc->sim_0 == NULL) {
+ cam_simq_free(devq);
+ device_printf(sc->mrsas_dev, "Cannot register SIM\n");
+ return (ENXIO);
+ }
+ /* Initialize taskqueue for Event Handling */
+ TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
+ sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
+ taskqueue_thread_enqueue, &sc->ev_tq);
+
+ /* Run the task queue with lowest priority */
+ taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
+ device_get_nameunit(sc->mrsas_dev));
+ mtx_lock(&sc->sim_lock);
+ if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
+ cam_sim_free(sc->sim_0, TRUE); /* passing true frees the devq */
+ mtx_unlock(&sc->sim_lock);
+ return (ENXIO);
+ }
+ if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_0));
+ cam_sim_free(sc->sim_0, TRUE); /* passing true will free the
+ * devq */
+ mtx_unlock(&sc->sim_lock);
+ return (ENXIO);
+ }
+ mtx_unlock(&sc->sim_lock);
+
+ /*
+ * Create SIM for bus 1 and register, also create path
+ */
+ sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
+ device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
+ mrsas_cam_depth, devq);
+ if (sc->sim_1 == NULL) {
+ cam_simq_free(devq);
+ device_printf(sc->mrsas_dev, "Cannot register SIM\n");
+ return (ENXIO);
+ }
+ mtx_lock(&sc->sim_lock);
+ if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
+ cam_sim_free(sc->sim_1, TRUE); /* passing true frees the devq */
+ mtx_unlock(&sc->sim_lock);
+ return (ENXIO);
+ }
+ if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_1));
+ cam_sim_free(sc->sim_1, TRUE);
+ mtx_unlock(&sc->sim_lock);
+ return (ENXIO);
+ }
+ mtx_unlock(&sc->sim_lock);
#if (__FreeBSD_version <= 704000)
- if (mrsas_bus_scan(sc)){
- device_printf(sc->mrsas_dev, "Error in bus scan.\n");
- return(1);
- }
+ if (mrsas_bus_scan(sc)) {
+ device_printf(sc->mrsas_dev, "Error in bus scan.\n");
+ return (1);
+ }
#endif
- return(0);
+ return (0);
}
-/**
- * mrsas_cam_detach: De-allocates and teardown CAM
- * input: Adapter instance soft state
+/*
+ * mrsas_cam_detach: De-allocates and teardown CAM
+ * input: Adapter instance soft state
*
- * De-registers and frees the paths and SIMs.
+ * De-registers and frees the paths and SIMs.
*/
-void mrsas_cam_detach(struct mrsas_softc *sc)
+void
+mrsas_cam_detach(struct mrsas_softc *sc)
{
if (sc->ev_tq != NULL)
- taskqueue_free(sc->ev_tq);
- mtx_lock(&sc->sim_lock);
- if (sc->path_0)
- xpt_free_path(sc->path_0);
- if (sc->sim_0) {
- xpt_bus_deregister(cam_sim_path(sc->sim_0));
- cam_sim_free(sc->sim_0, FALSE);
- }
- if (sc->path_1)
- xpt_free_path(sc->path_1);
- if (sc->sim_1) {
- xpt_bus_deregister(cam_sim_path(sc->sim_1));
- cam_sim_free(sc->sim_1, TRUE);
- }
- mtx_unlock(&sc->sim_lock);
+ taskqueue_free(sc->ev_tq);
+ mtx_lock(&sc->sim_lock);
+ if (sc->path_0)
+ xpt_free_path(sc->path_0);
+ if (sc->sim_0) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_0));
+ cam_sim_free(sc->sim_0, FALSE);
+ }
+ if (sc->path_1)
+ xpt_free_path(sc->path_1);
+ if (sc->sim_1) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_1));
+ cam_sim_free(sc->sim_1, TRUE);
+ }
+ mtx_unlock(&sc->sim_lock);
}
-/**
- * mrsas_action: SIM callback entry point
- * input: pointer to SIM
- * pointer to CAM Control Block
+/*
+ * mrsas_action: SIM callback entry point
+ * input: pointer to SIM pointer to CAM Control Block
*
- * This function processes CAM subsystem requests. The type of request is
- * stored in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary
- * because ccb->cpi.maxio is not supported for FreeBSD version 7.4 or
- * earlier.
+ * This function processes CAM subsystem requests. The type of request is stored
+ * in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary because
+ * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier.
*/
-static void mrsas_action(struct cam_sim *sim, union ccb *ccb)
+static void
+mrsas_action(struct cam_sim *sim, union ccb *ccb)
{
- struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
- struct ccb_hdr *ccb_h = &(ccb->ccb_h);
- u_int32_t device_id;
-
- switch (ccb->ccb_h.func_code) {
- case XPT_SCSI_IO:
- {
- device_id = ccb_h->target_id;
-
- /*
- * bus 0 is LD, bus 1 is for system-PD
- */
- if (cam_sim_bus(sim) == 1 &&
- sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
- ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
- xpt_done(ccb);
- }
- else {
- if (mrsas_startio(sc, sim, ccb)){
- ccb->ccb_h.status |= CAM_REQ_INVALID;
- xpt_done(ccb);
- }
- }
- break;
- }
- case XPT_ABORT:
- {
- ccb->ccb_h.status = CAM_UA_ABORT;
- xpt_done(ccb);
- break;
- }
- case XPT_RESET_BUS:
- {
- xpt_done(ccb);
- break;
- }
- case XPT_GET_TRAN_SETTINGS:
- {
- ccb->cts.protocol = PROTO_SCSI;
- ccb->cts.protocol_version = SCSI_REV_2;
- ccb->cts.transport = XPORT_SPI;
- ccb->cts.transport_version = 2;
- ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
- ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
- ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
- ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
- ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done(ccb);
- break;
- }
- case XPT_SET_TRAN_SETTINGS:
- {
- ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
- xpt_done(ccb);
- break;
- }
- case XPT_CALC_GEOMETRY:
- {
- cam_calc_geometry(&ccb->ccg, 1);
- xpt_done(ccb);
- break;
- }
- case XPT_PATH_INQ:
- {
- ccb->cpi.version_num = 1;
- ccb->cpi.hba_inquiry = 0;
- ccb->cpi.target_sprt = 0;
- ccb->cpi.hba_misc = 0;
- ccb->cpi.hba_eng_cnt = 0;
- ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
- ccb->cpi.unit_number = cam_sim_unit(sim);
- ccb->cpi.bus_id = cam_sim_bus(sim);
- ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
- ccb->cpi.base_transfer_speed = 150000;
- strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
- strncpy(ccb->cpi.hba_vid, "LSI", HBA_IDLEN);
- strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
- ccb->cpi.transport = XPORT_SPI;
- ccb->cpi.transport_version = 2;
- ccb->cpi.protocol = PROTO_SCSI;
- ccb->cpi.protocol_version = SCSI_REV_2;
- if (ccb->cpi.bus_id == 0)
- ccb->cpi.max_target = MRSAS_MAX_LD-1;
- else
- ccb->cpi.max_target = MRSAS_MAX_PD-1;
+ struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ u_int32_t device_id;
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO:
+ {
+ device_id = ccb_h->target_id;
+
+ /*
+ * bus 0 is LD, bus 1 is for system-PD
+ */
+ if (cam_sim_bus(sim) == 1 &&
+ sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
+ ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
+ xpt_done(ccb);
+ } else {
+ if (mrsas_startio(sc, sim, ccb)) {
+ ccb->ccb_h.status |= CAM_REQ_INVALID;
+ xpt_done(ccb);
+ }
+ }
+ break;
+ }
+ case XPT_ABORT:
+ {
+ ccb->ccb_h.status = CAM_UA_ABORT;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_RESET_BUS:
+ {
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_GET_TRAN_SETTINGS:
+ {
+ ccb->cts.protocol = PROTO_SCSI;
+ ccb->cts.protocol_version = SCSI_REV_2;
+ ccb->cts.transport = XPORT_SPI;
+ ccb->cts.transport_version = 2;
+ ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
+ ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
+ ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
+ ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_SET_TRAN_SETTINGS:
+ {
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_CALC_GEOMETRY:
+ {
+ cam_calc_geometry(&ccb->ccg, 1);
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_PATH_INQ:
+ {
+ ccb->cpi.version_num = 1;
+ ccb->cpi.hba_inquiry = 0;
+ ccb->cpi.target_sprt = 0;
+ ccb->cpi.hba_misc = 0;
+ ccb->cpi.hba_eng_cnt = 0;
+ ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
+ ccb->cpi.unit_number = cam_sim_unit(sim);
+ ccb->cpi.bus_id = cam_sim_bus(sim);
+ ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
+ ccb->cpi.base_transfer_speed = 150000;
+ strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
+ strncpy(ccb->cpi.hba_vid, "LSI", HBA_IDLEN);
+ strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
+ ccb->cpi.transport = XPORT_SPI;
+ ccb->cpi.transport_version = 2;
+ ccb->cpi.protocol = PROTO_SCSI;
+ ccb->cpi.protocol_version = SCSI_REV_2;
+ if (ccb->cpi.bus_id == 0)
+ ccb->cpi.max_target = MRSAS_MAX_PD - 1;
+ else
+ ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
#if (__FreeBSD_version > 704000)
- ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
+ ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
#endif
- ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done(ccb);
- break;
- }
- default:
- {
- ccb->ccb_h.status = CAM_REQ_INVALID;
- xpt_done(ccb);
- break;
- }
- }
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ default:
+ {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ break;
+ }
+ }
}
-/**
- * mrsas_scsiio_timeout Callback function for IO timed out
- * input: mpt command context
+/*
+ * mrsas_scsiio_timeout: Callback function for IO timed out
+ * input: mpt command context
*
- * This function will execute after timeout value
- * provided by ccb header from CAM layer, if timer expires.
- * Driver will run timer for all DCDM and LDIO comming from CAM layer.
- * This function is callback function for IO timeout and it runs in
- * no-sleep context. Set do_timedout_reset in Adapter context so that
- * it will execute OCR/Kill adpter from ocr_thread context.
+ * This function will execute after timeout value provided by ccb header from
+ * CAM layer, if timer expires. Driver will run timer for all DCDM and LDIO
+ * comming from CAM layer. This function is callback function for IO timeout
+ * and it runs in no-sleep context. Set do_timedout_reset in Adapter context
+ * so that it will execute OCR/Kill adpter from ocr_thread context.
*/
static void
mrsas_scsiio_timeout(void *data)
{
- struct mrsas_mpt_cmd *cmd;
- struct mrsas_softc *sc;
-
- cmd = (struct mrsas_mpt_cmd *)data;
- sc = cmd->sc;
-
- if (cmd->ccb_ptr == NULL) {
- printf("command timeout with NULL ccb\n");
- return;
- }
-
- /* Below callout is dummy entry so that it will be
- * cancelled from mrsas_cmd_done(). Now Controller will
- * go to OCR/Kill Adapter based on OCR enable/disable
- * property of Controller from ocr_thread context.
- */
- callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
- mrsas_scsiio_timeout, cmd);
- sc->do_timedout_reset = 1;
- if(sc->ocr_thread_active)
- wakeup(&sc->ocr_chan);
+ struct mrsas_mpt_cmd *cmd;
+ struct mrsas_softc *sc;
+
+ cmd = (struct mrsas_mpt_cmd *)data;
+ sc = cmd->sc;
+
+ if (cmd->ccb_ptr == NULL) {
+ printf("command timeout with NULL ccb\n");
+ return;
+ }
+ /*
+ * Below callout is dummy entry so that it will be cancelled from
+ * mrsas_cmd_done(). Now Controller will go to OCR/Kill Adapter based
+ * on OCR enable/disable property of Controller from ocr_thread
+ * context.
+ */
+ callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
+ mrsas_scsiio_timeout, cmd);
+ sc->do_timedout_reset = 1;
+ if (sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
}
-/**
- * mrsas_startio: SCSI IO entry point
- * input: Adapter instance soft state
- * pointer to CAM Control Block
+/*
+ * mrsas_startio: SCSI IO entry point
+ * input: Adapter instance soft state
+ * pointer to CAM Control Block
*
- * This function is the SCSI IO entry point and it initiates IO processing.
- * It copies the IO and depending if the IO is read/write or inquiry, it would
- * call mrsas_build_ldio() or mrsas_build_dcdb(), respectively. It returns
- * 0 if the command is sent to firmware successfully, otherwise it returns 1.
+ * This function is the SCSI IO entry point and it initiates IO processing. It
+ * copies the IO and depending if the IO is read/write or inquiry, it would
+ * call mrsas_build_ldio() or mrsas_build_dcdb(), respectively. It returns 0
+ * if the command is sent to firmware successfully, otherwise it returns 1.
*/
-static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
- union ccb *ccb)
+static int32_t
+mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
+ union ccb *ccb)
{
- struct mrsas_mpt_cmd *cmd;
- struct ccb_hdr *ccb_h = &(ccb->ccb_h);
- struct ccb_scsiio *csio = &(ccb->csio);
- MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
-
- if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE){
- ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done(ccb);
- return(0);
- }
-
- ccb_h->status |= CAM_SIM_QUEUED;
- cmd = mrsas_get_mpt_cmd(sc);
-
- if (!cmd) {
- ccb_h->status |= CAM_REQUEUE_REQ;
- xpt_done(ccb);
- return(0);
- }
-
- if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
- if(ccb_h->flags & CAM_DIR_IN)
- cmd->flags |= MRSAS_DIR_IN;
- if(ccb_h->flags & CAM_DIR_OUT)
- cmd->flags |= MRSAS_DIR_OUT;
- }
- else
- cmd->flags = MRSAS_DIR_NONE; /* no data */
-
-/* For FreeBSD 10.0 and higher */
+ struct mrsas_mpt_cmd *cmd;
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ struct ccb_scsiio *csio = &(ccb->csio);
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ return (0);
+ }
+ ccb_h->status |= CAM_SIM_QUEUED;
+ cmd = mrsas_get_mpt_cmd(sc);
+
+ if (!cmd) {
+ ccb_h->status |= CAM_REQUEUE_REQ;
+ xpt_done(ccb);
+ return (0);
+ }
+ if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
+ if (ccb_h->flags & CAM_DIR_IN)
+ cmd->flags |= MRSAS_DIR_IN;
+ if (ccb_h->flags & CAM_DIR_OUT)
+ cmd->flags |= MRSAS_DIR_OUT;
+ } else
+ cmd->flags = MRSAS_DIR_NONE; /* no data */
+
+ /* For FreeBSD 10.0 and higher */
#if (__FreeBSD_version >= 1000000)
-/*
- * * XXX We don't yet support physical addresses here.
- */
- switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
- case CAM_DATA_PADDR:
- case CAM_DATA_SG_PADDR:
- printf("%s: physical addresses not supported\n",
- __func__);
- mrsas_release_mpt_cmd(cmd);
- ccb_h->status = CAM_REQ_INVALID;
- ccb_h->status &= ~CAM_SIM_QUEUED;
- goto done;
- case CAM_DATA_SG:
- printf("%s: scatter gather is not supported\n",
- __func__);
- mrsas_release_mpt_cmd(cmd);
- ccb_h->status = CAM_REQ_INVALID;
- goto done;
+ /*
+ * XXX We don't yet support physical addresses here.
+ */
+ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
+ case CAM_DATA_PADDR:
+ case CAM_DATA_SG_PADDR:
+ device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
+ __func__);
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ ccb_h->status &= ~CAM_SIM_QUEUED;
+ goto done;
+ case CAM_DATA_SG:
+ device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
+ __func__);
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ goto done;
case CAM_DATA_VADDR:
- if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
- mrsas_release_mpt_cmd(cmd);
- ccb_h->status = CAM_REQ_TOO_BIG;
- goto done;
- }
- cmd->length = csio->dxfer_len;
- if (cmd->length)
- cmd->data = csio->data_ptr;
- break;
- default:
- ccb->ccb_h.status = CAM_REQ_INVALID;
- goto done;
- }
+ if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_TOO_BIG;
+ goto done;
+ }
+ cmd->length = csio->dxfer_len;
+ if (cmd->length)
+ cmd->data = csio->data_ptr;
+ break;
+ default:
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ goto done;
+ }
#else
- if (!(ccb_h->flags & CAM_DATA_PHYS)) { //Virtual data address
- if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
- if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
- mrsas_release_mpt_cmd(cmd);
- ccb_h->status = CAM_REQ_TOO_BIG;
- goto done;
- }
- cmd->length = csio->dxfer_len;
- if (cmd->length)
- cmd->data = csio->data_ptr;
- }
- else {
- mrsas_release_mpt_cmd(cmd);
- ccb_h->status = CAM_REQ_INVALID;
- goto done;
- }
- }
- else { //Data addresses are physical.
- mrsas_release_mpt_cmd(cmd);
- ccb_h->status = CAM_REQ_INVALID;
- ccb_h->status &= ~CAM_SIM_QUEUED;
- goto done;
- }
+ if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Virtual data address */
+ if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
+ if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_TOO_BIG;
+ goto done;
+ }
+ cmd->length = csio->dxfer_len;
+ if (cmd->length)
+ cmd->data = csio->data_ptr;
+ } else {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ goto done;
+ }
+ } else { /* Data addresses are physical. */
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ ccb_h->status &= ~CAM_SIM_QUEUED;
+ goto done;
+ }
#endif
- /* save ccb ptr */
- cmd->ccb_ptr = ccb;
-
- req_desc = mrsas_get_request_desc(sc, (cmd->index)-1);
- if (!req_desc) {
- device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
- return (FAIL);
- }
- memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
- cmd->request_desc = req_desc;
-
- if (ccb_h->flags & CAM_CDB_POINTER)
- bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
- else
- bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
- mtx_lock(&sc->raidmap_lock);
-
- if (mrsas_ldio_inq(sim, ccb)) {
- if (mrsas_build_ldio(sc, cmd, ccb)){
- device_printf(sc->mrsas_dev, "Build LDIO failed.\n");
- mtx_unlock(&sc->raidmap_lock);
- return(1);
- }
- }
- else {
- if (mrsas_build_dcdb(sc, cmd, ccb, sim)) {
- device_printf(sc->mrsas_dev, "Build DCDB failed.\n");
- mtx_unlock(&sc->raidmap_lock);
- return(1);
- }
- }
- mtx_unlock(&sc->raidmap_lock);
-
- if (cmd->flags == MRSAS_DIR_IN) //from device
- cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
- else if (cmd->flags == MRSAS_DIR_OUT) //to device
- cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
-
- cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
- cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/4;
- cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
- cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;
-
- req_desc = cmd->request_desc;
- req_desc->SCSIIO.SMID = cmd->index;
-
- /*
- * Start timer for IO timeout. Default timeout value is 90 second.
- */
- callout_reset(&cmd->cm_callout, (sc->mrsas_io_timeout * hz) / 1000,
- mrsas_scsiio_timeout, cmd);
- atomic_inc(&sc->fw_outstanding);
-
- if(atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
- sc->io_cmds_highwater++;
-
- mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
- return(0);
+ /* save ccb ptr */
+ cmd->ccb_ptr = ccb;
+
+ req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
+ if (!req_desc) {
+ device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
+ return (FAIL);
+ }
+ memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
+ cmd->request_desc = req_desc;
+
+ if (ccb_h->flags & CAM_CDB_POINTER)
+ bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
+ else
+ bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
+ mtx_lock(&sc->raidmap_lock);
+
+ if (mrsas_ldio_inq(sim, ccb)) {
+ if (mrsas_build_ldio(sc, cmd, ccb)) {
+ device_printf(sc->mrsas_dev, "Build LDIO failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return (1);
+ }
+ } else {
+ if (mrsas_build_dcdb(sc, cmd, ccb, sim)) {
+ device_printf(sc->mrsas_dev, "Build DCDB failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return (1);
+ }
+ }
+ mtx_unlock(&sc->raidmap_lock);
+
+ if (cmd->flags == MRSAS_DIR_IN) /* from device */
+ cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
+ else if (cmd->flags == MRSAS_DIR_OUT) /* to device */
+ cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
+
+ cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+ cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
+ cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
+ cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;
+
+ req_desc = cmd->request_desc;
+ req_desc->SCSIIO.SMID = cmd->index;
+
+ /*
+ * Start timer for IO timeout. Default timeout value is 90 second.
+ */
+ callout_reset(&cmd->cm_callout, (sc->mrsas_io_timeout * hz) / 1000,
+ mrsas_scsiio_timeout, cmd);
+ atomic_inc(&sc->fw_outstanding);
+
+ if (atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
+ sc->io_cmds_highwater++;
+
+ mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
+ return (0);
done:
- xpt_done(ccb);
- return(0);
+ xpt_done(ccb);
+ return (0);
}
-/**
- * mrsas_ldio_inq: Determines if IO is read/write or inquiry
- * input: pointer to CAM Control Block
+/*
+ * mrsas_ldio_inq: Determines if IO is read/write or inquiry
+ * input: pointer to CAM Control Block
*
- * This function determines if the IO is read/write or inquiry. It returns a
- * 1 if the IO is read/write and 0 if it is inquiry.
+ * This function determines if the IO is read/write or inquiry. It returns a 1
+ * if the IO is read/write and 0 if it is inquiry.
*/
-int mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb)
+int
+mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb)
{
- struct ccb_scsiio *csio = &(ccb->csio);
-
- if (cam_sim_bus(sim) == 1)
- return(0);
-
- switch (csio->cdb_io.cdb_bytes[0]) {
- case READ_10:
- case WRITE_10:
- case READ_12:
- case WRITE_12:
- case READ_6:
- case WRITE_6:
- case READ_16:
- case WRITE_16:
- return 1;
- default:
- return 0;
- }
+ struct ccb_scsiio *csio = &(ccb->csio);
+
+ if (cam_sim_bus(sim) == 1)
+ return (0);
+
+ switch (csio->cdb_io.cdb_bytes[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_6:
+ case WRITE_6:
+ case READ_16:
+ case WRITE_16:
+ return 1;
+ default:
+ return 0;
+ }
}
-/**
- * mrsas_get_mpt_cmd: Get a cmd from free command pool
- * input: Adapter instance soft state
+/*
+ * mrsas_get_mpt_cmd: Get a cmd from free command pool
+ * input: Adapter instance soft state
*
- * This function removes an MPT command from the command free list and
+ * This function removes an MPT command from the command free list and
* initializes it.
*/
-struct mrsas_mpt_cmd* mrsas_get_mpt_cmd(struct mrsas_softc *sc)
+struct mrsas_mpt_cmd *
+mrsas_get_mpt_cmd(struct mrsas_softc *sc)
{
- struct mrsas_mpt_cmd *cmd = NULL;
-
- mtx_lock(&sc->mpt_cmd_pool_lock);
- if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)){
- cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
- TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
- }
- memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
- cmd->data = NULL;
- cmd->length = 0;
- cmd->flags = 0;
- cmd->error_code = 0;
- cmd->load_balance = 0;
- cmd->ccb_ptr = NULL;
- mtx_unlock(&sc->mpt_cmd_pool_lock);
-
- return cmd;
+ struct mrsas_mpt_cmd *cmd = NULL;
+
+ mtx_lock(&sc->mpt_cmd_pool_lock);
+ if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
+ cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
+ TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
+ }
+ memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+ cmd->data = NULL;
+ cmd->length = 0;
+ cmd->flags = 0;
+ cmd->error_code = 0;
+ cmd->load_balance = 0;
+ cmd->ccb_ptr = NULL;
+ mtx_unlock(&sc->mpt_cmd_pool_lock);
+
+ return cmd;
}
-/**
- * mrsas_release_mpt_cmd: Return a cmd to free command pool
- * input: Command packet for return to free command pool
+/*
+ * mrsas_release_mpt_cmd: Return a cmd to free command pool
+ * input: Command packet for return to free command pool
*
* This function returns an MPT command to the free command list.
*/
-void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
+void
+mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
{
- struct mrsas_softc *sc = cmd->sc;
+ struct mrsas_softc *sc = cmd->sc;
- mtx_lock(&sc->mpt_cmd_pool_lock);
- cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
- TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
- mtx_unlock(&sc->mpt_cmd_pool_lock);
+ mtx_lock(&sc->mpt_cmd_pool_lock);
+ cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
+ mtx_unlock(&sc->mpt_cmd_pool_lock);
- return;
+ return;
}
-/**
- * mrsas_get_request_desc: Get request descriptor from array
- * input: Adapter instance soft state
- * SMID index
+/*
+ * mrsas_get_request_desc: Get request descriptor from array
+ * input: Adapter instance soft state
+ * SMID index
*
* This function returns a pointer to the request descriptor.
*/
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
{
- u_int8_t *p;
+ u_int8_t *p;
- if (index >= sc->max_fw_cmds) {
- device_printf(sc->mrsas_dev, "Invalid SMID (0x%x)request for desc\n", index);
- return NULL;
- }
- p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
+ if (index >= sc->max_fw_cmds) {
+ device_printf(sc->mrsas_dev, "Invalid SMID (0x%x)request for desc\n", index);
+ return NULL;
+ }
+ p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
- return (MRSAS_REQUEST_DESCRIPTOR_UNION *)p;
+ return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
}
-/**
- * mrsas_build_ldio: Builds an LDIO command
- * input: Adapter instance soft state
- * Pointer to command packet
- * Pointer to CCB
+/*
+ * mrsas_build_ldio: Builds an LDIO command
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
*
- * This function builds the LDIO command packet. It returns 0 if the
- * command is built successfully, otherwise it returns a 1.
+ * This function builds the LDIO command packet. It returns 0 if the command is
+ * built successfully, otherwise it returns a 1.
*/
-int mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb)
+int
+mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb)
{
- struct ccb_hdr *ccb_h = &(ccb->ccb_h);
- struct ccb_scsiio *csio = &(ccb->csio);
- u_int32_t device_id;
- MRSAS_RAID_SCSI_IO_REQUEST *io_request;
-
- device_id = ccb_h->target_id;
-
- io_request = cmd->io_request;
- io_request->RaidContext.VirtualDiskTgtId = device_id;
- io_request->RaidContext.status = 0;
- io_request->RaidContext.exStatus = 0;
-
- /* just the cdb len, other flags zero, and ORed-in later for FP */
- io_request->IoFlags = csio->cdb_len;
-
- if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
- device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
-
- io_request->DataLength = cmd->length;
-
- if (mrsas_map_request(sc, cmd) == SUCCESS) {
- if (cmd->sge_count > MRSAS_MAX_SGL) {
- device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
- "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
- return (FAIL);
- }
- io_request->RaidContext.numSGE = cmd->sge_count;
- }
- else {
- device_printf(sc->mrsas_dev, "Data map/load failed.\n");
- return(FAIL);
- }
- return(0);
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ struct ccb_scsiio *csio = &(ccb->csio);
+ u_int32_t device_id;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+
+ device_id = ccb_h->target_id;
+
+ io_request = cmd->io_request;
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.status = 0;
+ io_request->RaidContext.exStatus = 0;
+
+ /* just the cdb len, other flags zero, and ORed-in later for FP */
+ io_request->IoFlags = csio->cdb_len;
+
+ if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
+ device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
+
+ io_request->DataLength = cmd->length;
+
+ if (mrsas_map_request(sc, cmd) == SUCCESS) {
+ if (cmd->sge_count > MRSAS_MAX_SGL) {
+ device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
+ "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
+ return (FAIL);
+ }
+ io_request->RaidContext.numSGE = cmd->sge_count;
+ } else {
+ device_printf(sc->mrsas_dev, "Data map/load failed.\n");
+ return (FAIL);
+ }
+ return (0);
}
-/**
- * mrsas_setup_io: Set up data including Fast Path I/O
- * input: Adapter instance soft state
- * Pointer to command packet
- * Pointer to CCB
+/*
+ * mrsas_setup_io: Set up data including Fast Path I/O
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
*
- * This function builds the DCDB inquiry command. It returns 0 if the
- * command is built successfully, otherwise it returns a 1.
+ * This function sets up the data, including Fast Path I/O.  It returns 0 if
+ * the I/O is set up successfully, otherwise it returns a 1.
*/
-int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb, u_int32_t device_id,
- MRSAS_RAID_SCSI_IO_REQUEST *io_request)
+int
+mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, u_int32_t device_id,
+ MRSAS_RAID_SCSI_IO_REQUEST * io_request)
{
- struct ccb_hdr *ccb_h = &(ccb->ccb_h);
- struct ccb_scsiio *csio = &(ccb->csio);
- struct IO_REQUEST_INFO io_info;
- MR_FW_RAID_MAP_ALL *map_ptr;
- u_int8_t fp_possible;
- u_int32_t start_lba_hi, start_lba_lo, ld_block_size;
- u_int32_t datalength = 0;
-
- start_lba_lo = 0;
- start_lba_hi = 0;
- fp_possible = 0;
-
- /*
- * READ_6 (0x08) or WRITE_6 (0x0A) cdb
- */
- if (csio->cdb_len == 6) {
- datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
- start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[1] << 16) |
- ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 8) |
- (u_int32_t) csio->cdb_io.cdb_bytes[3];
- start_lba_lo &= 0x1FFFFF;
- }
- /*
- * READ_10 (0x28) or WRITE_6 (0x2A) cdb
- */
- else if (csio->cdb_len == 10) {
- datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
- ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
- start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
- ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
- (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
- ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
- }
- /*
- * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
- */
- else if (csio->cdb_len == 12) {
- datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
- ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
- ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
- ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
- start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
- ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
- (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
- ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
- }
- /*
- * READ_16 (0x88) or WRITE_16 (0xx8A) cdb
- */
- else if (csio->cdb_len == 16) {
- datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
- ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
- ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
- ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
- start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[6] << 24) |
- ((u_int32_t) csio->cdb_io.cdb_bytes[7] << 16) |
- (u_int32_t) csio->cdb_io.cdb_bytes[8] << 8 |
- ((u_int32_t) csio->cdb_io.cdb_bytes[9]);
- start_lba_hi = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
- ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
- (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
- ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
- }
-
- memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
- io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
- io_info.numBlocks = datalength;
- io_info.ldTgtId = device_id;
-
- switch (ccb_h->flags & CAM_DIR_MASK) {
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ struct ccb_scsiio *csio = &(ccb->csio);
+ struct IO_REQUEST_INFO io_info;
+ MR_DRV_RAID_MAP_ALL *map_ptr;
+ u_int8_t fp_possible;
+ u_int32_t start_lba_hi, start_lba_lo, ld_block_size;
+ u_int32_t datalength = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+ fp_possible = 0;
+
+ /*
+ * READ_6 (0x08) or WRITE_6 (0x0A) cdb
+ */
+ if (csio->cdb_len == 6) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
+ start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
+ (u_int32_t)csio->cdb_io.cdb_bytes[3];
+ start_lba_lo &= 0x1FFFFF;
+ }
+ /*
+	 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
+ */
+ else if (csio->cdb_len == 10) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
+ start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
+ (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
+ }
+ /*
+ * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
+ */
+ else if (csio->cdb_len == 12) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
+ start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
+ (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
+ }
+ /*
+	 * READ_16 (0x88) or WRITE_16 (0x8A) cdb
+ */
+ else if (csio->cdb_len == 16) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
+ start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
+ (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
+ start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
+ (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
+ }
+ memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
+ io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
+ io_info.numBlocks = datalength;
+ io_info.ldTgtId = device_id;
+
+ switch (ccb_h->flags & CAM_DIR_MASK) {
case CAM_DIR_IN:
io_info.isRead = 1;
break;
@@ -776,401 +782,418 @@ int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
break;
case CAM_DIR_NONE:
default:
- mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
+ mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
break;
- }
-
- map_ptr = sc->raidmap_mem[(sc->map_id & 1)];
- ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);
-
- if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES) ||
- (!sc->fast_path_io)) {
- io_request->RaidContext.regLockFlags = 0;
- fp_possible = 0;
- }
- else
- {
- if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext, map_ptr))
- fp_possible = io_info.fpOkForIo;
- }
-
- if (fp_possible) {
- mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
- start_lba_lo, ld_block_size);
- io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
- << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
- if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
- cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->RaidContext.Type = MPI2_TYPE_CUDA;
- io_request->RaidContext.nseg = 0x1;
- io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
- io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE);
- }
- if ((sc->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) {
- io_info.devHandle = mrsas_get_updated_dev_handle(&sc->load_balance_info[device_id],
- &io_info);
- cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
- }
- else
- cmd->load_balance = 0;
- cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
- io_request->DevHandle = io_info.devHandle;
- }
- else {
- /* Not FP IO */
- io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
- cmd->request_desc->SCSIIO.RequestFlags =
- (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
- if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
- cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->RaidContext.Type = MPI2_TYPE_CUDA;
- io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE);
- io_request->RaidContext.nseg = 0x1;
- }
- io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = device_id;
- }
- return(0);
+ }
+
+ map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
+ ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);
+
+ if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES_EXT) ||
+ (!sc->fast_path_io)) {
+ io_request->RaidContext.regLockFlags = 0;
+ fp_possible = 0;
+ } else {
+ if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext, map_ptr))
+ fp_possible = io_info.fpOkForIo;
+ }
+
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
+
+
+ if (fp_possible) {
+ mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
+ start_lba_lo, ld_block_size);
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.nseg = 0x1;
+ io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ }
+ if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
+ (io_info.isRead)) {
+ io_info.devHandle =
+ mrsas_get_updated_dev_handle(&sc->load_balance_info[device_id],
+ &io_info);
+ cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
+ } else
+ cmd->load_balance = 0;
+ cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
+ io_request->DevHandle = io_info.devHandle;
+ } else {
+ /* Not FP IO */
+ io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ io_request->RaidContext.nseg = 0x1;
+ }
+ io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
+ }
+ return (0);
}
-/**
- * mrsas_build_dcdb: Builds an DCDB command
- * input: Adapter instance soft state
- * Pointer to command packet
- * Pointer to CCB
+/*
+ * mrsas_build_dcdb: Builds an DCDB command
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
*
- * This function builds the DCDB inquiry command. It returns 0 if the
- * command is built successfully, otherwise it returns a 1.
+ * This function builds the DCDB inquiry command. It returns 0 if the command
+ * is built successfully, otherwise it returns a 1.
*/
-int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb, struct cam_sim *sim)
+int
+mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim)
{
- struct ccb_hdr *ccb_h = &(ccb->ccb_h);
- u_int32_t device_id;
- MR_FW_RAID_MAP_ALL *map_ptr;
- MRSAS_RAID_SCSI_IO_REQUEST *io_request;
-
- io_request = cmd->io_request;
- device_id = ccb_h->target_id;
- map_ptr = sc->raidmap_mem[(sc->map_id & 1)];
-
- /* Check if this is for system PD */
- if (cam_sim_bus(sim) == 1 &&
- sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) {
- io_request->Function = 0;
- io_request->DevHandle = map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
- io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
- io_request->RaidContext.regLockFlags = 0;
- io_request->RaidContext.regLockRowLBA = 0;
- io_request->RaidContext.regLockLength = 0;
- io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
- MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
- io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
- MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- cmd->request_desc->SCSIIO.DevHandle =
- map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
- }
- else {
- io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = device_id;
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- }
-
- io_request->RaidContext.VirtualDiskTgtId = device_id;
- io_request->LUN[1] = ccb_h->target_lun & 0xF;
- io_request->DataLength = cmd->length;
-
- if (mrsas_map_request(sc, cmd) == SUCCESS) {
- if (cmd->sge_count > sc->max_num_sge) {
- device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
- "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
- return (1);
- }
- io_request->RaidContext.numSGE = cmd->sge_count;
- }
- else {
- device_printf(sc->mrsas_dev, "Data map/load failed.\n");
- return(1);
- }
- return(0);
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ u_int32_t device_id;
+ MR_DRV_RAID_MAP_ALL *map_ptr;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+
+ io_request = cmd->io_request;
+ device_id = ccb_h->target_id;
+ map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
+
+ /* Check if this is for system PD */
+ if (cam_sim_bus(sim) == 1 &&
+ sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) {
+ io_request->Function = 0;
+ io_request->DevHandle = map_ptr->raidMap.devHndlInfo[device_id].
+ curDevHdl;
+ io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
+ io_request->RaidContext.regLockFlags = 0;
+ io_request->RaidContext.regLockRowLBA = 0;
+ io_request->RaidContext.regLockLength = 0;
+
+ io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cmd->request_desc->SCSIIO.DevHandle =
+ map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
+
+ } else {
+ io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ }
+
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->LUN[1] = ccb_h->target_lun & 0xF;
+ io_request->DataLength = cmd->length;
+
+ if (mrsas_map_request(sc, cmd) == SUCCESS) {
+ if (cmd->sge_count > sc->max_num_sge) {
+ device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
+ "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
+ return (1);
+ }
+ io_request->RaidContext.numSGE = cmd->sge_count;
+ } else {
+ device_printf(sc->mrsas_dev, "Data map/load failed.\n");
+ return (1);
+ }
+ return (0);
}
-/**
- * mrsas_map_request: Map and load data
- * input: Adapter instance soft state
- * Pointer to command packet
+/*
+ * mrsas_map_request: Map and load data
+ * input: Adapter instance soft state
+ * Pointer to command packet
*
- * For data from OS, map and load the data buffer into bus space. The
- * SG list is built in the callback. If the bus dmamap load is not
- * successful, cmd->error_code will contain the error code and a 1 is
- * returned.
+ * For data from OS, map and load the data buffer into bus space. The SG list
+ * is built in the callback. If the bus dmamap load is not successful,
+ * cmd->error_code will contain the error code and a 1 is returned.
*/
-int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
+int
+mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
- u_int32_t retcode = 0;
- struct cam_sim *sim;
- int flag = BUS_DMA_NOWAIT;
-
- sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);
-
- if (cmd->data != NULL) {
- mtx_lock(&sc->io_lock);
- /* Map data buffer into bus space */
- retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
- cmd->length, mrsas_data_load_cb, cmd, flag);
- mtx_unlock(&sc->io_lock);
- if (retcode)
- device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
- if (retcode == EINPROGRESS) {
- device_printf(sc->mrsas_dev, "request load in progress\n");
- mrsas_freeze_simq(cmd, sim);
- }
- }
- if (cmd->error_code)
- return(1);
- return(retcode);
+ u_int32_t retcode = 0;
+ struct cam_sim *sim;
+ int flag = BUS_DMA_NOWAIT;
+
+ sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);
+
+ if (cmd->data != NULL) {
+ mtx_lock(&sc->io_lock);
+ /* Map data buffer into bus space */
+ retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
+ cmd->length, mrsas_data_load_cb, cmd, flag);
+ mtx_unlock(&sc->io_lock);
+ if (retcode)
+ device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
+ if (retcode == EINPROGRESS) {
+ device_printf(sc->mrsas_dev, "request load in progress\n");
+ mrsas_freeze_simq(cmd, sim);
+ }
+ }
+ if (cmd->error_code)
+ return (1);
+ return (retcode);
}
-/**
- * mrsas_unmap_request: Unmap and unload data
- * input: Adapter instance soft state
- * Pointer to command packet
+/*
+ * mrsas_unmap_request: Unmap and unload data
+ * input: Adapter instance soft state
+ * Pointer to command packet
*
* This function unmaps and unloads data from OS.
*/
-void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
+void
+mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
- if (cmd->data != NULL) {
- if (cmd->flags & MRSAS_DIR_IN)
- bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
- if (cmd->flags & MRSAS_DIR_OUT)
- bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
- mtx_lock(&sc->io_lock);
- bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
- mtx_unlock(&sc->io_lock);
- }
+ if (cmd->data != NULL) {
+ if (cmd->flags & MRSAS_DIR_IN)
+ bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
+ if (cmd->flags & MRSAS_DIR_OUT)
+ bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
+ mtx_lock(&sc->io_lock);
+ bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
+ mtx_unlock(&sc->io_lock);
+ }
}
-/**
- * mrsas_data_load_cb: Callback entry point
- * input: Pointer to command packet as argument
- * Pointer to segment
- * Number of segments
- * Error
+/*
+ * mrsas_data_load_cb: Callback entry point
+ * input: Pointer to command packet as argument
+ * Pointer to segment
+ *        Number of segments, Error
*
- * This is the callback function of the bus dma map load. It builds
- * the SG list.
+ * This is the callback function of the bus dma map load. It builds the SG
+ * list.
*/
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
- struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
- struct mrsas_softc *sc = cmd->sc;
- MRSAS_RAID_SCSI_IO_REQUEST *io_request;
- pMpi25IeeeSgeChain64_t sgl_ptr;
- int i=0, sg_processed=0;
-
- if (error)
- {
- cmd->error_code = error;
- device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
- if (error == EFBIG) {
- cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
- return;
- }
- }
-
- if (cmd->flags & MRSAS_DIR_IN)
- bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
- BUS_DMASYNC_PREREAD);
- if (cmd->flags & MRSAS_DIR_OUT)
- bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
- BUS_DMASYNC_PREWRITE);
- if (nseg > sc->max_num_sge) {
- device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
- return;
- }
-
- io_request = cmd->io_request;
- sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
-
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
- pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
- sgl_ptr_end += sc->max_sge_in_main_msg - 1;
- sgl_ptr_end->Flags = 0;
- }
-
- if (nseg != 0) {
- for (i=0; i < nseg; i++) {
- sgl_ptr->Address = segs[i].ds_addr;
- sgl_ptr->Length = segs[i].ds_len;
- sgl_ptr->Flags = 0;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
- if (i == nseg - 1)
- sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
- }
- sgl_ptr++;
- sg_processed = i + 1;
- /*
- * Prepare chain element
- */
+ struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
+ struct mrsas_softc *sc = cmd->sc;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+ pMpi25IeeeSgeChain64_t sgl_ptr;
+ int i = 0, sg_processed = 0;
+
+ if (error) {
+ cmd->error_code = error;
+ device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
+ if (error == EFBIG) {
+ cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
+ return;
+ }
+ }
+ if (cmd->flags & MRSAS_DIR_IN)
+ bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
+ BUS_DMASYNC_PREREAD);
+ if (cmd->flags & MRSAS_DIR_OUT)
+ bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
+ BUS_DMASYNC_PREWRITE);
+ if (nseg > sc->max_num_sge) {
+ device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
+ return;
+ }
+ io_request = cmd->io_request;
+ sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
+
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
+
+ sgl_ptr_end += sc->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+ if (nseg != 0) {
+ for (i = 0; i < nseg; i++) {
+ sgl_ptr->Address = segs[i].ds_addr;
+ sgl_ptr->Length = segs[i].ds_len;
+ sgl_ptr->Flags = 0;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if (i == nseg - 1)
+ sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
+ }
+ sgl_ptr++;
+ sg_processed = i + 1;
if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
- (nseg > sc->max_sge_in_main_msg)) {
- pMpi25IeeeSgeChain64_t sg_chain;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
- if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
- != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
- cmd->io_request->ChainOffset = sc->chain_offset_io_request;
- else
- cmd->io_request->ChainOffset = 0;
- } else
- cmd->io_request->ChainOffset = sc->chain_offset_io_request;
- sg_chain = sgl_ptr;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
- sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
- else
- sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
- sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
- sg_chain->Address = cmd->chain_frame_phys_addr;
- sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
+ (nseg > sc->max_sge_in_main_msg)) {
+ pMpi25IeeeSgeChain64_t sg_chain;
+
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ cmd->io_request->ChainOffset = sc->chain_offset_io_request;
+ else
+ cmd->io_request->ChainOffset = 0;
+ } else
+ cmd->io_request->ChainOffset = sc->chain_offset_io_request;
+ sg_chain = sgl_ptr;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
+ else
+ sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+ sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
+ sg_chain->Address = cmd->chain_frame_phys_addr;
+ sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
}
- }
- }
- cmd->sge_count = nseg;
+ }
+ }
+ cmd->sge_count = nseg;
}
-/**
- * mrsas_freeze_simq: Freeze SIM queue
- * input: Pointer to command packet
- * Pointer to SIM
+/*
+ * mrsas_freeze_simq: Freeze SIM queue
+ * input: Pointer to command packet
+ * Pointer to SIM
*
* This function freezes the sim queue.
*/
-static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
+static void
+mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
{
- union ccb *ccb = (union ccb *)(cmd->ccb_ptr);
+ union ccb *ccb = (union ccb *)(cmd->ccb_ptr);
- xpt_freeze_simq(sim, 1);
- ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
- ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+ xpt_freeze_simq(sim, 1);
+ ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
+ ccb->ccb_h.status |= CAM_REQUEUE_REQ;
}
-void mrsas_xpt_freeze(struct mrsas_softc *sc) {
- xpt_freeze_simq(sc->sim_0, 1);
- xpt_freeze_simq(sc->sim_1, 1);
+void
+mrsas_xpt_freeze(struct mrsas_softc *sc)
+{
+ xpt_freeze_simq(sc->sim_0, 1);
+ xpt_freeze_simq(sc->sim_1, 1);
}
-
-void mrsas_xpt_release(struct mrsas_softc *sc) {
- xpt_release_simq(sc->sim_0, 1);
- xpt_release_simq(sc->sim_1, 1);
+
+void
+mrsas_xpt_release(struct mrsas_softc *sc)
+{
+ xpt_release_simq(sc->sim_0, 1);
+ xpt_release_simq(sc->sim_1, 1);
}
-/**
- * mrsas_cmd_done: Perform remaining command completion
- * input: Adapter instance soft state
- * Pointer to command packet
+/*
+ * mrsas_cmd_done: Perform remaining command completion
+ * input: Adapter instance soft state, Pointer to command packet
*
- * This function calls ummap request and releases the MPT command.
+ * This function calls unmap request and releases the MPT command.
*/
-void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
+void
+mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
- callout_stop(&cmd->cm_callout);
- mrsas_unmap_request(sc, cmd);
- mtx_lock(&sc->sim_lock);
- xpt_done(cmd->ccb_ptr);
- cmd->ccb_ptr = NULL;
- mtx_unlock(&sc->sim_lock);
- mrsas_release_mpt_cmd(cmd);
+ callout_stop(&cmd->cm_callout);
+ mrsas_unmap_request(sc, cmd);
+ mtx_lock(&sc->sim_lock);
+ xpt_done(cmd->ccb_ptr);
+ cmd->ccb_ptr = NULL;
+ mtx_unlock(&sc->sim_lock);
+ mrsas_release_mpt_cmd(cmd);
}
-/**
- * mrsas_poll: Polling entry point
- * input: Pointer to SIM
+/*
+ * mrsas_cam_poll: Polling entry point
+ * input: Pointer to SIM
*
* This is currently a stub function.
*/
-static void mrsas_poll(struct cam_sim *sim)
+static void
+mrsas_cam_poll(struct cam_sim *sim)
{
- struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
- mrsas_isr((void *) sc);
+ struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
+
+ mrsas_isr((void *)sc);
}
/*
- * mrsas_bus_scan: Perform bus scan
- * input: Adapter instance soft state
+ * mrsas_bus_scan: Perform bus scan
+ * input: Adapter instance soft state
*
- * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should
- * not be called in FreeBSD 8.x and later versions, where the bus scan is
- * automatic.
- */
-int mrsas_bus_scan(struct mrsas_softc *sc)
+ * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should not
+ * be called in FreeBSD 8.x and later versions, where the bus scan is
+ * automatic.
+ */
+int
+mrsas_bus_scan(struct mrsas_softc *sc)
{
- union ccb *ccb_0;
- union ccb *ccb_1;
-
- if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
- return(ENOMEM);
- }
-
- if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
- xpt_free_ccb(ccb_0);
- return(ENOMEM);
- }
-
- mtx_lock(&sc->sim_lock);
- if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
- CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){
- xpt_free_ccb(ccb_0);
- xpt_free_ccb(ccb_1);
- mtx_unlock(&sc->sim_lock);
- return(EIO);
- }
-
- if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
- CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){
- xpt_free_ccb(ccb_0);
- xpt_free_ccb(ccb_1);
- mtx_unlock(&sc->sim_lock);
- return(EIO);
- }
-
- mtx_unlock(&sc->sim_lock);
- xpt_rescan(ccb_0);
- xpt_rescan(ccb_1);
-
- return(0);
+ union ccb *ccb_0;
+ union ccb *ccb_1;
+
+ if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
+ return (ENOMEM);
+ }
+ if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
+ xpt_free_ccb(ccb_0);
+ return (ENOMEM);
+ }
+ mtx_lock(&sc->sim_lock);
+ if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_free_ccb(ccb_0);
+ xpt_free_ccb(ccb_1);
+ mtx_unlock(&sc->sim_lock);
+ return (EIO);
+ }
+ if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_free_ccb(ccb_0);
+ xpt_free_ccb(ccb_1);
+ mtx_unlock(&sc->sim_lock);
+ return (EIO);
+ }
+ mtx_unlock(&sc->sim_lock);
+ xpt_rescan(ccb_0);
+ xpt_rescan(ccb_1);
+
+ return (0);
}
/*
- * mrsas_bus_scan_sim: Perform bus scan per SIM
- * input: Adapter instance soft state
- * This function will be called from Event handler
- * on LD creation/deletion, JBOD on/off.
- */
-int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
+ * mrsas_bus_scan_sim: Perform bus scan per SIM
+ * input: adapter instance soft state
+ *
+ * This function will be called from Event handler on LD creation/deletion,
+ * JBOD on/off.
+ */
+int
+mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
{
- union ccb *ccb;
-
- if ((ccb = xpt_alloc_ccb()) == NULL) {
- return(ENOMEM);
- }
- mtx_lock(&sc->sim_lock);
- if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
- CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){
- xpt_free_ccb(ccb);
- mtx_unlock(&sc->sim_lock);
- return(EIO);
- }
- mtx_unlock(&sc->sim_lock);
- xpt_rescan(ccb);
-
- return(0);
+ union ccb *ccb;
+
+ if ((ccb = xpt_alloc_ccb()) == NULL) {
+ return (ENOMEM);
+ }
+ mtx_lock(&sc->sim_lock);
+ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_free_ccb(ccb);
+ mtx_unlock(&sc->sim_lock);
+ return (EIO);
+ }
+ mtx_unlock(&sc->sim_lock);
+ xpt_rescan(ccb);
+
+ return (0);
}
diff --git a/sys/dev/mrsas/mrsas_fp.c b/sys/dev/mrsas/mrsas_fp.c
index 91005a5..19bb4e4 100644
--- a/sys/dev/mrsas/mrsas_fp.c
+++ b/sys/dev/mrsas/mrsas_fp.c
@@ -1,43 +1,38 @@
/*
- * Copyright (c) 2014, LSI Corp.
- * All rights reserved.
- * Author: Marian Choy
+ * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
* Support: freebsdraid@lsi.com
*
* Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. Neither the name of the <ORGANIZATION> nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer. 2. Redistributions
+ * in binary form must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution. 3. Neither the name of the
+ * <ORGANIZATION> nor the names of its contributors may be used to endorse or
+ * promote products derived from this software without specific prior written
+ * permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing
* official policies,either expressed or implied, of the FreeBSD Project.
*
- * Send feedback to: <megaraidfbsd@lsi.com>
- * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
- * ATTN: MegaRaid FreeBSD
+ * Send feedback to: <megaraidfbsd@lsi.com> Mail to: LSI Corporation, 1621
+ * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
*
*/
@@ -59,88 +54,106 @@ __FBSDID("$FreeBSD$");
* Function prototypes
*/
u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
-u_int8_t mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
- u_int64_t block, u_int32_t count);
-u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
- struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
-u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
- u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context,
- MR_FW_RAID_MAP_ALL *map);
-u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
-u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
-u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
-u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
- struct IO_REQUEST_INFO *io_info);
+u_int8_t
+mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
+ u_int64_t block, u_int32_t count);
+u_int8_t
+MR_BuildRaidContext(struct mrsas_softc *sc,
+ struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
+u_int8_t
+MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT * pRAID_Context,
+ MR_DRV_RAID_MAP_ALL * map);
+u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
+u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
+u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
+u_int16_t
+mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+ struct IO_REQUEST_INFO *io_info);
u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
-u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
- MR_FW_RAID_MAP_ALL *map, int *div_error);
+u_int32_t
+MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
+ MR_DRV_RAID_MAP_ALL * map, int *div_error);
u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
-void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
- PLD_LOAD_BALANCE_INFO lbInfo);
-void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request,
- u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
- MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
- u_int32_t ld_block_size);
-static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
- MR_FW_RAID_MAP_ALL *map);
-static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map);
-static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm,
- MR_FW_RAID_MAP_ALL *map);
-static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
- MR_FW_RAID_MAP_ALL *map);
-static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
- MR_FW_RAID_MAP_ALL *map);
-static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld,
- MR_FW_RAID_MAP_ALL *map);
-MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+void
+mrsas_update_load_balance_params(MR_DRV_RAID_MAP_ALL * map,
+ PLD_LOAD_BALANCE_INFO lbInfo);
+void
+mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
+ u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
+ MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
+ u_int32_t ld_block_size);
+static u_int16_t
+MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
+ MR_DRV_RAID_MAP_ALL * map);
+static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map);
+static u_int16_t
+MR_ArPdGet(u_int32_t ar, u_int32_t arm,
+ MR_DRV_RAID_MAP_ALL * map);
+static MR_LD_SPAN *
+MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
+ MR_DRV_RAID_MAP_ALL * map);
+static u_int8_t
+MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
+ MR_DRV_RAID_MAP_ALL * map);
+static MR_SPAN_BLOCK_INFO *
+MR_LdSpanInfoGet(u_int32_t ld,
+ MR_DRV_RAID_MAP_ALL * map);
+MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
+void MR_PopulateDrvRaidMap(struct mrsas_softc *sc);
+
/*
- * Spanset related function prototypes
- * Added for PRL11 configuration (Uneven span support)
+ * Spanset related function prototypes Added for PRL11 configuration (Uneven
+ * span support)
*/
-void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo);
-static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
- u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
-static u_int64_t get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
- u_int64_t strip, MR_FW_RAID_MAP_ALL *map);
-static u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc,
- u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
- MR_FW_RAID_MAP_ALL *map, int *div_error);
-static u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
- u_int64_t stripe, MR_FW_RAID_MAP_ALL *map);
+void mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo);
+static u_int8_t
+mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
+static u_int64_t
+get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t strip, MR_DRV_RAID_MAP_ALL * map);
+static u_int32_t
+mr_spanset_get_span_block(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
+ MR_DRV_RAID_MAP_ALL * map, int *div_error);
+static u_int8_t
+get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
+ u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map);
/*
- * Spanset related defines
- * Added for PRL11 configuration(Uneven span support)
+ * Spanset related defines Added for PRL11 configuration(Uneven span support)
*/
-#define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize
-#define SPAN_ROW_DATA_SIZE(map_, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize
-#define SPAN_INVALID 0xff
-#define SPAN_DEBUG 0
+#define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize
+#define SPAN_ROW_DATA_SIZE(map_, ld, index_) \
+ MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize
+#define SPAN_INVALID 0xff
+#define SPAN_DEBUG 0
/*
* Related Defines
*/
-typedef u_int64_t REGION_KEY;
-typedef u_int32_t REGION_LEN;
+typedef u_int64_t REGION_KEY;
+typedef u_int32_t REGION_LEN;
-#define MR_LD_STATE_OPTIMAL 3
-#define FALSE 0
-#define TRUE 1
+#define MR_LD_STATE_OPTIMAL 3
+#define FALSE 0
+#define TRUE 1
/*
* Related Macros
*/
-#define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )
+#define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )
-#define swap32(x) \
+#define swap32(x) \
((unsigned int)( \
(((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
(((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
@@ -149,20 +162,20 @@ typedef u_int32_t REGION_LEN;
/*
- * In-line functions for mod and divide of 64-bit dividend and 32-bit divisor.
- * Assumes a check for a divisor of zero is not possible.
- *
- * @param dividend : Dividend
- * @param divisor : Divisor
- * @return remainder
+ * In-line functions for mod and divide of 64-bit dividend and 32-bit
+ * divisor. Assumes a check for a divisor of zero is not possible.
+ *
+ * @param dividend: Dividend
+ * @param divisor: Divisor
+ * @return remainder
*/
-#define mega_mod64(dividend, divisor) ({ \
+#define mega_mod64(dividend, divisor) ({ \
int remainder; \
remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \
remainder;})
-#define mega_div64_32(dividend, divisor) ({ \
+#define mega_div64_32(dividend, divisor) ({ \
int quotient; \
quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \
quotient;})
@@ -170,1282 +183,1336 @@ quotient;})
/*
* Various RAID map access functions. These functions access the various
- * parts of the RAID map and returns the appropriate parameters.
+ * parts of the RAID map and returns the appropriate parameters.
*/
-MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+MR_LD_RAID *
+MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
- return (&map->raidMap.ldSpanMap[ld].ldRaid);
+ return (&map->raidMap.ldSpanMap[ld].ldRaid);
}
-u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+u_int16_t
+MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
- return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
+ return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}
-static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
+static u_int16_t
+MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
- return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
+ return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
}
-static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_FW_RAID_MAP_ALL *map)
+static u_int8_t
+MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL * map)
{
- return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
+ return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}
-static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map)
+static u_int16_t
+MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map)
{
- return map->raidMap.devHndlInfo[pd].curDevHdl;
+ return map->raidMap.devHndlInfo[pd].curDevHdl;
}
-static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_FW_RAID_MAP_ALL *map)
+static u_int16_t
+MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map)
{
- return map->raidMap.arMapInfo[ar].pd[arm];
+ return map->raidMap.arMapInfo[ar].pd[arm];
}
-static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
+static MR_LD_SPAN *
+MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
- return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
+ return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}
-static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+static MR_SPAN_BLOCK_INFO *
+MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
- return &map->raidMap.ldSpanMap[ld].spanBlock[0];
+ return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}
-u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
+u_int16_t
+MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
- return map->raidMap.ldTgtIdToLd[ldTgtId];
+ return map->raidMap.ldTgtIdToLd[ldTgtId];
}
-u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
+u_int32_t
+MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
- MR_LD_RAID *raid;
- u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;
+ MR_LD_RAID *raid;
+ u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;
+
+ ld = MR_TargetIdToLdGet(ldTgtId, map);
+
+ /*
+ * Check if logical drive was removed.
+ */
+ if (ld >= MAX_LOGICAL_DRIVES)
+ return ldBlockSize;
- ld = MR_TargetIdToLdGet(ldTgtId, map);
+ raid = MR_LdRaidGet(ld, map);
+ ldBlockSize = raid->logicalBlockLength;
+ if (!ldBlockSize)
+ ldBlockSize = MRSAS_SCSIBLOCKSIZE;
- /*
- * Check if logical drive was removed.
- */
- if (ld >= MAX_LOGICAL_DRIVES)
- return ldBlockSize;
+ return ldBlockSize;
+}
- raid = MR_LdRaidGet(ld, map);
- ldBlockSize = raid->logicalBlockLength;
- if (!ldBlockSize)
- ldBlockSize = MRSAS_SCSIBLOCKSIZE;
+/*
+ * This function will Populate Driver Map using firmware raid map
+ */
+void
+MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
+{
+ MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
+ MR_FW_RAID_MAP *pFwRaidMap = NULL;
+ unsigned int i;
+
+ MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
+ MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+
+ if (sc->max256vdSupport) {
+ memcpy(sc->ld_drv_map[sc->map_id & 1],
+ sc->raidmap_mem[sc->map_id & 1],
+ sc->current_map_sz);
+ /*
+ * New Raid map will not set totalSize, so keep expected
+ * value for legacy code in ValidateMapInfo
+ */
+ pDrvRaidMap->totalSize = sizeof(MR_FW_RAID_MAP_EXT);
+ } else {
+ fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)];
+ pFwRaidMap = &fw_map_old->raidMap;
+
+#if VD_EXT_DEBUG
+ for (i = 0; i < pFwRaidMap->ldCount; i++) {
+ device_printf(sc->mrsas_dev,
+ "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0/%lx\n", i,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
+ }
+#endif
+
+ memset(drv_map, 0, sc->drv_map_sz);
+ pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+ pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
+ pDrvRaidMap->fpPdIoTimeoutSec =
+ pFwRaidMap->fpPdIoTimeoutSec;
+
+ for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) {
+ pDrvRaidMap->ldTgtIdToLd[i] =
+ (u_int8_t)pFwRaidMap->ldTgtIdToLd[i];
+ }
+
+ for (i = 0; i < pDrvRaidMap->ldCount; i++) {
+ pDrvRaidMap->ldSpanMap[i] =
+ pFwRaidMap->ldSpanMap[i];
+
+#if VD_EXT_DEBUG
+ device_printf(sc->mrsas_dev, "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
+ i, i, pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x"
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
+ drv_map, pDrvRaidMap,
+ &pFwRaidMap->ldSpanMap[i].ldRaid, &pDrvRaidMap->ldSpanMap[i].ldRaid);
+#endif
+ }
- return ldBlockSize;
+ memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
+ sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
+ memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
+ sizeof(MR_DEV_HANDLE_INFO) *
+ MAX_RAIDMAP_PHYSICAL_DEVICES);
+ }
}
-/**
- * MR_ValidateMapInfo: Validate RAID map
- * input: Adapter instance soft state
+/*
+ * MR_ValidateMapInfo: Validate RAID map
+ * input: Adapter instance soft state
*
- * This function checks and validates the loaded RAID map. It returns 0 if
+ * This function checks and validates the loaded RAID map. It returns 0 if
* successful, and 1 otherwise.
*/
-u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc)
+u_int8_t
+MR_ValidateMapInfo(struct mrsas_softc *sc)
{
if (!sc) {
return 1;
}
- uint32_t total_map_sz;
- MR_FW_RAID_MAP_ALL *map = sc->raidmap_mem[(sc->map_id & 1)];
- MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
- PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) &sc->log_to_span;
-
- total_map_sz = (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP) +
- (sizeof(MR_LD_SPAN_MAP) * pFwRaidMap->ldCount));
-
- if (pFwRaidMap->totalSize != total_map_sz) {
- device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", total_map_sz);
- device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
- device_printf(sc->mrsas_dev, "pFwRaidMap->totalSize=%x\n", pFwRaidMap->totalSize);
- return 1;
- }
-
- if (sc->UnevenSpanSupport) {
- mr_update_span_set(map, ldSpanInfo);
- }
+ MR_PopulateDrvRaidMap(sc);
+
+ MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
+ MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+
+ u_int32_t expected_map_size;
+
+ drv_map = sc->ld_drv_map[(sc->map_id & 1)];
+ pDrvRaidMap = &drv_map->raidMap;
+ PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) & sc->log_to_span;
+
+ if (sc->max256vdSupport)
+ expected_map_size = sizeof(MR_FW_RAID_MAP_EXT);
+ else
+ expected_map_size =
+ (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) +
+ (sizeof(MR_LD_SPAN_MAP) * pDrvRaidMap->ldCount);
- mrsas_update_load_balance_params(map, sc->load_balance_info);
+ if (pDrvRaidMap->totalSize != expected_map_size) {
+ device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size);
+ device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
+ device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", pDrvRaidMap->totalSize);
+ return 1;
+ }
+ if (sc->UnevenSpanSupport) {
+ printf("Updating span set\n\n");
+ mr_update_span_set(drv_map, ldSpanInfo);
+ }
+ mrsas_update_load_balance_params(drv_map, sc->load_balance_info);
- return 0;
+ return 0;
}
/*
- * ******************************************************************************
- *
- * Function to print info about span set created in driver from FW raid map
- *
- * Inputs :
- * map - LD map
- * ldSpanInfo - ldSpanInfo per HBA instance
- *
- *
- * */
+ *
+ * Function to print info about span set created in driver from FW raid map
+ *
+ * Inputs: map
+ * ldSpanInfo: ld map span info per HBA instance
+ *
+ *
+ */
#if SPAN_DEBUG
-static int getSpanInfo(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+static int
+getSpanInfo(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
- u_int8_t span;
- u_int32_t element;
- MR_LD_RAID *raid;
- LD_SPAN_SET *span_set;
- MR_QUAD_ELEMENT *quad;
- int ldCount;
- u_int16_t ld;
-
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
- {
- ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES) {
- continue;
- }
- raid = MR_LdRaidGet(ld, map);
- printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
- for (span=0; span<raid->spanDepth; span++)
- printf("Span=%x, number of quads=%x\n", span,
- map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements);
- for (element=0; element < MAX_QUAD_DEPTH; element++) {
- span_set = &(ldSpanInfo[ld].span_set[element]);
- if (span_set->span_row_data_width == 0) break;
-
- printf(" Span Set %x: width=%x, diff=%x\n", element,
- (unsigned int)span_set->span_row_data_width,
- (unsigned int)span_set->diff);
- printf(" logical LBA start=0x%08lx, end=0x%08lx\n",
- (long unsigned int)span_set->log_start_lba,
- (long unsigned int)span_set->log_end_lba);
- printf(" span row start=0x%08lx, end=0x%08lx\n",
- (long unsigned int)span_set->span_row_start,
- (long unsigned int)span_set->span_row_end);
- printf(" data row start=0x%08lx, end=0x%08lx\n",
- (long unsigned int)span_set->data_row_start,
- (long unsigned int)span_set->data_row_end);
- printf(" data strip start=0x%08lx, end=0x%08lx\n",
- (long unsigned int)span_set->data_strip_start,
- (long unsigned int)span_set->data_strip_end);
-
- for (span=0; span<raid->spanDepth; span++) {
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >=element+1){
- quad = &map->raidMap.ldSpanMap[ld].
- spanBlock[span].block_span_info.
- quad[element];
- printf(" Span=%x, Quad=%x, diff=%x\n", span,
- element, quad->diff);
- printf(" offset_in_span=0x%08lx\n",
- (long unsigned int)quad->offsetInSpan);
- printf(" logical start=0x%08lx, end=0x%08lx\n",
- (long unsigned int)quad->logStart,
- (long unsigned int)quad->logEnd);
- }
- }
- }
- }
- return 0;
+ u_int8_t span;
+ u_int32_t element;
+ MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set;
+ MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u_int16_t ld;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES) {
+ continue;
+ }
+ raid = MR_LdRaidGet(ld, map);
+ printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
+ for (span = 0; span < raid->spanDepth; span++)
+ printf("Span=%x, number of quads=%x\n", span,
+ map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements);
+ for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ if (span_set->span_row_data_width == 0)
+ break;
+
+ printf("Span Set %x: width=%x, diff=%x\n", element,
+ (unsigned int)span_set->span_row_data_width,
+ (unsigned int)span_set->diff);
+ printf("logical LBA start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->log_start_lba,
+ (long unsigned int)span_set->log_end_lba);
+ printf("span row start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->span_row_start,
+ (long unsigned int)span_set->span_row_end);
+ printf("data row start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->data_row_start,
+ (long unsigned int)span_set->data_row_end);
+ printf("data strip start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->data_strip_start,
+ (long unsigned int)span_set->data_strip_end);
+
+ for (span = 0; span < raid->spanDepth; span++) {
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >= element + 1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+ printf("Span=%x, Quad=%x, diff=%x\n", span,
+ element, quad->diff);
+ printf("offset_in_span=0x%08lx\n",
+ (long unsigned int)quad->offsetInSpan);
+ printf("logical start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)quad->logStart,
+ (long unsigned int)quad->logEnd);
+ }
+ }
+ }
+ }
+ return 0;
}
+
#endif
/*
-******************************************************************************
-*
-* This routine calculates the Span block for given row using spanset.
-*
-* Inputs :
-* instance - HBA instance
-* ld - Logical drive number
-* row - Row number
-* map - LD map
-*
-* Outputs :
-*
-* span - Span number
-* block - Absolute Block number in the physical disk
-* div_error - Devide error code.
-*/
-
-u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
- u_int64_t *span_blk, MR_FW_RAID_MAP_ALL *map, int *div_error)
+ *
+ * This routine calculates the Span block for given row using spanset.
+ *
+ * Inputs : HBA instance
+ * ld: Logical drive number
+ * row: Row number
+ * map: LD map
+ *
+ * Outputs : span - Span number block
+ * - Absolute Block number in the physical disk
+ * div_error - Devide error code.
+ */
+
+u_int32_t
+mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
+ u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
- MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- LD_SPAN_SET *span_set;
- MR_QUAD_ELEMENT *quad;
- u_int32_t span, info;
- PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
-
- for (info=0; info < MAX_QUAD_DEPTH; info++) {
- span_set = &(ldSpanInfo[ld].span_set[info]);
-
- if (span_set->span_row_data_width == 0) break;
- if (row > span_set->data_row_end) continue;
-
- for (span=0; span<raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >= info+1) {
- quad = &map->raidMap.ldSpanMap[ld].
- spanBlock[span].
- block_span_info.quad[info];
- if (quad->diff == 0) {
- *div_error = 1;
- return span;
- }
- if ( quad->logStart <= row &&
- row <= quad->logEnd &&
- (mega_mod64(row - quad->logStart,
- quad->diff)) == 0 ) {
- if (span_blk != NULL) {
- u_int64_t blk;
- blk = mega_div64_32
- ((row - quad->logStart),
- quad->diff);
- blk = (blk + quad->offsetInSpan)
- << raid->stripeShift;
- *span_blk = blk;
- }
- return span;
- }
- }
- }
- return SPAN_INVALID;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ MR_QUAD_ELEMENT *quad;
+ u_int32_t span, info;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (row > span_set->data_row_end)
+ continue;
+
+ for (span = 0; span < raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >= info + 1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].
+ block_span_info.quad[info];
+ if (quad->diff == 0) {
+ *div_error = 1;
+ return span;
+ }
+ if (quad->logStart <= row &&
+ row <= quad->logEnd &&
+ (mega_mod64(row - quad->logStart,
+ quad->diff)) == 0) {
+ if (span_blk != NULL) {
+ u_int64_t blk;
+
+ blk = mega_div64_32
+ ((row - quad->logStart),
+ quad->diff);
+ blk = (blk + quad->offsetInSpan)
+ << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return SPAN_INVALID;
}
/*
-******************************************************************************
-*
-* This routine calculates the row for given strip using spanset.
-*
-* Inputs :
-* instance - HBA instance
-* ld - Logical drive number
-* Strip - Strip
-* map - LD map
-*
-* Outputs :
-*
-* row - row associated with strip
-*/
-
-static u_int64_t get_row_from_strip(struct mrsas_softc *sc,
- u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
+ *
+ * This routine calculates the row for given strip using spanset.
+ *
+ * Inputs : HBA instance
+ * ld: Logical drive number
+ * Strip: Strip
+ * map: LD map
+ *
+ * Outputs : row - row associated with strip
+ */
+
+static u_int64_t
+get_row_from_strip(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
- MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- LD_SPAN_SET *span_set;
- PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
- u_int32_t info, strip_offset, span, span_offset;
- u_int64_t span_set_Strip, span_set_Row;
-
- for (info=0; info < MAX_QUAD_DEPTH; info++) {
- span_set = &(ldSpanInfo[ld].span_set[info]);
-
- if (span_set->span_row_data_width == 0) break;
- if (strip > span_set->data_strip_end) continue;
-
- span_set_Strip = strip - span_set->data_strip_start;
- strip_offset = mega_mod64(span_set_Strip,
- span_set->span_row_data_width);
- span_set_Row = mega_div64_32(span_set_Strip,
- span_set->span_row_data_width) * span_set->diff;
- for (span=0,span_offset=0; span<raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >=info+1) {
- if (strip_offset >=
- span_set->strip_offset[span])
- span_offset++;
- else
- break;
- }
- mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
- "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
- (unsigned long long)span_set_Strip,
- (unsigned long long)span_set_Row,
- (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset);
- mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip,
- (unsigned long long) span_set->data_row_start +
- (unsigned long long) span_set_Row + (span_offset - 1));
- return (span_set->data_row_start + span_set_Row + (span_offset - 1));
- }
- return -1LLU;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+ u_int32_t info, strip_offset, span, span_offset;
+ u_int64_t span_set_Strip, span_set_Row;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (strip > span_set->data_strip_end)
+ continue;
+
+ span_set_Strip = strip - span_set->data_strip_start;
+ strip_offset = mega_mod64(span_set_Strip,
+ span_set->span_row_data_width);
+ span_set_Row = mega_div64_32(span_set_Strip,
+ span_set->span_row_data_width) * span_set->diff;
+ for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >= info + 1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset++;
+ else
+ break;
+ }
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
+ "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
+ (unsigned long long)span_set_Strip,
+ (unsigned long long)span_set_Row,
+ (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset);
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip,
+ (unsigned long long)span_set->data_row_start +
+ (unsigned long long)span_set_Row + (span_offset - 1));
+ return (span_set->data_row_start + span_set_Row + (span_offset - 1));
+ }
+ return -1LLU;
}
/*
-******************************************************************************
-*
-* This routine calculates the Start Strip for given row using spanset.
-*
-* Inputs :
-* instance - HBA instance
-* ld - Logical drive number
-* row - Row number
-* map - LD map
-*
-* Outputs :
-*
-* Strip - Start strip associated with row
-*/
-
-static u_int64_t get_strip_from_row(struct mrsas_softc *sc,
- u_int32_t ld, u_int64_t row, MR_FW_RAID_MAP_ALL *map)
+ *
+ * This routine calculates the Start Strip for given row using spanset.
+ *
+ * Inputs: HBA instance
+ * ld: Logical drive number
+ * row: Row number
+ * map: LD map
+ *
+ * Outputs : Strip - Start strip associated with row
+ */
+
+static u_int64_t
+get_strip_from_row(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL * map)
{
- MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- LD_SPAN_SET *span_set;
- MR_QUAD_ELEMENT *quad;
- PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
- u_int32_t span, info;
- u_int64_t strip;
-
- for (info=0; info<MAX_QUAD_DEPTH; info++) {
- span_set = &(ldSpanInfo[ld].span_set[info]);
-
- if (span_set->span_row_data_width == 0) break;
- if (row > span_set->data_row_end) continue;
-
- for (span=0; span<raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >=info+1) {
- quad = &map->raidMap.ldSpanMap[ld].
- spanBlock[span].block_span_info.quad[info];
- if ( quad->logStart <= row &&
- row <= quad->logEnd &&
- mega_mod64((row - quad->logStart),
- quad->diff) == 0 ) {
- strip = mega_div64_32
- (((row - span_set->data_row_start)
- - quad->logStart),
- quad->diff);
- strip *= span_set->span_row_data_width;
- strip += span_set->data_strip_start;
- strip += span_set->strip_offset[span];
- return strip;
- }
- }
- }
- mrsas_dprint(sc, MRSAS_PRL11,"LSI Debug - get_strip_from_row: returns invalid "
- "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
- return -1;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ MR_QUAD_ELEMENT *quad;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+ u_int32_t span, info;
+ u_int64_t strip;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (row > span_set->data_row_end)
+ continue;
+
+ for (span = 0; span < raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >= info + 1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.quad[info];
+ if (quad->logStart <= row &&
+ row <= quad->logEnd &&
+ mega_mod64((row - quad->logStart),
+ quad->diff) == 0) {
+ strip = mega_div64_32
+ (((row - span_set->data_row_start)
+ - quad->logStart),
+ quad->diff);
+ strip *= span_set->span_row_data_width;
+ strip += span_set->data_strip_start;
+ strip += span_set->strip_offset[span];
+ return strip;
+ }
+ }
+ }
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug - get_strip_from_row: returns invalid "
+ "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
+ return -1;
}
/*
-******************************************************************************
-*
-* This routine calculates the Physical Arm for given strip using spanset.
-*
-* Inputs :
-* instance - HBA instance
-* ld - Logical drive number
-* strip - Strip
-* map - LD map
-*
-* Outputs :
-*
-* Phys Arm - Phys Arm associated with strip
-*/
-
-static u_int32_t get_arm_from_strip(struct mrsas_softc *sc,
- u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
+ * *****************************************************************************
+ *
+ *
+ * This routine calculates the Physical Arm for given strip using spanset.
+ *
+ * Inputs : HBA instance
+ * Logical drive number
+ * Strip
+ * LD map
+ *
+ * Outputs : Phys Arm - Phys Arm associated with strip
+ */
+
+static u_int32_t
+get_arm_from_strip(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
- MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- LD_SPAN_SET *span_set;
- PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
- u_int32_t info, strip_offset, span, span_offset;
-
- for (info=0; info<MAX_QUAD_DEPTH; info++) {
- span_set = &(ldSpanInfo[ld].span_set[info]);
-
- if (span_set->span_row_data_width == 0) break;
- if (strip > span_set->data_strip_end) continue;
-
- strip_offset = (u_int32_t)mega_mod64
- ((strip - span_set->data_strip_start),
- span_set->span_row_data_width);
-
- for (span=0,span_offset=0; span<raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >=info+1) {
- if (strip_offset >=
- span_set->strip_offset[span])
- span_offset =
- span_set->strip_offset[span];
- else
- break;
- }
- mrsas_dprint(sc, MRSAS_PRL11, "LSI PRL11: get_arm_from_strip: "
- " for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
- (long unsigned int)strip, (strip_offset - span_offset));
- return (strip_offset - span_offset);
- }
-
- mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: - get_arm_from_strip: returns invalid arm"
- " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);
-
- return -1;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+ u_int32_t info, strip_offset, span, span_offset;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (strip > span_set->data_strip_end)
+ continue;
+
+ strip_offset = (u_int32_t)mega_mod64
+ ((strip - span_set->data_strip_start),
+ span_set->span_row_data_width);
+
+ for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >= info + 1) {
+ if (strip_offset >= span_set->strip_offset[span])
+ span_offset = span_set->strip_offset[span];
+ else
+ break;
+ }
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI PRL11: get_arm_from_strip: "
+ "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
+ (long unsigned int)strip, (strip_offset - span_offset));
+ return (strip_offset - span_offset);
+ }
+
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: - get_arm_from_strip: returns invalid arm"
+ " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);
+
+ return -1;
}
/* This Function will return Phys arm */
-u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
- MR_FW_RAID_MAP_ALL *map)
+u_int8_t
+get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
+ MR_DRV_RAID_MAP_ALL * map)
{
- MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- /* Need to check correct default value */
- u_int32_t arm = 0;
-
- switch (raid->level) {
- case 0:
- case 5:
- case 6:
- arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
- break;
- case 1:
- // start with logical arm
- arm = get_arm_from_strip(sc, ld, stripe, map);
- arm *= 2;
- break;
-
- }
-
- return arm;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+
+ /* Need to check correct default value */
+ u_int32_t arm = 0;
+
+ switch (raid->level) {
+ case 0:
+ case 5:
+ case 6:
+ arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
+ break;
+ case 1:
+ /* start with logical arm */
+ arm = get_arm_from_strip(sc, ld, stripe, map);
+ arm *= 2;
+ break;
+ }
+
+ return arm;
}
/*
-******************************************************************************
-*
-* This routine calculates the arm, span and block for the specified stripe and
-* reference in stripe using spanset
-*
-* Inputs :
-*
-* ld - Logical drive number
-* stripRow - Stripe number
-* stripRef - Reference in stripe
-*
-* Outputs :
-*
-* span - Span number
-* block - Absolute Block number in the physical disk
-*/
-static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
- u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+ *
+ * This routine calculates the arm, span and block for the specified stripe and
+ * reference in stripe using spanset
+ *
+ * Inputs : Logical drive number
+ * stripRow: Stripe number
+ * stripRef: Reference in stripe
+ *
+ * Outputs : span - Span number block - Absolute Block
+ * number in the physical disk
+ */
+static u_int8_t
+mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
+ u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
- MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- u_int32_t pd, arRef;
- u_int8_t physArm, span;
- u_int64_t row;
- u_int8_t retval = TRUE;
- u_int64_t *pdBlock = &io_info->pdBlock;
- u_int16_t *pDevHandle = &io_info->devHandle;
- u_int32_t logArm, rowMod, armQ, arm;
- u_int8_t do_invader = 0;
-
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
- do_invader = 1;
-
- // Get row and span from io_info for Uneven Span IO.
- row = io_info->start_row;
- span = io_info->start_span;
-
-
- if (raid->level == 6) {
- logArm = get_arm_from_strip(sc, ld, stripRow, map);
- rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
- armQ = SPAN_ROW_SIZE(map,ld,span) - 1 - rowMod;
- arm = armQ + 1 + logArm;
- if (arm >= SPAN_ROW_SIZE(map, ld, span))
- arm -= SPAN_ROW_SIZE(map ,ld ,span);
- physArm = (u_int8_t)arm;
- } else
- // Calculate the arm
- physArm = get_arm(sc, ld, span, stripRow, map);
-
-
- arRef = MR_LdSpanArrayGet(ld, span, map);
- pd = MR_ArPdGet(arRef, physArm, map);
-
- if (pd != MR_PD_INVALID)
- *pDevHandle = MR_PdDevHandleGet(pd, map);
- else {
- *pDevHandle = MR_PD_INVALID;
- if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
- raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
- pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
- else if (raid->level == 1) {
- pd = MR_ArPdGet(arRef, physArm + 1, map);
- if (pd != MR_PD_INVALID)
- *pDevHandle = MR_PdDevHandleGet(pd, map);
- }
- }
-
- *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
- pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
- return retval;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u_int32_t pd, arRef;
+ u_int8_t physArm, span;
+ u_int64_t row;
+ u_int8_t retval = TRUE;
+ u_int64_t *pdBlock = &io_info->pdBlock;
+ u_int16_t *pDevHandle = &io_info->devHandle;
+ u_int32_t logArm, rowMod, armQ, arm;
+ u_int8_t do_invader = 0;
+
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ do_invader = 1;
+
+ /* Get row and span from io_info for Uneven Span IO. */
+ row = io_info->start_row;
+ span = io_info->start_span;
+
+
+ if (raid->level == 6) {
+ logArm = get_arm_from_strip(sc, ld, stripRow, map);
+ rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
+ armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
+ arm = armQ + 1 + logArm;
+ if (arm >= SPAN_ROW_SIZE(map, ld, span))
+ arm -= SPAN_ROW_SIZE(map, ld, span);
+ physArm = (u_int8_t)arm;
+ } else
+ /* Calculate the arm */
+ physArm = get_arm(sc, ld, span, stripRow, map);
+
+
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+ pd = MR_ArPdGet(arRef, physArm, map);
+
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ *pDevHandle = MR_PD_INVALID;
+ if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ return retval;
}
-/**
-* MR_BuildRaidContext: Set up Fast path RAID context
-*
-* This function will initiate command processing. The start/end row
-* and strip information is calculated then the lock is acquired.
-* This function will return 0 if region lock was acquired OR return
-* num strips.
-*/
-u_int8_t
+/*
+ * MR_BuildRaidContext: Set up Fast path RAID context
+ *
+ * This function will initiate command processing. The start/end row and strip
+ * information is calculated then the lock is acquired. This function will
+ * return 0 if region lock was acquired OR return num strips.
+ */
+u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+ RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
- MR_LD_RAID *raid;
- u_int32_t ld, stripSize, stripe_mask;
- u_int64_t endLba, endStrip, endRow, start_row, start_strip;
- REGION_KEY regStart;
- REGION_LEN regSize;
- u_int8_t num_strips, numRows;
- u_int16_t ref_in_start_stripe, ref_in_end_stripe;
- u_int64_t ldStartBlock;
- u_int32_t numBlocks, ldTgtId;
- u_int8_t isRead, stripIdx;
- u_int8_t retval = 0;
+ MR_LD_RAID *raid;
+ u_int32_t ld, stripSize, stripe_mask;
+ u_int64_t endLba, endStrip, endRow, start_row, start_strip;
+ REGION_KEY regStart;
+ REGION_LEN regSize;
+ u_int8_t num_strips, numRows;
+ u_int16_t ref_in_start_stripe, ref_in_end_stripe;
+ u_int64_t ldStartBlock;
+ u_int32_t numBlocks, ldTgtId;
+ u_int8_t isRead, stripIdx;
+ u_int8_t retval = 0;
u_int8_t startlba_span = SPAN_INVALID;
- u_int64_t *pdBlock = &io_info->pdBlock;
- int error_code = 0;
-
- ldStartBlock = io_info->ldStartBlock;
- numBlocks = io_info->numBlocks;
- ldTgtId = io_info->ldTgtId;
- isRead = io_info->isRead;
-
+ u_int64_t *pdBlock = &io_info->pdBlock;
+ int error_code = 0;
+
+ ldStartBlock = io_info->ldStartBlock;
+ numBlocks = io_info->numBlocks;
+ ldTgtId = io_info->ldTgtId;
+ isRead = io_info->isRead;
+
io_info->IoforUnevenSpan = 0;
- io_info->start_span = SPAN_INVALID;
-
- ld = MR_TargetIdToLdGet(ldTgtId, map);
- raid = MR_LdRaidGet(ld, map);
-
- /*
- * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
- * return FALSE
- */
- if (raid->rowDataSize == 0) {
- if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
- return FALSE;
- else if (sc->UnevenSpanSupport) {
- io_info->IoforUnevenSpan = 1;
- }
- else {
- mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: raid->rowDataSize is 0, but has SPAN[0] rowDataSize = 0x%0x,"
- " but there is _NO_ UnevenSpanSupport\n",
- MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
- return FALSE;
- }
+ io_info->start_span = SPAN_INVALID;
+
+ ld = MR_TargetIdToLdGet(ldTgtId, map);
+ raid = MR_LdRaidGet(ld, map);
+
+ if (raid->rowDataSize == 0) {
+ if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
+ return FALSE;
+ else if (sc->UnevenSpanSupport) {
+ io_info->IoforUnevenSpan = 1;
+ } else {
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: raid->rowDataSize is 0, but has SPAN[0] rowDataSize = 0x%0x,"
+ " but there is _NO_ UnevenSpanSupport\n",
+ MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
+ return FALSE;
+ }
+ }
+ stripSize = 1 << raid->stripeShift;
+ stripe_mask = stripSize - 1;
+ /*
+ * calculate starting row and stripe, and number of strips and rows
+ */
+ start_strip = ldStartBlock >> raid->stripeShift;
+ ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
+ endLba = ldStartBlock + numBlocks - 1;
+ ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
+ endStrip = endLba >> raid->stripeShift;
+ num_strips = (u_int8_t)(endStrip - start_strip + 1); /* End strip */
+ if (io_info->IoforUnevenSpan) {
+ start_row = get_row_from_strip(sc, ld, start_strip, map);
+ endRow = get_row_from_strip(sc, ld, endStrip, map);
+ if (raid->spanDepth == 1) {
+ startlba_span = 0;
+ *pdBlock = start_row << raid->stripeShift;
+ } else {
+ startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
+ pdBlock, map, &error_code);
+ if (error_code == 1) {
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d. Send IO w/o region lock.\n",
+ __func__, __LINE__);
+ return FALSE;
+ }
+ }
+ if (startlba_span == SPAN_INVALID) {
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d for row 0x%llx,"
+ "start strip %llx endSrip %llx\n", __func__,
+ __LINE__, (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip);
+ return FALSE;
+ }
+ io_info->start_span = startlba_span;
+ io_info->start_row = start_row;
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: Check Span number from %s %d for row 0x%llx, "
+ " start strip 0x%llx endSrip 0x%llx span 0x%x\n",
+ __func__, __LINE__, (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip, startlba_span);
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
+ (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
+ } else {
+ start_row = mega_div64_32(start_strip, raid->rowDataSize);
+ endRow = mega_div64_32(endStrip, raid->rowDataSize);
}
- stripSize = 1 << raid->stripeShift;
- stripe_mask = stripSize-1;
- /*
- * calculate starting row and stripe, and number of strips and rows
- */
- start_strip = ldStartBlock >> raid->stripeShift;
- ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
- endLba = ldStartBlock + numBlocks - 1;
- ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
- endStrip = endLba >> raid->stripeShift;
- num_strips = (u_int8_t)(endStrip - start_strip + 1); // End strip
- if (io_info->IoforUnevenSpan) {
- start_row = get_row_from_strip(sc, ld, start_strip, map);
- endRow = get_row_from_strip(sc, ld, endStrip, map);
- if (raid->spanDepth == 1) {
- startlba_span = 0;
- *pdBlock = start_row << raid->stripeShift;
- } else {
- startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
- pdBlock, map, &error_code);
- if (error_code == 1) {
- mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d. Send IO w/o region lock.\n",
- __func__, __LINE__);
- return FALSE;
- }
- }
- if (startlba_span == SPAN_INVALID) {
- mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d for row 0x%llx,"
- "start strip %llx endSrip %llx\n", __func__,
- __LINE__, (unsigned long long)start_row,
- (unsigned long long)start_strip,
- (unsigned long long)endStrip);
- return FALSE;
- }
- io_info->start_span = startlba_span;
- io_info->start_row = start_row;
- mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: Check Span number from %s %d for row 0x%llx, "
- " start strip 0x%llx endSrip 0x%llx span 0x%x\n",
- __func__, __LINE__, (unsigned long long)start_row,
- (unsigned long long)start_strip,
- (unsigned long long)endStrip, startlba_span);
- mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
- (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
- } else {
- start_row = mega_div64_32(start_strip, raid->rowDataSize); // Start Row
- endRow = mega_div64_32(endStrip, raid->rowDataSize);
- }
-
- numRows = (u_int8_t)(endRow - start_row + 1); // get the row count
-
- /*
- * Calculate region info. (Assume region at start of first row, and
- * assume this IO needs the full row - will adjust if not true.)
- */
- regStart = start_row << raid->stripeShift;
- regSize = stripSize;
-
- /* Check if we can send this I/O via FastPath */
- if (raid->capability.fpCapable) {
- if (isRead)
- io_info->fpOkForIo = (raid->capability.fpReadCapable &&
- ((num_strips == 1) ||
- raid->capability.
- fpReadAcrossStripe));
- else
- io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
- ((num_strips == 1) ||
- raid->capability.
- fpWriteAcrossStripe));
- }
- else
- io_info->fpOkForIo = FALSE;
-
- if (numRows == 1) {
- if (num_strips == 1) {
- /* single-strip IOs can always lock only the data needed,
- multi-strip IOs always need to full stripe locked */
- regStart += ref_in_start_stripe;
- regSize = numBlocks;
- }
- }
- else if (io_info->IoforUnevenSpan == 0){
- // For Even span region lock optimization.
- // If the start strip is the last in the start row
- if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
- regStart += ref_in_start_stripe;
- // initialize count to sectors from startRef to end of strip
- regSize = stripSize - ref_in_start_stripe;
- }
- // add complete rows in the middle of the transfer
+
+ numRows = (u_int8_t)(endRow - start_row + 1); /* get the row count */
+
+ /*
+ * Calculate region info. (Assume region at start of first row, and
+ * assume this IO needs the full row - will adjust if not true.)
+ */
+ regStart = start_row << raid->stripeShift;
+ regSize = stripSize;
+
+ /* Check if we can send this I/O via FastPath */
+ if (raid->capability.fpCapable) {
+ if (isRead)
+ io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+ ((num_strips == 1) ||
+ raid->capability.fpReadAcrossStripe));
+ else
+ io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
+ ((num_strips == 1) ||
+ raid->capability.fpWriteAcrossStripe));
+ } else
+ io_info->fpOkForIo = FALSE;
+
+ if (numRows == 1) {
+ if (num_strips == 1) {
+ regStart += ref_in_start_stripe;
+ regSize = numBlocks;
+ }
+ } else if (io_info->IoforUnevenSpan == 0) {
+ /*
+ * For Even span region lock optimization. If the start strip
+ * is the last in the start row
+ */
+ if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
+ regStart += ref_in_start_stripe;
+ /*
+ * initialize count to sectors from startRef to end
+ * of strip
+ */
+ regSize = stripSize - ref_in_start_stripe;
+ }
+ /* add complete rows in the middle of the transfer */
if (numRows > 2)
- regSize += (numRows-2) << raid->stripeShift;
-
- // if IO ends within first strip of last row
- if (endStrip == endRow*raid->rowDataSize)
- regSize += ref_in_end_stripe+1;
- else
- regSize += stripSize;
- } else {
- //For Uneven span region lock optimization.
- // If the start strip is the last in the start row
- if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
- SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
- regStart += ref_in_start_stripe;
- // initialize count to sectors from startRef to end of strip
+ regSize += (numRows - 2) << raid->stripeShift;
+
+ /* if IO ends within first strip of last row */
+ if (endStrip == endRow * raid->rowDataSize)
+ regSize += ref_in_end_stripe + 1;
+ else
+ regSize += stripSize;
+ } else {
+ if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
+ SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
+ regStart += ref_in_start_stripe;
+ /*
+ * initialize count to sectors from startRef to end
+ * of strip
+ */
regSize = stripSize - ref_in_start_stripe;
- }
- // add complete rows in the middle of the transfer
- if (numRows > 2)
- regSize += (numRows-2) << raid->stripeShift;
-
- // if IO ends within first strip of last row
- if (endStrip == get_strip_from_row(sc, ld, endRow, map))
- regSize += ref_in_end_stripe+1;
- else
- regSize += stripSize;
- }
- pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
- pRAID_Context->regLockFlags = (isRead)? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
- else
- pRAID_Context->regLockFlags = (isRead)? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
- pRAID_Context->VirtualDiskTgtId = raid->targetId;
- pRAID_Context->regLockRowLBA = regStart;
- pRAID_Context->regLockLength = regSize;
- pRAID_Context->configSeqNum = raid->seqNum;
-
- /*
- * Get Phy Params only if FP capable, or else leave it to MR firmware
- * to do the calculation.
- */
- if (io_info->fpOkForIo) {
- retval = io_info->IoforUnevenSpan ?
- mr_spanset_get_phy_params(sc, ld,
- start_strip, ref_in_start_stripe, io_info,
- pRAID_Context, map) :
- MR_GetPhyParams(sc, ld, start_strip,
- ref_in_start_stripe, io_info, pRAID_Context, map);
- /* If IO on an invalid Pd, then FP is not possible */
- if (io_info->devHandle == MR_PD_INVALID)
- io_info->fpOkForIo = FALSE;
- return retval;
- }
- else if (isRead) {
- for (stripIdx=0; stripIdx<num_strips; stripIdx++) {
- retval = io_info->IoforUnevenSpan ?
- mr_spanset_get_phy_params(sc, ld,
- start_strip + stripIdx,
- ref_in_start_stripe, io_info,
- pRAID_Context, map) :
- MR_GetPhyParams(sc, ld,
- start_strip + stripIdx, ref_in_start_stripe,
- io_info, pRAID_Context, map);
- if (!retval)
- return TRUE;
- }
- }
+ }
+ /* add complete rows in the middle of the transfer */
+ if (numRows > 2)
+ regSize += (numRows - 2) << raid->stripeShift;
+
+ /* if IO ends within first strip of last row */
+ if (endStrip == get_strip_from_row(sc, ld, endRow, map))
+ regSize += ref_in_end_stripe + 1;
+ else
+ regSize += stripSize;
+ }
+ pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
+ else
+ pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
+ pRAID_Context->VirtualDiskTgtId = raid->targetId;
+ pRAID_Context->regLockRowLBA = regStart;
+ pRAID_Context->regLockLength = regSize;
+ pRAID_Context->configSeqNum = raid->seqNum;
+
+ /*
+ * Get Phy Params only if FP capable, or else leave it to MR firmware
+ * to do the calculation.
+ */
+ if (io_info->fpOkForIo) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(sc, ld, start_strip,
+ ref_in_start_stripe, io_info, pRAID_Context, map) :
+ MR_GetPhyParams(sc, ld, start_strip,
+ ref_in_start_stripe, io_info, pRAID_Context, map);
+ /* If IO on an invalid Pd, then FP is not possible */
+ if (io_info->devHandle == MR_PD_INVALID)
+ io_info->fpOkForIo = FALSE;
+ return retval;
+ } else if (isRead) {
+ for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(sc, ld, start_strip + stripIdx,
+ ref_in_start_stripe, io_info, pRAID_Context, map) :
+ MR_GetPhyParams(sc, ld, start_strip + stripIdx,
+ ref_in_start_stripe, io_info, pRAID_Context, map);
+ if (!retval)
+ return TRUE;
+ }
+ }
#if SPAN_DEBUG
- // Just for testing what arm we get for strip.
- get_arm_from_strip(sc, ld, start_strip, map);
+ /* Just for testing what arm we get for strip. */
+ get_arm_from_strip(sc, ld, start_strip, map);
#endif
- return TRUE;
+ return TRUE;
}
/*
-******************************************************************************
-*
-* This routine pepare spanset info from Valid Raid map and store it into
-* local copy of ldSpanInfo per instance data structure.
-*
-* Inputs :
-* map - LD map
-* ldSpanInfo - ldSpanInfo per HBA instance
-*
-*/
-void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+ *
+ * This routine prepares spanset info from Valid Raid map and stores it into local
+ * copy of ldSpanInfo per instance data structure.
+ *
+ * Inputs : LD map
+ * ldSpanInfo per HBA instance
+ *
+ */
+void
+mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
- u_int8_t span,count;
- u_int32_t element,span_row_width;
- u_int64_t span_row;
- MR_LD_RAID *raid;
- LD_SPAN_SET *span_set, *span_set_prev;
- MR_QUAD_ELEMENT *quad;
- int ldCount;
- u_int16_t ld;
-
- if (!ldSpanInfo)
- return;
-
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
- {
- ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES)
- continue;
- raid = MR_LdRaidGet(ld, map);
- for (element=0; element < MAX_QUAD_DEPTH; element++) {
- for (span=0; span < raid->spanDepth; span++) {
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements < element+1)
- continue;
- // TO-DO
- span_set = &(ldSpanInfo[ld].span_set[element]);
- quad = &map->raidMap.ldSpanMap[ld].
- spanBlock[span].block_span_info.
- quad[element];
-
- span_set->diff = quad->diff;
-
- for (count=0,span_row_width=0;
- count<raid->spanDepth; count++) {
- if (map->raidMap.ldSpanMap[ld].
- spanBlock[count].
- block_span_info.
- noElements >=element+1) {
- span_set->strip_offset[count] =
- span_row_width;
- span_row_width +=
- MR_LdSpanPtrGet
- (ld, count, map)->spanRowDataSize;
+ u_int8_t span, count;
+ u_int32_t element, span_row_width;
+ u_int64_t span_row;
+ MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set, *span_set_prev;
+ MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u_int16_t ld;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES)
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+ for (span = 0; span < raid->spanDepth; span++) {
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements < element + 1)
+ continue;
+ /* TO-DO */
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.quad[element];
+
+ span_set->diff = quad->diff;
+
+ for (count = 0, span_row_width = 0;
+ count < raid->spanDepth; count++) {
+ if (map->raidMap.ldSpanMap[ld].spanBlock[count].
+ block_span_info.noElements >= element + 1) {
+ span_set->strip_offset[count] = span_row_width;
+ span_row_width +=
+ MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize;
#if SPAN_DEBUG
- printf("LSI Debug span %x rowDataSize %x\n",
- count, MR_LdSpanPtrGet
- (ld, count, map)->spanRowDataSize);
+ printf("LSI Debug span %x rowDataSize %x\n", count,
+ MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize);
#endif
- }
- }
-
- span_set->span_row_data_width = span_row_width;
- span_row = mega_div64_32(((quad->logEnd -
- quad->logStart) + quad->diff), quad->diff);
-
- if (element == 0) {
- span_set->log_start_lba = 0;
- span_set->log_end_lba =
- ((span_row << raid->stripeShift) * span_row_width) - 1;
-
- span_set->span_row_start = 0;
- span_set->span_row_end = span_row - 1;
-
- span_set->data_strip_start = 0;
- span_set->data_strip_end =
- (span_row * span_row_width) - 1;
-
- span_set->data_row_start = 0;
- span_set->data_row_end =
- (span_row * quad->diff) - 1;
- } else {
- span_set_prev = &(ldSpanInfo[ld].
- span_set[element - 1]);
- span_set->log_start_lba =
- span_set_prev->log_end_lba + 1;
- span_set->log_end_lba =
- span_set->log_start_lba +
- ((span_row << raid->stripeShift) * span_row_width) - 1;
-
- span_set->span_row_start =
- span_set_prev->span_row_end + 1;
- span_set->span_row_end =
- span_set->span_row_start + span_row - 1;
-
- span_set->data_strip_start =
- span_set_prev->data_strip_end + 1;
- span_set->data_strip_end =
- span_set->data_strip_start +
- (span_row * span_row_width) - 1;
-
- span_set->data_row_start =
- span_set_prev->data_row_end + 1;
- span_set->data_row_end =
- span_set->data_row_start +
- (span_row * quad->diff) - 1;
- }
- break;
- }
- if (span == raid->spanDepth) break; // no quads remain
- }
- }
+ }
+ }
+
+ span_set->span_row_data_width = span_row_width;
+ span_row = mega_div64_32(((quad->logEnd -
+ quad->logStart) + quad->diff), quad->diff);
+
+ if (element == 0) {
+ span_set->log_start_lba = 0;
+ span_set->log_end_lba =
+ ((span_row << raid->stripeShift) * span_row_width) - 1;
+
+ span_set->span_row_start = 0;
+ span_set->span_row_end = span_row - 1;
+
+ span_set->data_strip_start = 0;
+ span_set->data_strip_end = (span_row * span_row_width) - 1;
+
+ span_set->data_row_start = 0;
+ span_set->data_row_end = (span_row * quad->diff) - 1;
+ } else {
+ span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]);
+ span_set->log_start_lba = span_set_prev->log_end_lba + 1;
+ span_set->log_end_lba = span_set->log_start_lba +
+ ((span_row << raid->stripeShift) * span_row_width) - 1;
+
+ span_set->span_row_start = span_set_prev->span_row_end + 1;
+ span_set->span_row_end =
+ span_set->span_row_start + span_row - 1;
+
+ span_set->data_strip_start =
+ span_set_prev->data_strip_end + 1;
+ span_set->data_strip_end = span_set->data_strip_start +
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start = span_set_prev->data_row_end + 1;
+ span_set->data_row_end = span_set->data_row_start +
+ (span_row * quad->diff) - 1;
+ }
+ break;
+ }
+ if (span == raid->spanDepth)
+ break; /* no quads remain */
+ }
+ }
#if SPAN_DEBUG
- getSpanInfo(map, ldSpanInfo); //to get span set info
+ getSpanInfo(map, ldSpanInfo); /* to get span set info */
#endif
}
-/**
- * mrsas_update_load_balance_params: Update load balance parmas
- * Inputs: map pointer
- * Load balance info
- * io_info pointer
+/*
+ * mrsas_update_load_balance_params: Update load balance params
+ * Inputs: map pointer
+ * Load balance info
*
- * This function updates the load balance parameters for the LD config
- * of a two drive optimal RAID-1.
+ * This function updates the load balance parameters for the LD config of a two
+ * drive optimal RAID-1.
*/
-void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
- PLD_LOAD_BALANCE_INFO lbInfo)
+void
+mrsas_update_load_balance_params(MR_DRV_RAID_MAP_ALL * map,
+ PLD_LOAD_BALANCE_INFO lbInfo)
{
- int ldCount;
- u_int16_t ld;
- u_int32_t pd, arRef;
- MR_LD_RAID *raid;
-
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
- {
- ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES) {
- lbInfo[ldCount].loadBalanceFlag = 0;
- continue;
- }
-
- raid = MR_LdRaidGet(ld, map);
-
- /* Two drive Optimal RAID 1 */
- if ((raid->level == 1) && (raid->rowSize == 2) &&
- (raid->spanDepth == 1)
- && raid->ldState == MR_LD_STATE_OPTIMAL) {
- lbInfo[ldCount].loadBalanceFlag = 1;
-
- /* Get the array on which this span is present */
- arRef = MR_LdSpanArrayGet(ld, 0, map);
-
- /* Get the PD */
- pd = MR_ArPdGet(arRef, 0, map);
- /* Get dev handle from PD */
- lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map);
- pd = MR_ArPdGet(arRef, 1, map);
- lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map);
- }
- else
- lbInfo[ldCount].loadBalanceFlag = 0;
- }
+ int ldCount;
+ u_int16_t ld;
+ u_int32_t pd, arRef;
+ MR_LD_RAID *raid;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES) {
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+ raid = MR_LdRaidGet(ld, map);
+
+ /* Two drive Optimal RAID 1 */
+ if ((raid->level == 1) && (raid->rowSize == 2) &&
+ (raid->spanDepth == 1)
+ && raid->ldState == MR_LD_STATE_OPTIMAL) {
+ lbInfo[ldCount].loadBalanceFlag = 1;
+
+ /* Get the array on which this span is present */
+ arRef = MR_LdSpanArrayGet(ld, 0, map);
+
+ /* Get the PD */
+ pd = MR_ArPdGet(arRef, 0, map);
+ /* Get dev handle from PD */
+ lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map);
+ pd = MR_ArPdGet(arRef, 1, map);
+ lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map);
+ } else
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ }
}
-/**
- * mrsas_set_pd_lba: Sets PD LBA
- * input: io_request pointer
- * CDB length
- * io_info pointer
- * Pointer to CCB
- * Local RAID map pointer
- * Start block of IO
- * Block Size
+/*
+ * mrsas_set_pd_lba: Sets PD LBA
+ * input: io_request pointer
+ * CDB length
+ * io_info pointer
+ * Pointer to CCB
+ * Local RAID map pointer
+ * Start block of IO, Block Size
*
* Used to set the PD logical block address in CDB for FP IOs.
*/
-void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
+void
+mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len,
struct IO_REQUEST_INFO *io_info, union ccb *ccb,
- MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
u_int32_t ld_block_size)
{
- MR_LD_RAID *raid;
- u_int32_t ld;
- u_int64_t start_blk = io_info->pdBlock;
- u_int8_t *cdb = io_request->CDB.CDB32;
- u_int32_t num_blocks = io_info->numBlocks;
- u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
- struct ccb_hdr *ccb_h = &(ccb->ccb_h);
-
- /* Check if T10 PI (DIF) is enabled for this LD */
- ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
- raid = MR_LdRaidGet(ld, local_map_ptr);
- if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
- memset(cdb, 0, sizeof(io_request->CDB.CDB32));
- cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
- cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
-
- if (ccb_h->flags == CAM_DIR_OUT)
- cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
- else
- cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
- cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;
-
- /* LBA */
- cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
- cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
- cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
- cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
- cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
- cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
- cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
- cdb[19] = (u_int8_t)(start_blk & 0xff);
-
- /* Logical block reference tag */
- io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag);
- io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
- io_request->IoFlags = 32; /* Specify 32-byte cdb */
-
- /* Transfer length */
- cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
- cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
- cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
- cdb[31] = (u_int8_t)(num_blocks & 0xff);
-
- /* set SCSI IO EEDP Flags */
- if (ccb_h->flags == CAM_DIR_OUT) {
- io_request->EEDPFlags =
- MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- }
- else {
- io_request->EEDPFlags =
- MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
- }
- io_request->Control |= (0x4 << 26);
- io_request->EEDPBlockSize = ld_block_size;
- }
- else {
- /* Some drives don't support 16/12 byte CDB's, convert to 10 */
- if (((cdb_len == 12) || (cdb_len == 16)) &&
- (start_blk <= 0xffffffff)) {
- if (cdb_len == 16) {
- opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
- flagvals = cdb[1];
- groupnum = cdb[14];
- control = cdb[15];
- }
- else {
- opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
- flagvals = cdb[1];
- groupnum = cdb[10];
- control = cdb[11];
- }
-
- memset(cdb, 0, sizeof(io_request->CDB.CDB32));
-
- cdb[0] = opcode;
- cdb[1] = flagvals;
- cdb[6] = groupnum;
- cdb[9] = control;
-
- /* Transfer length */
- cdb[8] = (u_int8_t)(num_blocks & 0xff);
- cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
-
- io_request->IoFlags = 10; /* Specify 10-byte cdb */
- cdb_len = 10;
- } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
- /* Convert to 16 byte CDB for large LBA's */
- switch (cdb_len) {
- case 6:
- opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
- control = cdb[5];
- break;
- case 10:
- opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
- flagvals = cdb[1];
- groupnum = cdb[6];
- control = cdb[9];
- break;
- case 12:
- opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
- flagvals = cdb[1];
- groupnum = cdb[10];
- control = cdb[11];
- break;
- }
-
- memset(cdb, 0, sizeof(io_request->CDB.CDB32));
-
- cdb[0] = opcode;
- cdb[1] = flagvals;
- cdb[14] = groupnum;
- cdb[15] = control;
-
- /* Transfer length */
- cdb[13] = (u_int8_t)(num_blocks & 0xff);
- cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
- cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
- cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);
-
- io_request->IoFlags = 16; /* Specify 16-byte cdb */
- cdb_len = 16;
- } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
- /* convert to 10 byte CDB */
- opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
- control = cdb[5];
-
- memset(cdb, 0, sizeof(io_request->CDB.CDB32));
- cdb[0] = opcode;
- cdb[9] = control;
-
- /* Set transfer length */
- cdb[8] = (u_int8_t)(num_blocks & 0xff);
- cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
-
- /* Specify 10-byte cdb */
- cdb_len = 10;
+ MR_LD_RAID *raid;
+ u_int32_t ld;
+ u_int64_t start_blk = io_info->pdBlock;
+ u_int8_t *cdb = io_request->CDB.CDB32;
+ u_int32_t num_blocks = io_info->numBlocks;
+ u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+
+ /* Check if T10 PI (DIF) is enabled for this LD */
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+ if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+ cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
+ cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
+
+ if (ccb_h->flags == CAM_DIR_OUT)
+ cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
+ else
+ cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
+ cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;
+
+ /* LBA */
+ cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
+ cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
+ cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
+ cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
+ cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
+ cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[19] = (u_int8_t)(start_blk & 0xff);
+
+ /* Logical block reference tag */
+ io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag);
+ io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
+ io_request->IoFlags = 32; /* Specify 32-byte cdb */
+
+ /* Transfer length */
+ cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
+ cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
+ cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
+ cdb[31] = (u_int8_t)(num_blocks & 0xff);
+
+ /* set SCSI IO EEDP Flags */
+ if (ccb_h->flags == CAM_DIR_OUT) {
+ io_request->EEDPFlags =
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ } else {
+ io_request->EEDPFlags =
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ }
+ io_request->Control |= (0x4 << 26);
+ io_request->EEDPBlockSize = ld_block_size;
+ } else {
+ /* Some drives don't support 16/12 byte CDB's, convert to 10 */
+ if (((cdb_len == 12) || (cdb_len == 16)) &&
+ (start_blk <= 0xffffffff)) {
+ if (cdb_len == 16) {
+ opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[14];
+ control = cdb[15];
+ } else {
+ opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[6] = groupnum;
+ cdb[9] = control;
+
+ /* Transfer length */
+ cdb[8] = (u_int8_t)(num_blocks & 0xff);
+ cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
+
+ io_request->IoFlags = 10; /* Specify 10-byte cdb */
+ cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+ /* Convert to 16 byte CDB for large LBA's */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u_int8_t)(num_blocks & 0xff);
+ cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = 16; /* Specify 16-byte cdb */
+ cdb_len = 16;
+ } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
+ /* convert to 10 byte CDB */
+ opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
+ control = cdb[5];
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+ cdb[0] = opcode;
+ cdb[9] = control;
+
+ /* Set transfer length */
+ cdb[8] = (u_int8_t)(num_blocks & 0xff);
+ cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
+
+ /* Specify 10-byte cdb */
+ cdb_len = 10;
+ }
+ /* Fall through normal case, just load LBA here */
+ u_int8_t val = cdb[1] & 0xE0;
+ switch (cdb_len) {
+ case 6:
+ cdb[3] = (u_int8_t)(start_blk & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
+ break;
+ case 10:
+ cdb[5] = (u_int8_t)(start_blk & 0xff);
+ cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
+ break;
+ case 12:
+ cdb[5] = (u_int8_t)(start_blk & 0xff);
+ cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
+ break;
+ case 16:
+ cdb[9] = (u_int8_t)(start_blk & 0xff);
+ cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
+ cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
+ cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
+ cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
+ break;
+ }
}
-
- /* Fall through normal case, just load LBA here */
- switch (cdb_len)
- {
- case 6:
- {
- u_int8_t val = cdb[1] & 0xE0;
- cdb[3] = (u_int8_t)(start_blk & 0xff);
- cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
- cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
- break;
- }
- case 10:
- cdb[5] = (u_int8_t)(start_blk & 0xff);
- cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
- cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
- cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
- break;
- case 12:
- cdb[5] = (u_int8_t)(start_blk & 0xff);
- cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
- cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
- cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
- break;
- case 16:
- cdb[9] = (u_int8_t)(start_blk & 0xff);
- cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
- cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
- cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
- cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
- cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
- cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
- cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
- break;
- }
- }
}
-/**
- * mrsas_get_best_arm Determine the best spindle arm
- * Inputs: Load balance info
+/*
+ * mrsas_get_best_arm: Determine the best spindle arm
+ * Inputs: Load balance info
*
* This function determines and returns the best arm by looking at the
* parameters of the last PD access.
*/
-u_int8_t mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
- u_int64_t block, u_int32_t count)
+u_int8_t
+mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
+ u_int64_t block, u_int32_t count)
{
- u_int16_t pend0, pend1;
- u_int64_t diff0, diff1;
- u_int8_t bestArm;
+ u_int16_t pend0, pend1;
+ u_int64_t diff0, diff1;
+ u_int8_t bestArm;
- /* get the pending cmds for the data and mirror arms */
- pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
- pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
+ /* get the pending cmds for the data and mirror arms */
+ pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
+ pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
- /* Determine the disk whose head is nearer to the req. block */
- diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
- diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
- bestArm = (diff0 <= diff1 ? 0 : 1);
+ /* Determine the disk whose head is nearer to the req. block */
+ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
+ diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
+ bestArm = (diff0 <= diff1 ? 0 : 1);
- if ((bestArm == arm && pend0 > pend1 + 16) || (bestArm != arm && pend1 > pend0 + 16))
- bestArm ^= 1;
+ if ((bestArm == arm && pend0 > pend1 + 16) || (bestArm != arm && pend1 > pend0 + 16))
+ bestArm ^= 1;
- /* Update the last accessed block on the correct pd */
- lbInfo->last_accessed_block[bestArm] = block + count - 1;
+ /* Update the last accessed block on the correct pd */
+ lbInfo->last_accessed_block[bestArm] = block + count - 1;
- return bestArm;
+ return bestArm;
}
-/**
- * mrsas_get_updated_dev_handle Get the update dev handle
- * Inputs: Load balance info
- * io_info pointer
+/*
+ * mrsas_get_updated_dev_handle: Get the update dev handle
+ * Inputs: Load balance info, io_info pointer
*
* This function determines and returns the updated dev handle.
*/
-u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
- struct IO_REQUEST_INFO *io_info)
+u_int16_t
+mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+ struct IO_REQUEST_INFO *io_info)
{
- u_int8_t arm, old_arm;
- u_int16_t devHandle;
+ u_int8_t arm, old_arm;
+ u_int16_t devHandle;
- old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
+ old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
- /* get best new arm */
- arm = mrsas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, io_info->numBlocks);
- devHandle = lbInfo->raid1DevHandle[arm];
- atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
+ /* get best new arm */
+ arm = mrsas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, io_info->numBlocks);
+ devHandle = lbInfo->raid1DevHandle[arm];
+ atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
- return devHandle;
+ return devHandle;
}
-/**
- * MR_GetPhyParams Calculates arm, span, and block
- * Inputs: Adapter instance soft state
- * Logical drive number (LD)
- * Stripe number (stripRow)
- * Reference in stripe (stripRef)
- * Outputs: Span number
- * Absolute Block number in the physical disk
+/*
+ * MR_GetPhyParams: Calculates arm, span, and block
+ * Inputs: Adapter soft state
+ * Logical drive number (LD)
+ * Stripe number(stripRow)
+ * Reference in stripe (stripRef)
*
- * This routine calculates the arm, span and block for the specified stripe
- * and reference in stripe.
+ * Outputs: Absolute Block number in the physical disk
+ *
+ * This routine calculates the arm, span and block for the specified stripe and
+ * reference in stripe.
*/
-u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
- u_int64_t stripRow,
- u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+u_int8_t
+MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t stripRow,
+ u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
- MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- u_int32_t pd, arRef;
- u_int8_t physArm, span;
- u_int64_t row;
- u_int8_t retval = TRUE;
- int error_code = 0;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u_int32_t pd, arRef;
+ u_int8_t physArm, span;
+ u_int64_t row;
+ u_int8_t retval = TRUE;
+ int error_code = 0;
u_int64_t *pdBlock = &io_info->pdBlock;
- u_int16_t *pDevHandle = &io_info->devHandle;
- u_int32_t rowMod, armQ, arm, logArm;
+ u_int16_t *pDevHandle = &io_info->devHandle;
+ u_int32_t rowMod, armQ, arm, logArm;
u_int8_t do_invader = 0;
if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
do_invader = 1;
- row = mega_div64_32(stripRow, raid->rowDataSize);
-
- if (raid->level == 6) {
- logArm = mega_mod64(stripRow, raid->rowDataSize); // logical arm within row
- if (raid->rowSize == 0)
- return FALSE;
- rowMod = mega_mod64(row, raid->rowSize); // get logical row mod
- armQ = raid->rowSize-1-rowMod; // index of Q drive
- arm = armQ+1+logArm; // data always logically follows Q
- if (arm >= raid->rowSize) // handle wrap condition
- arm -= raid->rowSize;
- physArm = (u_int8_t)arm;
- }
- else {
- if (raid->modFactor == 0)
- return FALSE;
- physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
- }
-
- if (raid->spanDepth == 1) {
- span = 0;
- *pdBlock = row << raid->stripeShift;
- }
- else {
- span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
- if (error_code == 1)
- return FALSE;
- }
-
- /* Get the array on which this span is present */
- arRef = MR_LdSpanArrayGet(ld, span, map);
-
- pd = MR_ArPdGet(arRef, physArm, map); // Get the Pd.
-
- if (pd != MR_PD_INVALID)
- *pDevHandle = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
- else {
- *pDevHandle = MR_PD_INVALID; // set dev handle as invalid.
- if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
- raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
- pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
- else if (raid->level == 1) {
- pd = MR_ArPdGet(arRef, physArm + 1, map); // Get Alternate Pd.
- if (pd != MR_PD_INVALID)
- *pDevHandle = MR_PdDevHandleGet(pd, map);//Get dev handle from Pd.
- }
- }
-
- *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
- pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
- return retval;
+ row = mega_div64_32(stripRow, raid->rowDataSize);
+
+ if (raid->level == 6) {
+ /* logical arm within row */
+ logArm = mega_mod64(stripRow, raid->rowDataSize);
+ if (raid->rowSize == 0)
+ return FALSE;
+ rowMod = mega_mod64(row, raid->rowSize); /* get logical row mod */
+ armQ = raid->rowSize - 1 - rowMod; /* index of Q drive */
+ arm = armQ + 1 + logArm;/* data always logically follows Q */
+ if (arm >= raid->rowSize) /* handle wrap condition */
+ arm -= raid->rowSize;
+ physArm = (u_int8_t)arm;
+ } else {
+ if (raid->modFactor == 0)
+ return FALSE;
+ physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
+ }
+
+ if (raid->spanDepth == 1) {
+ span = 0;
+ *pdBlock = row << raid->stripeShift;
+ } else {
+ span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
+ if (error_code == 1)
+ return FALSE;
+ }
+
+ /* Get the array on which this span is present */
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+
+ pd = MR_ArPdGet(arRef, physArm, map); /* Get the Pd. */
+
+ if (pd != MR_PD_INVALID)
+ /* Get dev handle from Pd */
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
+ if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ /* Get Alternate Pd. */
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ /* Get dev handle from Pd. */
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ return retval;
}
-/**
- * MR_GetSpanBlock Calculates span block
- * Inputs: LD
- * row
- * PD span block
- * RAID map pointer
- * Outputs: Span number
- * Error code
+/*
+ * MR_GetSpanBlock: Calculates span block
+ * Inputs: LD
+ * row
+ * PD span block
+ * RAID map pointer
*
- * This routine calculates the span from the span block info.
+ * Outputs: Span number, Error code
+ *
+ * This routine calculates the span from the span block info.
*/
-u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
- MR_FW_RAID_MAP_ALL *map, int *div_error)
+u_int32_t
+MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
+ MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
- MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
- MR_QUAD_ELEMENT *quad;
- MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- u_int32_t span, j;
- u_int64_t blk, debugBlk;
-
- for (span=0; span < raid->spanDepth; span++, pSpanBlock++) {
- for (j=0; j < pSpanBlock->block_span_info.noElements; j++) {
- quad = &pSpanBlock->block_span_info.quad[j];
- if (quad->diff == 0) {
- *div_error = 1;
- return span;
- }
- if (quad->logStart <= row && row <= quad->logEnd &&
- (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
- if (span_blk != NULL) {
- blk = mega_div64_32((row-quad->logStart), quad->diff);
- debugBlk = blk;
- blk = (blk + quad->offsetInSpan) << raid->stripeShift;
- *span_blk = blk;
- }
- return span;
- }
- }
- }
- return span;
+ MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
+ MR_QUAD_ELEMENT *quad;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u_int32_t span, j;
+ u_int64_t blk, debugBlk;
+
+ for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
+ for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
+ quad = &pSpanBlock->block_span_info.quad[j];
+ if (quad->diff == 0) {
+ *div_error = 1;
+ return span;
+ }
+ if (quad->logStart <= row && row <= quad->logEnd &&
+ (mega_mod64(row - quad->logStart, quad->diff)) == 0) {
+ if (span_blk != NULL) {
+ blk = mega_div64_32((row - quad->logStart), quad->diff);
+ debugBlk = blk;
+ blk = (blk + quad->offsetInSpan) << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return span;
}
-
diff --git a/sys/dev/mrsas/mrsas_ioctl.c b/sys/dev/mrsas/mrsas_ioctl.c
index 6343aa5..8e2aead 100644
--- a/sys/dev/mrsas/mrsas_ioctl.c
+++ b/sys/dev/mrsas/mrsas_ioctl.c
@@ -1,43 +1,38 @@
/*
- * Copyright (c) 2014, LSI Corp.
- * All rights reserved.
- * Author: Marian Choy
+ * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
* Support: freebsdraid@lsi.com
*
* Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. Neither the name of the <ORGANIZATION> nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer. 2. Redistributions
+ * in binary form must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution. 3. Neither the name of the
+ * <ORGANIZATION> nor the names of its contributors may be used to endorse or
+ * promote products derived from this software without specific prior written
+ * permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing
* official policies,either expressed or implied, of the FreeBSD Project.
*
- * Send feedback to: <megaraidfbsd@lsi.com>
- * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
- * ATTN: MegaRaid FreeBSD
+ * Send feedback to: <megaraidfbsd@lsi.com> Mail to: LSI Corporation, 1621
+ * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
*
*/
@@ -47,500 +42,464 @@ __FBSDID("$FreeBSD$");
#include <dev/mrsas/mrsas.h>
#include <dev/mrsas/mrsas_ioctl.h>
-/*
- * Function prototypes
+/*
+ * Function prototypes
*/
-int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
-int mrsas_passthru(struct mrsas_softc *sc, void *arg);
-void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
-void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
-void mrsas_dump_dcmd(struct mrsas_softc *sc, struct mrsas_dcmd_frame* dcmd);
-void mrsas_dump_ioctl(struct mrsas_softc *sc, struct mrsas_iocpacket *user_ioc);
-void * mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
+int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
+void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
+void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void *mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
static int mrsas_create_frame_pool(struct mrsas_softc *sc);
-static void mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
- int nsegs, int error);
+static void
+mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
+ int nsegs, int error);
-extern struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
+extern struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
extern void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
-extern int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
+extern int
+mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *cmd);
-
-/**
- * mrsas_dump_ioctl: Print debug output for DCMDs
- * input: Adapter instance soft state
- * DCMD frame structure
- *
- * This function is called from mrsas_passthru() to print out debug information
- * in the handling and routing of DCMD commands.
- */
-void mrsas_dump_dcmd( struct mrsas_softc *sc, struct mrsas_dcmd_frame* dcmd )
-{
- int i;
-
- device_printf(sc->mrsas_dev, "dcmd->cmd: 0x%02hhx\n", dcmd->cmd);
- device_printf(sc->mrsas_dev, "dcmd->cmd_status: 0x%02hhx\n", dcmd->cmd_status);
- device_printf(sc->mrsas_dev, "dcmd->sge_count: 0x%02hhx\n", dcmd->sge_count);
- device_printf(sc->mrsas_dev, "dcmd->context: 0x%08x\n", dcmd->context);
- device_printf(sc->mrsas_dev, "dcmd->flags: 0x%04hx\n", dcmd->flags);
- device_printf(sc->mrsas_dev, "dcmd->timeout: 0x%04hx\n", dcmd->timeout);
- device_printf(sc->mrsas_dev, "dcmd->data_xfer_len: 0x%08x\n", dcmd->data_xfer_len);
- device_printf(sc->mrsas_dev, "dcmd->opcode: 0x%08x\n", dcmd->opcode);
- device_printf(sc->mrsas_dev, "dcmd->mbox.w[0]: 0x%08x\n", dcmd->mbox.w[0]);
- device_printf(sc->mrsas_dev, "dcmd->mbox.w[1]: 0x%08x\n", dcmd->mbox.w[1]);
- device_printf(sc->mrsas_dev, "dcmd->mbox.w[2]: 0x%08x\n", dcmd->mbox.w[2]);
- for (i=0; i< MIN(MAX_IOCTL_SGE, dcmd->sge_count); i++) {
- device_printf(sc->mrsas_dev, "sgl[%02d]\n", i);
- device_printf(sc->mrsas_dev, " sge32[%02d].phys_addr: 0x%08x\n",
- i, dcmd->sgl.sge32[i].phys_addr);
- device_printf(sc->mrsas_dev, " sge32[%02d].length: 0x%08x\n",
- i, dcmd->sgl.sge32[i].length);
- device_printf(sc->mrsas_dev, " sge64[%02d].phys_addr: 0x%08llx\n",
- i, (long long unsigned int) dcmd->sgl.sge64[i].phys_addr);
- device_printf(sc->mrsas_dev, " sge64[%02d].length: 0x%08x\n",
- i, dcmd->sgl.sge64[i].length);
- }
-}
-
-/**
- * mrsas_dump_ioctl: Print debug output for ioctl
- * input: Adapter instance soft state
- * iocpacket structure
- *
- * This function is called from mrsas_passthru() to print out debug information
- * in the handling and routing of ioctl commands.
- */
-void mrsas_dump_ioctl(struct mrsas_softc *sc, struct mrsas_iocpacket *user_ioc)
-{
- union mrsas_frame *in_cmd = (union mrsas_frame *) &(user_ioc->frame.raw);
- struct mrsas_dcmd_frame* dcmd = (struct mrsas_dcmd_frame *) &(in_cmd->dcmd);
- int i;
-
- device_printf(sc->mrsas_dev,
- "====== In %s() ======================================\n", __func__);
- device_printf(sc->mrsas_dev, "host_no: 0x%04hx\n", user_ioc->host_no);
- device_printf(sc->mrsas_dev, " __pad1: 0x%04hx\n", user_ioc->__pad1);
- device_printf(sc->mrsas_dev, "sgl_off: 0x%08x\n", user_ioc->sgl_off);
- device_printf(sc->mrsas_dev, "sge_count: 0x%08x\n", user_ioc->sge_count);
- device_printf(sc->mrsas_dev, "sense_off: 0x%08x\n", user_ioc->sense_off);
- device_printf(sc->mrsas_dev, "sense_len: 0x%08x\n", user_ioc->sense_len);
-
- mrsas_dump_dcmd(sc, dcmd);
-
- for (i=0; i< MIN(MAX_IOCTL_SGE, user_ioc->sge_count); i++) {
- device_printf(sc->mrsas_dev, "sge[%02d]\n", i);
- device_printf(sc->mrsas_dev,
- " iov_base: %p\n", user_ioc->sgl[i].iov_base);
- device_printf(sc->mrsas_dev, " iov_len: %p\n",
- (void*)user_ioc->sgl[i].iov_len);
- }
- device_printf(sc->mrsas_dev,
- "==================================================================\n");
-}
-
-/**
- * mrsas_passthru: Handle pass-through commands
- * input: Adapter instance soft state
- * argument pointer
+/*
+ * mrsas_passthru: Handle pass-through commands
+ * input: Adapter instance soft state, argument pointer, ioctl command code
*
- * This function is called from mrsas_ioctl() to handle pass-through and
- * ioctl commands to Firmware.
+ * This function is called from mrsas_ioctl() to handle pass-through and ioctl
+ * commands to Firmware.
*/
-int mrsas_passthru( struct mrsas_softc *sc, void *arg )
+int
+mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
{
- struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
- union mrsas_frame *in_cmd = (union mrsas_frame *) &(user_ioc->frame.raw);
- struct mrsas_mfi_cmd *cmd = NULL;
- bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
- bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
- void *ioctl_data_mem[MAX_IOCTL_SGE]; // ioctl data virtual addr
- bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE]; // ioctl data phys addr
- bus_dma_tag_t ioctl_sense_tag = 0;
- bus_dmamap_t ioctl_sense_dmamap = 0;
- void *ioctl_sense_mem = 0;
- bus_addr_t ioctl_sense_phys_addr = 0;
- int i, adapter, ioctl_data_size, ioctl_sense_size, ret=0;
- struct mrsas_sge32 *kern_sge32;
- unsigned long *sense_ptr;
-
- /* For debug - uncomment the following line for debug output */
- //mrsas_dump_ioctl(sc, user_ioc);
-
- /*
- * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0. In this
- * case do nothing and return 0 to it as status.
- */
- if (in_cmd->dcmd.opcode == 0) {
- device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
- user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
- return (0);
- }
-
- /* Validate host_no */
- adapter = user_ioc->host_no;
- if (adapter != device_get_unit(sc->mrsas_dev)) {
- device_printf(sc->mrsas_dev, "In %s() IOCTL not for me!\n", __func__);
- return(ENOENT);
- }
-
- /* Validate SGL length */
- if (user_ioc->sge_count > MAX_IOCTL_SGE) {
- device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > 8).\n",
- __func__, user_ioc->sge_count);
- return(ENOENT);
- }
-
- /* Get a command */
- cmd = mrsas_get_mfi_cmd(sc);
- if (!cmd) {
- device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
- return(ENOMEM);
- }
-
- /*
- * User's IOCTL packet has 2 frames (maximum). Copy those two
- * frames into our cmd's frames. cmd->frame's context will get
- * overwritten when we copy from user's frames. So set that value
- * alone separately
- */
- memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
- cmd->frame->hdr.context = cmd->index;
- cmd->frame->hdr.pad_0 = 0;
- cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
- MFI_FRAME_SENSE64);
-
- /*
- * The management interface between applications and the fw uses
- * MFI frames. E.g, RAID configuration changes, LD property changes
- * etc are accomplishes through different kinds of MFI frames. The
- * driver needs to care only about substituting user buffers with
- * kernel buffers in SGLs. The location of SGL is embedded in the
- * struct iocpacket itself.
- */
- kern_sge32 = (struct mrsas_sge32 *)
- ((unsigned long)cmd->frame + user_ioc->sgl_off);
-
- /*
- * For each user buffer, create a mirror buffer and copy in
- */
- for (i=0; i < user_ioc->sge_count; i++) {
- if (!user_ioc->sgl[i].iov_len)
- continue;
- ioctl_data_size = user_ioc->sgl[i].iov_len;
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 1, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- ioctl_data_size, // maxsize
- 1, // msegments
- ioctl_data_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &ioctl_data_tag[i])) {
- device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
- (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
- device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
- return (ENOMEM);
- }
- if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
- ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
- &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
- return (ENOMEM);
- }
-
- /* Save the physical address and length */
- kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];
- kern_sge32[i].length = user_ioc->sgl[i].iov_len;
-
- /* Copy in data from user space */
- ret = copyin(user_ioc->sgl[i].iov_base, ioctl_data_mem[i],
- user_ioc->sgl[i].iov_len);
- if (ret) {
- device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
- goto out;
- }
- }
-
- ioctl_sense_size = user_ioc->sense_len;
- if (user_ioc->sense_len) {
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 1, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- ioctl_sense_size, // maxsize
- 1, // msegments
- ioctl_sense_size, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &ioctl_sense_tag)) {
- device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
- return (ENOMEM);
- }
- if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
- (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
- return (ENOMEM);
- }
- if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
- ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
- &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
- return (ENOMEM);
- }
- sense_ptr =
- (unsigned long *)((unsigned long)cmd->frame + user_ioc->sense_off);
- sense_ptr = ioctl_sense_mem;
- }
-
- /*
- * Set the sync_cmd flag so that the ISR knows not to complete this
- * cmd to the SCSI mid-layer
- */
- cmd->sync_cmd = 1;
- mrsas_issue_blocked_cmd(sc, cmd);
- cmd->sync_cmd = 0;
-
- /*
- * copy out the kernel buffers to user buffers
- */
- for (i = 0; i < user_ioc->sge_count; i++) {
- ret = copyout(ioctl_data_mem[i], user_ioc->sgl[i].iov_base,
- user_ioc->sgl[i].iov_len);
- if (ret) {
- device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
- goto out;
- }
- }
-
- /*
- * copy out the sense
- */
- if (user_ioc->sense_len) {
- /*
- * sense_buff points to the location that has the user
- * sense buffer address
- */
- sense_ptr = (unsigned long *) ((unsigned long)user_ioc->frame.raw +
- user_ioc->sense_off);
- ret = copyout(ioctl_sense_mem, (unsigned long*)*sense_ptr,
- user_ioc->sense_len);
- if (ret) {
- device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
- goto out;
- }
- }
-
- /*
- * Return command status to user space
- */
- memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status,
- sizeof(u_int8_t));
+ struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
+
+#ifdef COMPAT_FREEBSD32
+ struct mrsas_iocpacket32 *user_ioc32 = (struct mrsas_iocpacket32 *)arg;
+
+#endif
+ union mrsas_frame *in_cmd = (union mrsas_frame *)&(user_ioc->frame.raw);
+ struct mrsas_mfi_cmd *cmd = NULL;
+ bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
+ bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
+ void *ioctl_data_mem[MAX_IOCTL_SGE];
+ bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE];
+ bus_dma_tag_t ioctl_sense_tag = 0;
+ bus_dmamap_t ioctl_sense_dmamap = 0;
+ void *ioctl_sense_mem = 0;
+ bus_addr_t ioctl_sense_phys_addr = 0;
+ int i, ioctl_data_size = 0, ioctl_sense_size, ret = 0;
+ struct mrsas_sge32 *kern_sge32;
+ unsigned long *sense_ptr;
+ uint8_t *iov_base_ptrin = NULL;
+ size_t iov_len = 0;
+
+ /*
+ * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0. In
+ * this case do nothing and return 0 to it as status.
+ */
+ if (in_cmd->dcmd.opcode == 0) {
+ device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
+ user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
+ return (0);
+ }
+ /* Validate SGL length */
+ if (user_ioc->sge_count > MAX_IOCTL_SGE) {
+ device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > 8).\n",
+ __func__, user_ioc->sge_count);
+ return (ENOENT);
+ }
+ /* Get a command */
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
+ return (ENOMEM);
+ }
+ /*
+ * User's IOCTL packet has 2 frames (maximum). Copy those two frames
+ * into our cmd's frames. cmd->frame's context will get overwritten
+ * when we copy from user's frames. So set that value alone
+ * separately
+ */
+ memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+ cmd->frame->hdr.context = cmd->index;
+ cmd->frame->hdr.pad_0 = 0;
+ cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64);
+
+ /*
+ * The management interface between applications and the fw uses MFI
+ * frames. E.g, RAID configuration changes, LD property changes etc
+ * are accomplishes through different kinds of MFI frames. The driver
+ * needs to care only about substituting user buffers with kernel
+ * buffers in SGLs. The location of SGL is embedded in the struct
+ * iocpacket itself.
+ */
+ kern_sge32 = (struct mrsas_sge32 *)
+ ((unsigned long)cmd->frame + user_ioc->sgl_off);
+
+ /*
+ * For each user buffer, create a mirror buffer and copy in
+ */
+ for (i = 0; i < user_ioc->sge_count; i++) {
+ if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
+ if (!user_ioc->sgl[i].iov_len)
+ continue;
+ ioctl_data_size = user_ioc->sgl[i].iov_len;
+#ifdef COMPAT_FREEBSD32
+ } else {
+ if (!user_ioc32->sgl[i].iov_len)
+ continue;
+ ioctl_data_size = user_ioc32->sgl[i].iov_len;
+#endif
+ }
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ ioctl_data_size,
+ 1,
+ ioctl_data_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &ioctl_data_tag[i])) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
+ ret = ENOMEM;
+ goto out;
+ }
+ if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
+ (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
+ ret = ENOMEM;
+ goto out;
+ }
+ if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
+ ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
+ &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
+ ret = ENOMEM;
+ goto out;
+ }
+ /* Save the physical address and length */
+ kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];
+
+ if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
+ kern_sge32[i].length = user_ioc->sgl[i].iov_len;
+
+ iov_base_ptrin = user_ioc->sgl[i].iov_base;
+ iov_len = user_ioc->sgl[i].iov_len;
+#ifdef COMPAT_FREEBSD32
+ } else {
+ kern_sge32[i].length = user_ioc32->sgl[i].iov_len;
+
+ iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
+ iov_len = user_ioc32->sgl[i].iov_len;
+#endif
+ }
+
+ /* Copy in data from user space */
+ ret = copyin(iov_base_ptrin, ioctl_data_mem[i], iov_len);
+ if (ret) {
+ device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
+ goto out;
+ }
+ }
+
+ ioctl_sense_size = user_ioc->sense_len;
+
+ if (user_ioc->sense_len) {
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ ioctl_sense_size,
+ 1,
+ ioctl_sense_size,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &ioctl_sense_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
+ ret = ENOMEM;
+ goto out;
+ }
+ if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
+ (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense mem\n");
+ ret = ENOMEM;
+ goto out;
+ }
+ if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
+ ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
+ &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
+ ret = ENOMEM;
+ goto out;
+ }
+ sense_ptr =
+ (unsigned long *)((unsigned long)cmd->frame + user_ioc->sense_off);
+ sense_ptr = ioctl_sense_mem;
+ }
+ /*
+ * Set the sync_cmd flag so that the ISR knows not to complete this
+ * cmd to the SCSI mid-layer
+ */
+ cmd->sync_cmd = 1;
+ mrsas_issue_blocked_cmd(sc, cmd);
+ cmd->sync_cmd = 0;
+
+ /*
+ * copy out the kernel buffers to user buffers
+ */
+ for (i = 0; i < user_ioc->sge_count; i++) {
+ if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
+ iov_base_ptrin = user_ioc->sgl[i].iov_base;
+ iov_len = user_ioc->sgl[i].iov_len;
+#ifdef COMPAT_FREEBSD32
+ } else {
+ iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
+ iov_len = user_ioc32->sgl[i].iov_len;
+#endif
+ }
+
+ ret = copyout(ioctl_data_mem[i], iov_base_ptrin, iov_len);
+ if (ret) {
+ device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
+ goto out;
+ }
+ }
+
+ /*
+ * copy out the sense
+ */
+ if (user_ioc->sense_len) {
+ /*
+ * sense_buff points to the location that has the user sense
+ * buffer address
+ */
+ sense_ptr = (unsigned long *)((unsigned long)user_ioc->frame.raw +
+ user_ioc->sense_off);
+ ret = copyout(ioctl_sense_mem, (unsigned long *)*sense_ptr,
+ user_ioc->sense_len);
+ if (ret) {
+ device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
+ goto out;
+ }
+ }
+ /*
+ * Return command status to user space
+ */
+ memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status,
+ sizeof(u_int8_t));
out:
- /*
- * Release sense buffer
- */
- if (ioctl_sense_phys_addr)
- bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
- if (ioctl_sense_mem)
- bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem, ioctl_sense_dmamap);
- if (ioctl_sense_tag)
- bus_dma_tag_destroy(ioctl_sense_tag);
-
- /*
- * Release data buffers
- */
- for (i = 0; i < user_ioc->sge_count; i++) {
- if (!user_ioc->sgl[i].iov_len)
- continue;
- if (ioctl_data_phys_addr[i])
- bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
- if (ioctl_data_mem[i] != NULL)
- bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
- ioctl_data_dmamap[i]);
- if (ioctl_data_tag[i] != NULL)
- bus_dma_tag_destroy(ioctl_data_tag[i]);
- }
-
- /* Free command */
- mrsas_release_mfi_cmd(cmd);
-
- return(ret);
+ /*
+ * Release sense buffer
+ */
+ if (ioctl_sense_phys_addr)
+ bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
+ if (ioctl_sense_mem != NULL)
+ bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem, ioctl_sense_dmamap);
+ if (ioctl_sense_tag != NULL)
+ bus_dma_tag_destroy(ioctl_sense_tag);
+
+ /*
+ * Release data buffers
+ */
+ for (i = 0; i < user_ioc->sge_count; i++) {
+ if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
+ if (!user_ioc->sgl[i].iov_len)
+ continue;
+#ifdef COMPAT_FREEBSD32
+ } else {
+ if (!user_ioc32->sgl[i].iov_len)
+ continue;
+#endif
+ }
+ if (ioctl_data_phys_addr[i])
+ bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
+ if (ioctl_data_mem[i] != NULL)
+ bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
+ ioctl_data_dmamap[i]);
+ if (ioctl_data_tag[i] != NULL)
+ bus_dma_tag_destroy(ioctl_data_tag[i]);
+ }
+ /* Free command */
+ mrsas_release_mfi_cmd(cmd);
+
+ return (ret);
}
-/**
- * mrsas_alloc_mfi_cmds: Allocates the command packets
- * input: Adapter instance soft state
+/*
+ * mrsas_alloc_mfi_cmds: Allocates the command packets
+ * input: Adapter instance soft state
*
* Each IOCTL or passthru command that is issued to the FW are wrapped in a
- * local data structure called mrsas_mfi_cmd. The frame embedded in this
- * mrsas_mfi is issued to FW. The array is used only to look up the
+ * local data structure called mrsas_mfi_cmd. The frame embedded in this
+ * mrsas_mfi is issued to FW. The array is used only to look up the
* mrsas_mfi_cmd given the context. The free commands are maintained in a
* linked list.
*/
-int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc)
+int
+mrsas_alloc_mfi_cmds(struct mrsas_softc *sc)
{
- int i, j;
- u_int32_t max_cmd;
- struct mrsas_mfi_cmd *cmd;
-
- max_cmd = MRSAS_MAX_MFI_CMDS;
-
- /*
- * sc->mfi_cmd_list is an array of struct mrsas_mfi_cmd pointers. Allocate the
- * dynamic array first and then allocate individual commands.
- */
- sc->mfi_cmd_list = malloc(sizeof(struct mrsas_mfi_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
- if (!sc->mfi_cmd_list) {
- device_printf(sc->mrsas_dev, "Cannot alloc memory for mfi_cmd cmd_list.\n");
- return(ENOMEM);
- }
- memset(sc->mfi_cmd_list, 0, sizeof(struct mrsas_mfi_cmd *)*max_cmd);
- for (i = 0; i < max_cmd; i++) {
- sc->mfi_cmd_list[i] = malloc(sizeof(struct mrsas_mfi_cmd),
- M_MRSAS, M_NOWAIT);
- if (!sc->mfi_cmd_list[i]) {
- for (j = 0; j < i; j++)
- free(sc->mfi_cmd_list[j],M_MRSAS);
- free(sc->mfi_cmd_list, M_MRSAS);
- sc->mfi_cmd_list = NULL;
- return(ENOMEM);
- }
- }
-
- for (i = 0; i < max_cmd; i++) {
- cmd = sc->mfi_cmd_list[i];
- memset(cmd, 0, sizeof(struct mrsas_mfi_cmd));
- cmd->index = i;
- cmd->ccb_ptr = NULL;
- cmd->sc = sc;
- TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
- }
-
- /* create a frame pool and assign one frame to each command */
- if (mrsas_create_frame_pool(sc)) {
- device_printf(sc->mrsas_dev, "Cannot allocate DMA frame pool.\n");
- for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { // Free the frames
- cmd = sc->mfi_cmd_list[i];
- mrsas_free_frame(sc, cmd);
- }
- if (sc->mficmd_frame_tag != NULL)
- bus_dma_tag_destroy(sc->mficmd_frame_tag);
- return(ENOMEM);
- }
-
- return(0);
+ int i, j;
+ u_int32_t max_cmd;
+ struct mrsas_mfi_cmd *cmd;
+
+ max_cmd = MRSAS_MAX_MFI_CMDS;
+
+ /*
+ * sc->mfi_cmd_list is an array of struct mrsas_mfi_cmd pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ sc->mfi_cmd_list = malloc(sizeof(struct mrsas_mfi_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
+ if (!sc->mfi_cmd_list) {
+ device_printf(sc->mrsas_dev, "Cannot alloc memory for mfi_cmd cmd_list.\n");
+ return (ENOMEM);
+ }
+ memset(sc->mfi_cmd_list, 0, sizeof(struct mrsas_mfi_cmd *) * max_cmd);
+ for (i = 0; i < max_cmd; i++) {
+ sc->mfi_cmd_list[i] = malloc(sizeof(struct mrsas_mfi_cmd),
+ M_MRSAS, M_NOWAIT);
+ if (!sc->mfi_cmd_list[i]) {
+ for (j = 0; j < i; j++)
+ free(sc->mfi_cmd_list[j], M_MRSAS);
+ free(sc->mfi_cmd_list, M_MRSAS);
+ sc->mfi_cmd_list = NULL;
+ return (ENOMEM);
+ }
+ }
+
+ for (i = 0; i < max_cmd; i++) {
+ cmd = sc->mfi_cmd_list[i];
+ memset(cmd, 0, sizeof(struct mrsas_mfi_cmd));
+ cmd->index = i;
+ cmd->ccb_ptr = NULL;
+ cmd->sc = sc;
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
+ }
+
+ /* create a frame pool and assign one frame to each command */
+ if (mrsas_create_frame_pool(sc)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate DMA frame pool.\n");
+ /* Free the frames */
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+ cmd = sc->mfi_cmd_list[i];
+ mrsas_free_frame(sc, cmd);
+ }
+ if (sc->mficmd_frame_tag != NULL)
+ bus_dma_tag_destroy(sc->mficmd_frame_tag);
+ return (ENOMEM);
+ }
+ return (0);
}
-/**
- * mrsas_create_frame_pool - Creates DMA pool for cmd frames
- * input: Adapter soft state
+/*
+ * mrsas_create_frame_pool: Creates DMA pool for cmd frames
+ * input: Adapter soft state
*
* Each command packet has an embedded DMA memory buffer that is used for
* filling MFI frame and the SG list that immediately follows the frame. This
* function creates those DMA memory buffers for each command packet by using
- * PCI pool facility. pad_0 is initialized to 0 to prevent corrupting value
+ * PCI pool facility. pad_0 is initialized to 0 to prevent corrupting value
* of context and could cause FW crash.
*/
-static int mrsas_create_frame_pool(struct mrsas_softc *sc)
+static int
+mrsas_create_frame_pool(struct mrsas_softc *sc)
{
- int i;
- struct mrsas_mfi_cmd *cmd;
-
- if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
- 1, 0, // algnmnt, boundary
- BUS_SPACE_MAXADDR_32BIT,// lowaddr
- BUS_SPACE_MAXADDR, // highaddr
- NULL, NULL, // filter, filterarg
- MRSAS_MFI_FRAME_SIZE, // maxsize
- 1, // msegments
- MRSAS_MFI_FRAME_SIZE, // maxsegsize
- BUS_DMA_ALLOCNOW, // flags
- NULL, NULL, // lockfunc, lockarg
- &sc->mficmd_frame_tag)) {
- device_printf(sc->mrsas_dev, "Cannot create MFI frame tag\n");
- return (ENOMEM);
- }
-
- for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
- cmd = sc->mfi_cmd_list[i];
- cmd->frame = mrsas_alloc_frame(sc, cmd);
- if (cmd->frame == NULL) {
- device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
- return (ENOMEM);
- }
- memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
- cmd->frame->io.context = cmd->index;
- cmd->frame->io.pad_0 = 0;
- }
-
- return(0);
+ int i;
+ struct mrsas_mfi_cmd *cmd;
+
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 1, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ MRSAS_MFI_FRAME_SIZE,
+ 1,
+ MRSAS_MFI_FRAME_SIZE,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->mficmd_frame_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create MFI frame tag\n");
+ return (ENOMEM);
+ }
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+ cmd = sc->mfi_cmd_list[i];
+ cmd->frame = mrsas_alloc_frame(sc, cmd);
+ if (cmd->frame == NULL) {
+ device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
+ return (ENOMEM);
+ }
+ memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
+ cmd->frame->io.context = cmd->index;
+ cmd->frame->io.pad_0 = 0;
+ }
+
+ return (0);
}
-/**
- * mrsas_alloc_frame - Allocates MFI Frames
- * input: Adapter soft state
+/*
+ * mrsas_alloc_frame: Allocates MFI Frames
+ * input: Adapter soft state
*
- * Create bus DMA memory tag and dmamap and load memory for MFI frames.
- * Returns virtual memory pointer to allocated region.
+ * Create bus DMA memory tag and dmamap and load memory for MFI frames. Returns
+ * virtual memory pointer to allocated region.
*/
-void *mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+void *
+mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
- u_int32_t frame_size = MRSAS_MFI_FRAME_SIZE;
-
- if (bus_dmamem_alloc(sc->mficmd_frame_tag, (void **)&cmd->frame_mem,
- BUS_DMA_NOWAIT, &cmd->frame_dmamap)) {
- device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
- return (NULL);
- }
- if (bus_dmamap_load(sc->mficmd_frame_tag, cmd->frame_dmamap,
- cmd->frame_mem, frame_size, mrsas_alloc_cb,
- &cmd->frame_phys_addr, BUS_DMA_NOWAIT)) {
- device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
- return (NULL);
- }
-
- return(cmd->frame_mem);
+ u_int32_t frame_size = MRSAS_MFI_FRAME_SIZE;
+
+ if (bus_dmamem_alloc(sc->mficmd_frame_tag, (void **)&cmd->frame_mem,
+ BUS_DMA_NOWAIT, &cmd->frame_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
+ return (NULL);
+ }
+ if (bus_dmamap_load(sc->mficmd_frame_tag, cmd->frame_dmamap,
+ cmd->frame_mem, frame_size, mrsas_alloc_cb,
+ &cmd->frame_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
+ return (NULL);
+ }
+ return (cmd->frame_mem);
}
/*
- * mrsas_alloc_cb: Callback function of bus_dmamap_load()
- * input: callback argument,
- * machine dependent type that describes DMA segments,
- * number of segments,
- * error code.
+ * mrsas_alloc_cb: Callback function of bus_dmamap_load()
+ * input: callback argument,
+ * machine dependent type that describes DMA segments,
+ * number of segments,
+ * error code.
*
- * This function is for the driver to receive mapping information resultant
- * of the bus_dmamap_load(). The information is actually not being used,
- * but the address is saved anyway.
+ * This function is for the driver to receive mapping information resultant of
+ * the bus_dmamap_load(). The information is actually not being used, but the
+ * address is saved anyway.
*/
-static void mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
- int nsegs, int error)
+static void
+mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
+ int nsegs, int error)
{
- bus_addr_t *addr;
+ bus_addr_t *addr;
- addr = arg;
- *addr = segs[0].ds_addr;
+ addr = arg;
+ *addr = segs[0].ds_addr;
}
-/**
- * mrsas_free_frames: Frees memory for MFI frames
- * input: Adapter soft state
+/*
+ * mrsas_free_frames: Frees memory for MFI frames
+ * input: Adapter soft state
*
- * Deallocates MFI frames memory. Called from mrsas_free_mem() during
- * detach and error case during creation of frame pool.
+ * Deallocates MFI frames memory. Called from mrsas_free_mem() during detach
+ * and error case during creation of frame pool.
*/
-void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+void
+mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
- if (cmd->frame_phys_addr)
- bus_dmamap_unload(sc->mficmd_frame_tag, cmd->frame_dmamap);
- if (cmd->frame_mem != NULL)
- bus_dmamem_free(sc->mficmd_frame_tag, cmd->frame_mem, cmd->frame_dmamap);
+ if (cmd->frame_phys_addr)
+ bus_dmamap_unload(sc->mficmd_frame_tag, cmd->frame_dmamap);
+ if (cmd->frame_mem != NULL)
+ bus_dmamem_free(sc->mficmd_frame_tag, cmd->frame_mem, cmd->frame_dmamap);
}
diff --git a/sys/dev/mrsas/mrsas_ioctl.h b/sys/dev/mrsas/mrsas_ioctl.h
index 3604842..bf05a7d 100644
--- a/sys/dev/mrsas/mrsas_ioctl.h
+++ b/sys/dev/mrsas/mrsas_ioctl.h
@@ -1,43 +1,38 @@
/*
- * Copyright (c) 2014, LSI Corp.
- * All rights reserved.
- * Author: Marian Choy
+ * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
* Support: freebsdraid@lsi.com
*
* Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. Neither the name of the <ORGANIZATION> nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing
* official policies,either expressed or implied, of the FreeBSD Project.
*
- * Send feedback to: <megaraidfbsd@lsi.com>
- * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
- * ATTN: MegaRaid FreeBSD
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035; ATTN: MegaRaid FreeBSD
*
*/
@@ -45,53 +40,87 @@
__FBSDID("$FreeBSD$");
#ifndef MRSAS_IOCTL_H
-#define MRSAS_IOCTL_H
+#define MRSAS_IOCTL_H
#ifndef _IOWR
#include <sys/ioccom.h>
-#endif /* !_IOWR */
+#endif /* !_IOWR */
+
+#ifdef COMPAT_FREEBSD32
+/* Compilation error FIX */
+#if (__FreeBSD_version <= 900000)
+#include <sys/socket.h>
+#endif
+#include <sys/mount.h>
+#include <compat/freebsd32/freebsd32.h>
+#endif
/*
- * We need to use the same values as the mfi driver until MegaCli adds
- * support for this (mrsas) driver:
- * M is for MegaRAID. (This is typically the vendor or product initial)
- * 1 arbitrary. (This may be used to segment kinds of commands.
- * (1-9 status, 10-20 policy, etc.)
- * struct mrsas_iocpacket (sizeof() this parameter will be used.)
- * These three values are encoded into a somewhat unique, 32-bit value.
+ * We need to use the same values as the mfi driver until MegaCli adds
+ * support for this (mrsas) driver: M is for MegaRAID. (This is typically the
+ * vendor or product initial) 1 arbitrary. (This may be used to segment kinds
+ * of commands. (1-9 status, 10-20 policy, etc.) struct mrsas_iocpacket
+ * (sizeof() this parameter will be used.) These three values are encoded
+ * into a somewhat unique, 32-bit value.
*/
-#define MRSAS_IOC_FIRMWARE_PASS_THROUGH _IOWR('M', 1, struct mrsas_iocpacket)
+#define MRSAS_IOC_FIRMWARE_PASS_THROUGH64 _IOWR('M', 1, struct mrsas_iocpacket)
+#ifdef COMPAT_FREEBSD32
+#define MRSAS_IOC_FIRMWARE_PASS_THROUGH32 _IOWR('M', 1, struct mrsas_iocpacket32)
+#endif
+
+#define MRSAS_IOC_SCAN_BUS _IO('M', 10)
-#define MRSAS_IOC_SCAN_BUS _IO('M', 10)
+#define MRSAS_LINUX_CMD32 0xc1144d01
-#define MAX_IOCTL_SGE 16
-#define MFI_FRAME_DIR_READ 0x0010
-#define MFI_CMD_LD_SCSI_IO 0x03
+#define MAX_IOCTL_SGE 16
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_CMD_LD_SCSI_IO 0x03
-#define INQUIRY_CMD 0x12
-#define INQUIRY_CMDLEN 6
-#define INQUIRY_REPLY_LEN 96
-#define INQUIRY_VENDOR 8 /* Offset in reply data to vendor name */
-#define SCSI_SENSE_BUFFERSIZE 96
+#define INQUIRY_CMD 0x12
+#define INQUIRY_CMDLEN 6
+#define INQUIRY_REPLY_LEN 96
+#define INQUIRY_VENDOR 8 /* Offset in reply data to
+ * vendor name */
+#define SCSI_SENSE_BUFFERSIZE 96
-#define MEGAMFI_RAW_FRAME_SIZE 128
+#define MEGAMFI_RAW_FRAME_SIZE 128
#pragma pack(1)
struct mrsas_iocpacket {
- u_int16_t host_no;
- u_int16_t __pad1;
- u_int32_t sgl_off;
- u_int32_t sge_count;
- u_int32_t sense_off;
- u_int32_t sense_len;
- union {
- u_int8_t raw[MEGAMFI_RAW_FRAME_SIZE];
- struct mrsas_header hdr;
- } frame;
- struct iovec sgl[MAX_IOCTL_SGE];
+ u_int16_t host_no;
+ u_int16_t __pad1;
+ u_int32_t sgl_off;
+ u_int32_t sge_count;
+ u_int32_t sense_off;
+ u_int32_t sense_len;
+ union {
+ u_int8_t raw[MEGAMFI_RAW_FRAME_SIZE];
+ struct mrsas_header hdr;
+ } frame;
+ struct iovec sgl[MAX_IOCTL_SGE];
+};
+
+#pragma pack()
+
+#ifdef COMPAT_FREEBSD32
+#pragma pack(1)
+struct mrsas_iocpacket32 {
+ u_int16_t host_no;
+ u_int16_t __pad1;
+ u_int32_t sgl_off;
+ u_int32_t sge_count;
+ u_int32_t sense_off;
+ u_int32_t sense_len;
+ union {
+ u_int8_t raw[MEGAMFI_RAW_FRAME_SIZE];
+ struct mrsas_header hdr;
+ } frame;
+ struct iovec32 sgl[MAX_IOCTL_SGE];
};
+
#pragma pack()
+#endif /* COMPAT_FREEBSD32 */
-#endif /* MRSAS_IOCTL_H */
+#endif /* MRSAS_IOCTL_H */
diff --git a/sys/dev/mrsas/mrsas_linux.c b/sys/dev/mrsas/mrsas_linux.c
new file mode 100644
index 0000000..8a3db0b
--- /dev/null
+++ b/sys/dev/mrsas/mrsas_linux.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Kashyap Desai,
+ * Sibananda Sahu Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer. 2. Redistributions
+ * in binary form must reproduce the above copyright notice, this list of
+ * conditions and the following disclaimer in the documentation and/or other
+ * materials provided with the distribution. 3. Neither the name of the
+ * <ORGANIZATION> nor the names of its contributors may be used to endorse or
+ * promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing
+ * official policies,either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com> Mail to: LSI Corporation, 1621
+ * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#if (__FreeBSD_version > 900000)
+#include <sys/capability.h>
+#endif
+
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/file.h>
+#include <sys/proc.h>
+#include <machine/bus.h>
+
+#if defined(__amd64__) /* Assume amd64 wants 32 bit Linux */
+#include <machine/../linux32/linux.h>
+#include <machine/../linux32/linux32_proto.h>
+#else
+#include <machine/../linux/linux.h>
+#include <machine/../linux/linux_proto.h>
+#endif
+#include <compat/linux/linux_ioctl.h>
+#include <compat/linux/linux_util.h>
+
+#include <dev/mrsas/mrsas.h>
+#include <dev/mrsas/mrsas_ioctl.h>
+
+/* There are multiple ioctl number ranges that need to be handled */
+#define MRSAS_LINUX_IOCTL_MIN 0x4d00
+#define MRSAS_LINUX_IOCTL_MAX 0x4d01
+
+static linux_ioctl_function_t mrsas_linux_ioctl;
+static struct linux_ioctl_handler mrsas_linux_handler = {mrsas_linux_ioctl,
+ MRSAS_LINUX_IOCTL_MIN,
+MRSAS_LINUX_IOCTL_MAX};
+
+SYSINIT(mrsas_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
+ linux_ioctl_register_handler, &mrsas_linux_handler);
+SYSUNINIT(mrsas_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
+ linux_ioctl_unregister_handler, &mrsas_linux_handler);
+
+static struct linux_device_handler mrsas_device_handler =
+{"mrsas", "megaraid_sas", "mrsas0", "megaraid_sas_ioctl_node", -1, 0, 1};
+
+SYSINIT(mrsas_register2, SI_SUB_KLD, SI_ORDER_MIDDLE,
+ linux_device_register_handler, &mrsas_device_handler);
+SYSUNINIT(mrsas_unregister2, SI_SUB_KLD, SI_ORDER_MIDDLE,
+ linux_device_unregister_handler, &mrsas_device_handler);
+
+static int
+mrsas_linux_modevent(module_t mod __unused, int cmd __unused, void *data __unused)
+{
+ return (0);
+}
+
+/*
+ * mrsas_linux_ioctl: linux emulator IOCtl commands entry point.
+ *
+ * This function is the entry point for IOCtls from linux binaries.
+ * It calls the mrsas_ioctl function for processing
+ * depending on the IOCTL command received.
+ */
+static int
+mrsas_linux_ioctl(struct thread *p, struct linux_ioctl_args *args)
+{
+#if (__FreeBSD_version >= 1000000)
+ cap_rights_t rights;
+
+#endif
+ struct file *fp;
+ int error;
+ u_long cmd = args->cmd;
+
+ if (cmd != MRSAS_LINUX_CMD32) {
+ error = ENOTSUP;
+ goto END;
+ }
+#if (__FreeBSD_version >= 1000000)
+ error = fget(p, args->fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
+#elif (__FreeBSD_version <= 900000)
+ error = fget(p, args->fd, &fp);
+#else /* For FreeBSD version greater than
+ * 9.0.0 but less than 10.0.0 */
+ error = fget(p, args->fd, CAP_IOCTL, &fp);
+#endif
+ if (error != 0)
+ goto END;
+
+ error = fo_ioctl(fp, cmd, (caddr_t)args->arg, p->td_ucred, p);
+ fdrop(fp, p);
+END:
+ return (error);
+}
+
+DEV_MODULE(mrsas_linux, mrsas_linux_modevent, NULL);
+MODULE_DEPEND(mrsas, linux, 1, 1, 1);
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 1bb3172..2666d7f 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -207,6 +207,7 @@ struct pci_quirk {
#define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
#define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
#define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
+#define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
int arg1;
int arg2;
};
@@ -266,6 +267,15 @@ static const struct pci_quirk pci_quirks[] = {
*/
{ 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
+ /*
+ * Atheros AR8161/AR8162/E2200 ethernet controller has a bug that
+ * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
+ * command register is set.
+ */
+ { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
+ { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
+ { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
+
{ 0 }
};
@@ -3856,8 +3866,14 @@ pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
mte->mte_handlers++;
}
- /* Make sure that INTx is disabled if we are using MSI/MSIX */
- pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
+ if (!pci_has_quirk(pci_get_devid(dev),
+ PCI_QUIRK_MSI_INTX_BUG)) {
+ /*
+ * Make sure that INTx is disabled if we are
+ * using MSI/MSIX
+ */
+ pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
+ }
bad:
if (error) {
(void)bus_generic_teardown_intr(dev, child, irq,
@@ -3949,6 +3965,7 @@ pci_print_child(device_t dev, device_t child)
retval += printf(" at device %d.%d", pci_get_slot(child),
pci_get_function(child));
+ retval += bus_print_child_domain(dev, child);
retval += bus_print_child_footer(dev, child);
return (retval);
diff --git a/sys/dev/usb/usb_freebsd.h b/sys/dev/usb/usb_freebsd.h
index cda4e69..5c10923 100644
--- a/sys/dev/usb/usb_freebsd.h
+++ b/sys/dev/usb/usb_freebsd.h
@@ -50,6 +50,7 @@
#define USB_HAVE_FIXED_IFACE 0
#define USB_HAVE_FIXED_CONFIG 0
#define USB_HAVE_FIXED_PORT 0
+#define USB_HAVE_DISABLE_ENUM 1
/* define zero ticks callout value */
#define USB_CALLOUT_ZERO_TICKS 1
diff --git a/sys/dev/usb/usb_freebsd_loader.h b/sys/dev/usb/usb_freebsd_loader.h
index bd08218..4f82dd0 100644
--- a/sys/dev/usb/usb_freebsd_loader.h
+++ b/sys/dev/usb/usb_freebsd_loader.h
@@ -50,6 +50,7 @@
#define USB_HAVE_FIXED_IFACE 0
#define USB_HAVE_FIXED_CONFIG 0
#define USB_HAVE_FIXED_PORT 0
+#define USB_HAVE_DISABLE_ENUM 0
#define USB_CALLOUT_ZERO_TICKS 1
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index 8e0d557..6d79327 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -97,6 +97,16 @@ SYSCTL_INT(_hw_usb, OID_AUTO, power_timeout, CTLFLAG_RW,
&usb_power_timeout, 0, "USB power timeout");
#endif
+#if USB_HAVE_DISABLE_ENUM
+static int usb_disable_enumeration = 0;
+SYSCTL_INT(_hw_usb, OID_AUTO, disable_enumeration, CTLFLAG_RWTUN,
+ &usb_disable_enumeration, 0, "Set to disable all USB device enumeration.");
+
+static int usb_disable_port_power = 0;
+SYSCTL_INT(_hw_usb, OID_AUTO, disable_port_power, CTLFLAG_RWTUN,
+ &usb_disable_port_power, 0, "Set to disable all USB port power.");
+#endif
+
struct uhub_current_state {
uint16_t port_change;
uint16_t port_status;
@@ -111,6 +121,10 @@ struct uhub_softc {
struct mtx sc_mtx; /* our mutex */
struct usb_device *sc_udev; /* USB device */
struct usb_xfer *sc_xfer[UHUB_N_TRANSFER]; /* interrupt xfer */
+#if USB_HAVE_DISABLE_ENUM
+ int sc_disable_enumeration;
+ int sc_disable_port_power;
+#endif
uint8_t sc_flags;
#define UHUB_FLAG_DID_EXPLORE 0x01
};
@@ -618,9 +632,9 @@ repeat:
err = usbd_req_clear_port_feature(udev, NULL,
portno, UHF_C_PORT_CONNECTION);
- if (err) {
+ if (err)
goto error;
- }
+
/* check if there is a child */
if (child != NULL) {
@@ -633,14 +647,22 @@ repeat:
/* get fresh status */
err = uhub_read_port_status(sc, portno);
- if (err) {
+ if (err)
+ goto error;
+
+#if USB_HAVE_DISABLE_ENUM
+ /* check if we should skip enumeration from this USB HUB */
+ if (usb_disable_enumeration != 0 ||
+ sc->sc_disable_enumeration != 0) {
+ DPRINTF("Enumeration is disabled!\n");
goto error;
}
+#endif
/* check if nothing is connected to the port */
- if (!(sc->sc_st.port_status & UPS_CURRENT_CONNECT_STATUS)) {
+ if (!(sc->sc_st.port_status & UPS_CURRENT_CONNECT_STATUS))
goto error;
- }
+
/* check if there is no power on the port and print a warning */
switch (udev->speed) {
@@ -1188,6 +1210,10 @@ uhub_attach(device_t dev)
struct usb_hub *hub;
struct usb_hub_descriptor hubdesc20;
struct usb_hub_ss_descriptor hubdesc30;
+#if USB_HAVE_DISABLE_ENUM
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+#endif
uint16_t pwrdly;
uint16_t nports;
uint8_t x;
@@ -1385,6 +1411,24 @@ uhub_attach(device_t dev)
/* wait with power off for a while */
usb_pause_mtx(NULL, USB_MS_TO_TICKS(USB_POWER_DOWN_TIME));
+#if USB_HAVE_DISABLE_ENUM
+ /* Add device sysctls */
+
+ sysctl_ctx = device_get_sysctl_ctx(dev);
+ sysctl_tree = device_get_sysctl_tree(dev);
+
+ if (sysctl_ctx != NULL && sysctl_tree != NULL) {
+ (void) SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "disable_enumeration", CTLFLAG_RWTUN,
+ &sc->sc_disable_enumeration, 0,
+ "Set to disable enumeration on this USB HUB.");
+
+ (void) SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "disable_port_power", CTLFLAG_RWTUN,
+ &sc->sc_disable_port_power, 0,
+ "Set to disable USB port power on this USB HUB.");
+ }
+#endif
/*
* To have the best chance of success we do things in the exact same
* order as Windoze98. This should not be necessary, but some
@@ -1439,13 +1483,27 @@ uhub_attach(device_t dev)
removable++;
break;
}
- if (!err) {
- /* turn the power on */
- err = usbd_req_set_port_feature(udev, NULL,
- portno, UHF_PORT_POWER);
+ if (err == 0) {
+#if USB_HAVE_DISABLE_ENUM
+ /* check if we should disable USB port power or not */
+ if (usb_disable_port_power != 0 ||
+ sc->sc_disable_port_power != 0) {
+ /* turn the power off */
+ DPRINTFN(0, "Turning port %d power off\n", portno);
+ err = usbd_req_clear_port_feature(udev, NULL,
+ portno, UHF_PORT_POWER);
+ } else {
+#endif
+ /* turn the power on */
+ DPRINTFN(0, "Turning port %d power on\n", portno);
+ err = usbd_req_set_port_feature(udev, NULL,
+ portno, UHF_PORT_POWER);
+#if USB_HAVE_DISABLE_ENUM
+ }
+#endif
}
- if (err) {
- DPRINTFN(0, "port %d power on failed, %s\n",
+ if (err != 0) {
+ DPRINTFN(0, "port %d power on or off failed, %s\n",
portno, usbd_errstr(err));
}
DPRINTF("turn on port %d power\n",
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index 17a8806..a5273ac 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -2353,6 +2353,8 @@ product HUAWEI E3131 0x1506 3G modem
product HUAWEI K3765_INIT 0x1520 K3765 Initial
product HUAWEI K4505_INIT 0x1521 K4505 Initial
product HUAWEI E3272_INIT 0x155b LTE modem initial
+product HUAWEI R215_INIT 0x1582 LTE modem initial
+product HUAWEI R215 0x1588 LTE modem
product HUAWEI ETS2055 0x1803 CDMA modem
product HUAWEI E173 0x1c05 3G modem
product HUAWEI E173_INIT 0x1c0b 3G modem initial
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index 67d4348..487a7ce 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -120,6 +120,7 @@ const struct terminal_class vt_termclass = {
static SYSCTL_NODE(_kern, OID_AUTO, vt, CTLFLAG_RD, 0, "vt(9) parameters");
VT_SYSCTL_INT(enable_altgr, 1, "Enable AltGr key (Do not assume R.Alt as Alt)");
+VT_SYSCTL_INT(enable_bell, 1, "Enable bell");
VT_SYSCTL_INT(debug, 0, "vt(9) debug level");
VT_SYSCTL_INT(deadtimer, 15, "Time to wait busy process in VT_PROCESS mode");
VT_SYSCTL_INT(suspendswitch, 1, "Switch to VT0 before suspend");
@@ -904,6 +905,9 @@ vtterm_bell(struct terminal *tm)
struct vt_window *vw = tm->tm_softc;
struct vt_device *vd = vw->vw_device;
+ if (!vt_enable_bell)
+ return;
+
if (vd->vd_flags & VDF_QUIET_BELL)
return;
@@ -915,6 +919,9 @@ vtterm_beep(struct terminal *tm, u_int param)
{
u_int freq, period;
+ if (!vt_enable_bell)
+ return;
+
if ((param == 0) || ((param & 0xffff) == 0)) {
vtterm_bell(tm);
return;
diff --git a/sys/fs/autofs/autofs.c b/sys/fs/autofs/autofs.c
index 719a2c5..a2aa6c1 100644
--- a/sys/fs/autofs/autofs.c
+++ b/sys/fs/autofs/autofs.c
@@ -367,7 +367,7 @@ autofs_trigger_one(struct autofs_node *anp,
char *key, *path;
int error = 0, request_error, last;
- amp = VFSTOAUTOFS(anp->an_vnode->v_mount);
+ amp = anp->an_mount;
sx_assert(&autofs_softc->sc_lock, SA_XLOCKED);
diff --git a/sys/geom/geom_dev.c b/sys/geom/geom_dev.c
index c7104d8..e6213ee 100644
--- a/sys/geom/geom_dev.c
+++ b/sys/geom/geom_dev.c
@@ -82,6 +82,8 @@ static struct cdevsw g_dev_cdevsw = {
.d_flags = D_DISK | D_TRACKCLOSE,
};
+static g_init_t g_dev_init;
+static g_fini_t g_dev_fini;
static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_attrchanged_t g_dev_attrchanged;
@@ -89,6 +91,8 @@ static g_attrchanged_t g_dev_attrchanged;
static struct g_class g_dev_class = {
.name = "DEV",
.version = G_VERSION,
+ .init = g_dev_init,
+ .fini = g_dev_fini,
.taste = g_dev_taste,
.orphan = g_dev_orphan,
.attrchanged = g_dev_attrchanged
@@ -107,6 +111,58 @@ SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
"delete request sent to the provider. Larger requests are chunked "
"so they can be interrupted. (0 = disable chunking)");
+static char *dumpdev = NULL;
+static void
+g_dev_init(struct g_class *mp)
+{
+
+ dumpdev = getenv("dumpdev");
+}
+
+static void
+g_dev_fini(struct g_class *mp)
+{
+
+ freeenv(dumpdev);
+}
+
+static int
+g_dev_setdumpdev(struct cdev *dev)
+{
+ struct g_kerneldump kd;
+ struct g_consumer *cp;
+ int error, len;
+
+ if (dev == NULL)
+ return (set_dumper(NULL, NULL));
+
+ cp = dev->si_drv2;
+ len = sizeof(kd);
+ kd.offset = 0;
+ kd.length = OFF_MAX;
+ error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd);
+ if (error == 0) {
+ error = set_dumper(&kd.di, devtoname(dev));
+ if (error == 0)
+ dev->si_flags |= SI_DUMPDEV;
+ }
+ return (error);
+}
+
+static void
+init_dumpdev(struct cdev *dev)
+{
+
+ if (dumpdev == NULL)
+ return;
+ if (strcmp(devtoname(dev), dumpdev) != 0)
+ return;
+ if (g_dev_setdumpdev(dev) == 0) {
+ freeenv(dumpdev);
+ dumpdev = NULL;
+ }
+}
+
static void
g_dev_destroy(void *arg, int flags __unused)
{
@@ -261,10 +317,12 @@ g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
dev->si_iosize_max = MAXPHYS;
dev->si_drv2 = cp;
+ init_dumpdev(dev);
if (adev != NULL) {
adev->si_iosize_max = MAXPHYS;
adev->si_drv2 = cp;
adev->si_flags |= SI_UNMAPPED;
+ init_dumpdev(adev);
}
g_dev_attrchanged(cp, "GEOM::physpath");
@@ -358,7 +416,6 @@ g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
{
struct g_consumer *cp;
struct g_provider *pp;
- struct g_kerneldump kd;
off_t offset, length, chunk;
int i, error;
@@ -395,19 +452,10 @@ g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
break;
case DIOCSKERNELDUMP:
- if (*(u_int *)data == 0) {
- error = set_dumper(NULL, NULL);
- break;
- }
- kd.offset = 0;
- kd.length = OFF_MAX;
- i = sizeof kd;
- error = g_io_getattr("GEOM::kerneldump", cp, &i, &kd);
- if (error == 0) {
- error = set_dumper(&kd.di, devtoname(dev));
- if (error == 0)
- dev->si_flags |= SI_DUMPDEV;
- }
+ if (*(u_int *)data == 0)
+ error = g_dev_setdumpdev(NULL);
+ else
+ error = g_dev_setdumpdev(dev);
break;
case DIOCGFLUSH:
error = g_io_flush(cp);
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 34b147d..9d98f0e 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -3326,9 +3326,7 @@ init386(first)
setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
initializecpu(); /* Initialize CPU registers */
-#ifdef PC98
initializecpucache();
-#endif
/* make an initial tss so cpu can get interrupt stack on syscall! */
/* Note: -16 is so we can grow the trapframe if we came from vm86 */
diff --git a/sys/i386/i386/minidump_machdep.c b/sys/i386/i386/minidump_machdep.c
index e0cd1ff..dd3490b 100644
--- a/sys/i386/i386/minidump_machdep.c
+++ b/sys/i386/i386/minidump_machdep.c
@@ -178,7 +178,7 @@ blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
/* A fake page table page, to avoid having to handle both 4K and 2M pages */
static pt_entry_t fakept[NPTEPG];
-void
+int
minidumpsys(struct dumperinfo *di)
{
uint64_t dumpsize;
@@ -377,7 +377,7 @@ minidumpsys(struct dumperinfo *di)
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
- return;
+ return (0);
fail:
if (error < 0)
@@ -389,6 +389,7 @@ minidumpsys(struct dumperinfo *di)
printf("\nDump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
+ return (error);
}
void
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 37e14e9..90ea046 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1172,16 +1172,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
{
- KASSERT((sva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: sva not page-aligned"));
- KASSERT((eva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: eva not page-aligned"));
+ if (force) {
+ sva &= ~(vm_offset_t)cpu_clflush_line_size;
+ } else {
+ KASSERT((sva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: sva not page-aligned"));
+ KASSERT((eva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: eva not page-aligned"));
+ }
- if (cpu_feature & CPUID_SS)
- ; /* If "Self Snoop" is supported, do nothing. */
+ if ((cpu_feature & CPUID_SS) != 0 && !force)
+ ; /* If "Self Snoop" is supported and allowed, do nothing. */
else if ((cpu_feature & CPUID_CLFSH) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
@@ -5164,7 +5168,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
- pmap_invalidate_cache_range(va, va + size);
+ pmap_invalidate_cache_range(va, va + size, FALSE);
return ((void *)(va + offset));
}
@@ -5370,7 +5374,7 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
*/
if (changed) {
pmap_invalidate_range(kernel_pmap, base, tmpva);
- pmap_invalidate_cache_range(base, tmpva);
+ pmap_invalidate_cache_range(base, tmpva, FALSE);
}
return (0);
}
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 9c87548..e2ba874 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -813,7 +813,7 @@ sf_buf_invalidate(struct sf_buf *sf)
* settings are recalculated.
*/
pmap_qenter(sf->kva, &m, 1);
- pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
+ pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE, FALSE);
}
/*
diff --git a/sys/i386/include/md_var.h b/sys/i386/include/md_var.h
index 950fa1f..2425fab 100644
--- a/sys/i386/include/md_var.h
+++ b/sys/i386/include/md_var.h
@@ -113,6 +113,6 @@ void ppro_reenable_apic(void);
void printcpuinfo(void);
void setidt(int idx, alias_for_inthand_t *func, int typ, int dpl, int selec);
int user_dbreg_trap(void);
-void minidumpsys(struct dumperinfo *);
+int minidumpsys(struct dumperinfo *);
#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index 100475c..05656cd 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -458,7 +458,8 @@ void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void pmap_invalidate_all(pmap_t);
void pmap_invalidate_cache(void);
void pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+ boolean_t force);
#endif /* _KERNEL */
diff --git a/sys/kern/bus_if.m b/sys/kern/bus_if.m
index ccc854c..cce997c 100644
--- a/sys/kern/bus_if.m
+++ b/sys/kern/bus_if.m
@@ -692,3 +692,16 @@ METHOD int resume_child {
device_t _dev;
device_t _child;
} DEFAULT bus_generic_resume_child;
+
+/**
+ * @brief Get the VM domain handle for the given bus and child.
+ *
+ * @param _dev the bus device
+ * @param _child the child device
+ * @param _domain a pointer to the bus's domain handle identifier
+ */
+METHOD int get_domain {
+ device_t _dev;
+ device_t _child;
+ int *_domain;
+} DEFAULT bus_generic_get_domain;
diff --git a/sys/kern/init_sysent.c b/sys/kern/init_sysent.c
index 0e6e0f1..3decfd41 100644
--- a/sys/kern/init_sysent.c
+++ b/sys/kern/init_sysent.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/kern/syscalls.master 263318 2014-03-18 21:32:03Z attilio
+ * created from FreeBSD: head/sys/kern/syscalls.master 272823 2014-10-09 15:16:52Z marcel
*/
#include "opt_compat.h"
@@ -505,10 +505,10 @@ struct sysent sysent[] = {
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 468 = nosys */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 469 = __getpath_fromfd */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 470 = __getpath_fromaddr */
- { AS(sctp_peeloff_args), (sy_call_t *)sys_sctp_peeloff, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 471 = sctp_peeloff */
- { AS(sctp_generic_sendmsg_args), (sy_call_t *)sys_sctp_generic_sendmsg, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 472 = sctp_generic_sendmsg */
- { AS(sctp_generic_sendmsg_iov_args), (sy_call_t *)sys_sctp_generic_sendmsg_iov, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 473 = sctp_generic_sendmsg_iov */
- { AS(sctp_generic_recvmsg_args), (sy_call_t *)sys_sctp_generic_recvmsg, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 474 = sctp_generic_recvmsg */
+ { AS(sctp_peeloff_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 471 = sctp_peeloff */
+ { AS(sctp_generic_sendmsg_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 472 = sctp_generic_sendmsg */
+ { AS(sctp_generic_sendmsg_iov_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 473 = sctp_generic_sendmsg_iov */
+ { AS(sctp_generic_recvmsg_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 474 = sctp_generic_recvmsg */
{ AS(pread_args), (sy_call_t *)sys_pread, AUE_PREAD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 475 = pread */
{ AS(pwrite_args), (sy_call_t *)sys_pwrite, AUE_PWRITE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 476 = pwrite */
{ AS(mmap_args), (sy_call_t *)sys_mmap, AUE_MMAP, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 477 = mmap */
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index b503633..13822fd 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -163,6 +163,7 @@ struct callout_cpu {
sbintime_t cc_lastscan;
void *cc_cookie;
u_int cc_bucket;
+ char cc_ktr_event_name[20];
};
#define cc_exec_curr cc_exec_entity[0].cc_curr
@@ -201,7 +202,7 @@ struct callout_cpu cc_cpu;
static int timeout_cpu;
-static void callout_cpu_init(struct callout_cpu *cc);
+static void callout_cpu_init(struct callout_cpu *cc, int cpu);
static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
int *mpcalls, int *lockcalls, int *gcalls,
@@ -302,7 +303,7 @@ callout_callwheel_init(void *dummy)
cc = CC_CPU(timeout_cpu);
cc->cc_callout = malloc(ncallout * sizeof(struct callout),
M_CALLOUT, M_WAITOK);
- callout_cpu_init(cc);
+ callout_cpu_init(cc, timeout_cpu);
}
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
@@ -310,7 +311,7 @@ SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
* Initialize the per-cpu callout structures.
*/
static void
-callout_cpu_init(struct callout_cpu *cc)
+callout_cpu_init(struct callout_cpu *cc, int cpu)
{
struct callout *c;
int i;
@@ -325,6 +326,8 @@ callout_cpu_init(struct callout_cpu *cc)
cc->cc_firstevent = SBT_MAX;
for (i = 0; i < 2; i++)
cc_cce_cleanup(cc, i);
+ snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
+ "callwheel cpu %d", cpu);
if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */
return;
for (i = 0; i < ncallout; i++) {
@@ -396,7 +399,7 @@ start_softclock(void *dummy)
continue;
cc = CC_CPU(cpu);
cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */
- callout_cpu_init(cc);
+ callout_cpu_init(cc, cpu);
snprintf(name, sizeof(name), "clock (%d)", cpu);
ie = NULL;
if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
@@ -711,6 +714,8 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
c, c_func, c_arg);
}
+ KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
+ "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
sbt1 = sbinuptime();
#endif
@@ -733,6 +738,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
lastfunc = c_func;
}
#endif
+ KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
CTR1(KTR_CALLOUT, "callout %p finished", c);
if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
class->lc_unlock(c_lock);
diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c
index 974c540..31ad45e 100644
--- a/sys/kern/subr_bus.c
+++ b/sys/kern/subr_bus.c
@@ -54,6 +54,7 @@ __FBSDID("$FreeBSD$");
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
+#include <sys/cpuset.h>
#include <net/vnet.h>
@@ -3754,6 +3755,25 @@ bus_print_child_footer(device_t dev, device_t child)
/**
* @brief Helper function for implementing BUS_PRINT_CHILD().
*
+ * This function prints out the VM domain for the given device.
+ *
+ * @returns the number of characters printed
+ */
+int
+bus_print_child_domain(device_t dev, device_t child)
+{
+ int domain;
+
+ /* No domain? Don't print anything */
+ if (BUS_GET_DOMAIN(dev, child, &domain) != 0)
+ return (0);
+
+ return (printf(" numa-domain %d", domain));
+}
+
+/**
+ * @brief Helper function for implementing BUS_PRINT_CHILD().
+ *
* This function simply calls bus_print_child_header() followed by
* bus_print_child_footer().
*
@@ -3765,6 +3785,7 @@ bus_generic_print_child(device_t dev, device_t child)
int retval = 0;
retval += bus_print_child_header(dev, child);
+ retval += bus_print_child_domain(dev, child);
retval += bus_print_child_footer(dev, child);
return (retval);
@@ -4179,6 +4200,16 @@ bus_generic_child_present(device_t dev, device_t child)
return (BUS_CHILD_PRESENT(device_get_parent(dev), dev));
}
+int
+bus_generic_get_domain(device_t dev, device_t child, int *domain)
+{
+
+ if (dev->parent)
+ return (BUS_GET_DOMAIN(dev->parent, dev, domain));
+
+ return (ENOENT);
+}
+
/*
* Some convenience functions to make it easier for drivers to use the
* resource-management functions. All these really do is hide the
diff --git a/sys/kern/syscalls.c b/sys/kern/syscalls.c
index d9f8388..276c34a 100644
--- a/sys/kern/syscalls.c
+++ b/sys/kern/syscalls.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/kern/syscalls.master 263318 2014-03-18 21:32:03Z attilio
+ * created from FreeBSD: head/sys/kern/syscalls.master 272823 2014-10-09 15:16:52Z marcel
*/
const char *syscallnames[] = {
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index 71e6273..35c05f0 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -837,14 +837,14 @@
468 AUE_NULL UNIMPL nosys
469 AUE_NULL UNIMPL __getpath_fromfd
470 AUE_NULL UNIMPL __getpath_fromaddr
-471 AUE_NULL STD { int sctp_peeloff(int sd, uint32_t name); }
-472 AUE_NULL STD { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \
+471 AUE_NULL NOSTD { int sctp_peeloff(int sd, uint32_t name); }
+472 AUE_NULL NOSTD { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \
caddr_t to, __socklen_t tolen, \
struct sctp_sndrcvinfo *sinfo, int flags); }
-473 AUE_NULL STD { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \
+473 AUE_NULL NOSTD { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \
caddr_t to, __socklen_t tolen, \
struct sctp_sndrcvinfo *sinfo, int flags); }
-474 AUE_NULL STD { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \
+474 AUE_NULL NOSTD { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \
struct sockaddr * from, __socklen_t *fromlenaddr, \
struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
475 AUE_PREAD STD { ssize_t pread(int fd, void *buf, \
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index e2a0fa3..373fcc3 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -123,9 +123,10 @@ tty_watermarks(struct tty *tp)
}
static int
-tty_drain(struct tty *tp)
+tty_drain(struct tty *tp, int leaving)
{
- int error;
+ size_t bytesused;
+ int error, revokecnt;
if (ttyhook_hashook(tp, getc_inject))
/* buffer is inaccessible */
@@ -134,11 +135,27 @@ tty_drain(struct tty *tp)
while (ttyoutq_bytesused(&tp->t_outq) > 0) {
ttydevsw_outwakeup(tp);
/* Could be handled synchronously. */
- if (ttyoutq_bytesused(&tp->t_outq) == 0)
+ bytesused = ttyoutq_bytesused(&tp->t_outq);
+ if (bytesused == 0)
return (0);
/* Wait for data to be drained. */
- error = tty_wait(tp, &tp->t_outwait);
+ if (leaving) {
+ revokecnt = tp->t_revokecnt;
+ error = tty_timedwait(tp, &tp->t_outwait, hz);
+ switch (error) {
+ case ERESTART:
+ if (revokecnt != tp->t_revokecnt)
+ error = 0;
+ break;
+ case EWOULDBLOCK:
+ if (ttyoutq_bytesused(&tp->t_outq) < bytesused)
+ error = 0;
+ break;
+ }
+ } else
+ error = tty_wait(tp, &tp->t_outwait);
+
if (error)
return (error);
}
@@ -191,10 +208,8 @@ ttydev_leave(struct tty *tp)
/* Drain any output. */
MPASS((tp->t_flags & TF_STOPPED) == 0);
- if (!tty_gone(tp)) {
- while (tty_drain(tp) == ERESTART)
- ;
- }
+ if (!tty_gone(tp))
+ tty_drain(tp, 1);
ttydisc_close(tp);
@@ -1392,14 +1407,14 @@ tty_timedwait(struct tty *tp, struct cv *cv, int hz)
error = cv_timedwait_sig(cv, tp->t_mtx, hz);
- /* Restart the system call when we may have been revoked. */
- if (tp->t_revokecnt != revokecnt)
- return (ERESTART);
-
/* Bail out when the device slipped away. */
if (tty_gone(tp))
return (ENXIO);
+ /* Restart the system call when we may have been revoked. */
+ if (tp->t_revokecnt != revokecnt)
+ return (ERESTART);
+
return (error);
}
@@ -1528,7 +1543,7 @@ tty_generic_ioctl(struct tty *tp, u_long cmd, void *data, int fflag,
/* Set terminal flags through tcsetattr(). */
if (cmd == TIOCSETAW || cmd == TIOCSETAF) {
- error = tty_drain(tp);
+ error = tty_drain(tp, 0);
if (error)
return (error);
if (cmd == TIOCSETAF)
@@ -1707,7 +1722,7 @@ tty_generic_ioctl(struct tty *tp, u_long cmd, void *data, int fflag,
}
case TIOCDRAIN:
/* Drain TTY output. */
- return tty_drain(tp);
+ return tty_drain(tp, 0);
case TIOCCONS:
/* Set terminal as console TTY. */
if (*(int *)data) {
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 43dd56b..6d423ba 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -38,7 +38,6 @@ __FBSDID("$FreeBSD$");
#include "opt_capsicum.h"
#include "opt_inet.h"
#include "opt_inet6.h"
-#include "opt_sctp.h"
#include "opt_compat.h"
#include "opt_ktrace.h"
@@ -95,13 +94,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/uma.h>
-#if defined(INET) || defined(INET6)
-#ifdef SCTP
-#include <netinet/sctp.h>
-#include <netinet/sctp_peeloff.h>
-#endif /* SCTP */
-#endif /* INET || INET6 */
-
/*
* Flags for accept1() and kern_accept4(), in addition to SOCK_CLOEXEC
* and SOCK_NONBLOCK.
@@ -198,7 +190,7 @@ SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
* capability rights are present.
* A reference on the file entry is held upon returning.
*/
-static int
+int
getsock_cap(struct filedesc *fdp, int fd, cap_rights_t *rightsp,
struct file **fpp, u_int *fflagp)
{
@@ -3206,484 +3198,3 @@ out:
return (error);
}
-
-/*
- * SCTP syscalls.
- * Functionality only compiled in if SCTP is defined in the kernel Makefile,
- * otherwise all return EOPNOTSUPP.
- * XXX: We should make this loadable one day.
- */
-int
-sys_sctp_peeloff(td, uap)
- struct thread *td;
- struct sctp_peeloff_args /* {
- int sd;
- caddr_t name;
- } */ *uap;
-{
-#if (defined(INET) || defined(INET6)) && defined(SCTP)
- struct file *nfp = NULL;
- struct socket *head, *so;
- cap_rights_t rights;
- u_int fflag;
- int error, fd;
-
- AUDIT_ARG_FD(uap->sd);
- error = fgetsock(td, uap->sd, cap_rights_init(&rights, CAP_PEELOFF),
- &head, &fflag);
- if (error != 0)
- goto done2;
- if (head->so_proto->pr_protocol != IPPROTO_SCTP) {
- error = EOPNOTSUPP;
- goto done;
- }
- error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
- if (error != 0)
- goto done;
- /*
- * At this point we know we do have a assoc to pull
- * we proceed to get the fd setup. This may block
- * but that is ok.
- */
-
- error = falloc(td, &nfp, &fd, 0);
- if (error != 0)
- goto done;
- td->td_retval[0] = fd;
-
- CURVNET_SET(head->so_vnet);
- so = sonewconn(head, SS_ISCONNECTED);
- if (so == NULL) {
- error = ENOMEM;
- goto noconnection;
- }
- /*
- * Before changing the flags on the socket, we have to bump the
- * reference count. Otherwise, if the protocol calls sofree(),
- * the socket will be released due to a zero refcount.
- */
- SOCK_LOCK(so);
- soref(so); /* file descriptor reference */
- SOCK_UNLOCK(so);
-
- ACCEPT_LOCK();
-
- TAILQ_REMOVE(&head->so_comp, so, so_list);
- head->so_qlen--;
- so->so_state |= (head->so_state & SS_NBIO);
- so->so_state &= ~SS_NOFDREF;
- so->so_qstate &= ~SQ_COMP;
- so->so_head = NULL;
- ACCEPT_UNLOCK();
- finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
- error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
- if (error != 0)
- goto noconnection;
- if (head->so_sigio != NULL)
- fsetown(fgetown(&head->so_sigio), &so->so_sigio);
-
-noconnection:
- /*
- * close the new descriptor, assuming someone hasn't ripped it
- * out from under us.
- */
- if (error != 0)
- fdclose(td->td_proc->p_fd, nfp, fd, td);
-
- /*
- * Release explicitly held references before returning.
- */
- CURVNET_RESTORE();
-done:
- if (nfp != NULL)
- fdrop(nfp, td);
- fputsock(head);
-done2:
- return (error);
-#else /* SCTP */
- return (EOPNOTSUPP);
-#endif /* SCTP */
-}
-
-int
-sys_sctp_generic_sendmsg (td, uap)
- struct thread *td;
- struct sctp_generic_sendmsg_args /* {
- int sd,
- caddr_t msg,
- int mlen,
- caddr_t to,
- __socklen_t tolen,
- struct sctp_sndrcvinfo *sinfo,
- int flags
- } */ *uap;
-{
-#if (defined(INET) || defined(INET6)) && defined(SCTP)
- struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
- struct socket *so;
- struct file *fp = NULL;
- struct sockaddr *to = NULL;
-#ifdef KTRACE
- struct uio *ktruio = NULL;
-#endif
- struct uio auio;
- struct iovec iov[1];
- cap_rights_t rights;
- int error = 0, len;
-
- if (uap->sinfo != NULL) {
- error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
- if (error != 0)
- return (error);
- u_sinfo = &sinfo;
- }
-
- cap_rights_init(&rights, CAP_SEND);
- if (uap->tolen != 0) {
- error = getsockaddr(&to, uap->to, uap->tolen);
- if (error != 0) {
- to = NULL;
- goto sctp_bad2;
- }
- cap_rights_set(&rights, CAP_CONNECT);
- }
-
- AUDIT_ARG_FD(uap->sd);
- error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
- if (error != 0)
- goto sctp_bad;
-#ifdef KTRACE
- if (to && (KTRPOINT(td, KTR_STRUCT)))
- ktrsockaddr(to);
-#endif
-
- iov[0].iov_base = uap->msg;
- iov[0].iov_len = uap->mlen;
-
- so = (struct socket *)fp->f_data;
- if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
- error = EOPNOTSUPP;
- goto sctp_bad;
- }
-#ifdef MAC
- error = mac_socket_check_send(td->td_ucred, so);
- if (error != 0)
- goto sctp_bad;
-#endif /* MAC */
-
- auio.uio_iov = iov;
- auio.uio_iovcnt = 1;
- auio.uio_segflg = UIO_USERSPACE;
- auio.uio_rw = UIO_WRITE;
- auio.uio_td = td;
- auio.uio_offset = 0; /* XXX */
- auio.uio_resid = 0;
- len = auio.uio_resid = uap->mlen;
- CURVNET_SET(so->so_vnet);
- error = sctp_lower_sosend(so, to, &auio, (struct mbuf *)NULL,
- (struct mbuf *)NULL, uap->flags, u_sinfo, td);
- CURVNET_RESTORE();
- if (error != 0) {
- if (auio.uio_resid != len && (error == ERESTART ||
- error == EINTR || error == EWOULDBLOCK))
- error = 0;
- /* Generation of SIGPIPE can be controlled per socket. */
- if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
- !(uap->flags & MSG_NOSIGNAL)) {
- PROC_LOCK(td->td_proc);
- tdsignal(td, SIGPIPE);
- PROC_UNLOCK(td->td_proc);
- }
- }
- if (error == 0)
- td->td_retval[0] = len - auio.uio_resid;
-#ifdef KTRACE
- if (ktruio != NULL) {
- ktruio->uio_resid = td->td_retval[0];
- ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
- }
-#endif /* KTRACE */
-sctp_bad:
- if (fp != NULL)
- fdrop(fp, td);
-sctp_bad2:
- free(to, M_SONAME);
- return (error);
-#else /* SCTP */
- return (EOPNOTSUPP);
-#endif /* SCTP */
-}
-
-int
-sys_sctp_generic_sendmsg_iov(td, uap)
- struct thread *td;
- struct sctp_generic_sendmsg_iov_args /* {
- int sd,
- struct iovec *iov,
- int iovlen,
- caddr_t to,
- __socklen_t tolen,
- struct sctp_sndrcvinfo *sinfo,
- int flags
- } */ *uap;
-{
-#if (defined(INET) || defined(INET6)) && defined(SCTP)
- struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
- struct socket *so;
- struct file *fp = NULL;
- struct sockaddr *to = NULL;
-#ifdef KTRACE
- struct uio *ktruio = NULL;
-#endif
- struct uio auio;
- struct iovec *iov, *tiov;
- cap_rights_t rights;
- ssize_t len;
- int error, i;
-
- if (uap->sinfo != NULL) {
- error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
- if (error != 0)
- return (error);
- u_sinfo = &sinfo;
- }
- cap_rights_init(&rights, CAP_SEND);
- if (uap->tolen != 0) {
- error = getsockaddr(&to, uap->to, uap->tolen);
- if (error != 0) {
- to = NULL;
- goto sctp_bad2;
- }
- cap_rights_set(&rights, CAP_CONNECT);
- }
-
- AUDIT_ARG_FD(uap->sd);
- error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
- if (error != 0)
- goto sctp_bad1;
-
-#ifdef COMPAT_FREEBSD32
- if (SV_CURPROC_FLAG(SV_ILP32))
- error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
- uap->iovlen, &iov, EMSGSIZE);
- else
-#endif
- error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
- if (error != 0)
- goto sctp_bad1;
-#ifdef KTRACE
- if (to && (KTRPOINT(td, KTR_STRUCT)))
- ktrsockaddr(to);
-#endif
-
- so = (struct socket *)fp->f_data;
- if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
- error = EOPNOTSUPP;
- goto sctp_bad;
- }
-#ifdef MAC
- error = mac_socket_check_send(td->td_ucred, so);
- if (error != 0)
- goto sctp_bad;
-#endif /* MAC */
-
- auio.uio_iov = iov;
- auio.uio_iovcnt = uap->iovlen;
- auio.uio_segflg = UIO_USERSPACE;
- auio.uio_rw = UIO_WRITE;
- auio.uio_td = td;
- auio.uio_offset = 0; /* XXX */
- auio.uio_resid = 0;
- tiov = iov;
- for (i = 0; i <uap->iovlen; i++, tiov++) {
- if ((auio.uio_resid += tiov->iov_len) < 0) {
- error = EINVAL;
- goto sctp_bad;
- }
- }
- len = auio.uio_resid;
- CURVNET_SET(so->so_vnet);
- error = sctp_lower_sosend(so, to, &auio,
- (struct mbuf *)NULL, (struct mbuf *)NULL,
- uap->flags, u_sinfo, td);
- CURVNET_RESTORE();
- if (error != 0) {
- if (auio.uio_resid != len && (error == ERESTART ||
- error == EINTR || error == EWOULDBLOCK))
- error = 0;
- /* Generation of SIGPIPE can be controlled per socket */
- if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
- !(uap->flags & MSG_NOSIGNAL)) {
- PROC_LOCK(td->td_proc);
- tdsignal(td, SIGPIPE);
- PROC_UNLOCK(td->td_proc);
- }
- }
- if (error == 0)
- td->td_retval[0] = len - auio.uio_resid;
-#ifdef KTRACE
- if (ktruio != NULL) {
- ktruio->uio_resid = td->td_retval[0];
- ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
- }
-#endif /* KTRACE */
-sctp_bad:
- free(iov, M_IOV);
-sctp_bad1:
- if (fp != NULL)
- fdrop(fp, td);
-sctp_bad2:
- free(to, M_SONAME);
- return (error);
-#else /* SCTP */
- return (EOPNOTSUPP);
-#endif /* SCTP */
-}
-
-int
-sys_sctp_generic_recvmsg(td, uap)
- struct thread *td;
- struct sctp_generic_recvmsg_args /* {
- int sd,
- struct iovec *iov,
- int iovlen,
- struct sockaddr *from,
- __socklen_t *fromlenaddr,
- struct sctp_sndrcvinfo *sinfo,
- int *msg_flags
- } */ *uap;
-{
-#if (defined(INET) || defined(INET6)) && defined(SCTP)
- uint8_t sockbufstore[256];
- struct uio auio;
- struct iovec *iov, *tiov;
- struct sctp_sndrcvinfo sinfo;
- struct socket *so;
- struct file *fp = NULL;
- struct sockaddr *fromsa;
- cap_rights_t rights;
-#ifdef KTRACE
- struct uio *ktruio = NULL;
-#endif
- ssize_t len;
- int error, fromlen, i, msg_flags;
-
- AUDIT_ARG_FD(uap->sd);
- error = getsock_cap(td->td_proc->p_fd, uap->sd,
- cap_rights_init(&rights, CAP_RECV), &fp, NULL);
- if (error != 0)
- return (error);
-#ifdef COMPAT_FREEBSD32
- if (SV_CURPROC_FLAG(SV_ILP32))
- error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
- uap->iovlen, &iov, EMSGSIZE);
- else
-#endif
- error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
- if (error != 0)
- goto out1;
-
- so = fp->f_data;
- if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
- error = EOPNOTSUPP;
- goto out;
- }
-#ifdef MAC
- error = mac_socket_check_receive(td->td_ucred, so);
- if (error != 0)
- goto out;
-#endif /* MAC */
-
- if (uap->fromlenaddr != NULL) {
- error = copyin(uap->fromlenaddr, &fromlen, sizeof (fromlen));
- if (error != 0)
- goto out;
- } else {
- fromlen = 0;
- }
- if (uap->msg_flags) {
- error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
- if (error != 0)
- goto out;
- } else {
- msg_flags = 0;
- }
- auio.uio_iov = iov;
- auio.uio_iovcnt = uap->iovlen;
- auio.uio_segflg = UIO_USERSPACE;
- auio.uio_rw = UIO_READ;
- auio.uio_td = td;
- auio.uio_offset = 0; /* XXX */
- auio.uio_resid = 0;
- tiov = iov;
- for (i = 0; i <uap->iovlen; i++, tiov++) {
- if ((auio.uio_resid += tiov->iov_len) < 0) {
- error = EINVAL;
- goto out;
- }
- }
- len = auio.uio_resid;
- fromsa = (struct sockaddr *)sockbufstore;
-
-#ifdef KTRACE
- if (KTRPOINT(td, KTR_GENIO))
- ktruio = cloneuio(&auio);
-#endif /* KTRACE */
- memset(&sinfo, 0, sizeof(struct sctp_sndrcvinfo));
- CURVNET_SET(so->so_vnet);
- error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
- fromsa, fromlen, &msg_flags,
- (struct sctp_sndrcvinfo *)&sinfo, 1);
- CURVNET_RESTORE();
- if (error != 0) {
- if (auio.uio_resid != len && (error == ERESTART ||
- error == EINTR || error == EWOULDBLOCK))
- error = 0;
- } else {
- if (uap->sinfo)
- error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
- }
-#ifdef KTRACE
- if (ktruio != NULL) {
- ktruio->uio_resid = len - auio.uio_resid;
- ktrgenio(uap->sd, UIO_READ, ktruio, error);
- }
-#endif /* KTRACE */
- if (error != 0)
- goto out;
- td->td_retval[0] = len - auio.uio_resid;
-
- if (fromlen && uap->from) {
- len = fromlen;
- if (len <= 0 || fromsa == 0)
- len = 0;
- else {
- len = MIN(len, fromsa->sa_len);
- error = copyout(fromsa, uap->from, (size_t)len);
- if (error != 0)
- goto out;
- }
- error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
- if (error != 0)
- goto out;
- }
-#ifdef KTRACE
- if (KTRPOINT(td, KTR_STRUCT))
- ktrsockaddr(fromsa);
-#endif
- if (uap->msg_flags) {
- error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
- if (error != 0)
- goto out;
- }
-out:
- free(iov, M_IOV);
-out1:
- if (fp != NULL)
- fdrop(fp, td);
-
- return (error);
-#else /* SCTP */
- return (EOPNOTSUPP);
-#endif /* SCTP */
-}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 5a37b0b..19feecd 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -667,6 +667,10 @@ bd_speedup(void)
mtx_unlock(&bdlock);
}
+#ifndef NSWBUF_MIN
+#define NSWBUF_MIN 16
+#endif
+
#ifdef __i386__
#define TRANSIENT_DENOM 5
#else
@@ -778,11 +782,10 @@ kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
* swbufs are used as temporary holders for I/O, such as paging I/O.
* We have no less then 16 and no more then 256.
*/
- nswbuf = max(min(nbuf/4, 256), 16);
-#ifdef NSWBUF_MIN
+ nswbuf = min(nbuf / 4, 256);
+ TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
if (nswbuf < NSWBUF_MIN)
nswbuf = NSWBUF_MIN;
-#endif
/*
* Reserve space for the buffer cache buffers
diff --git a/sys/libkern/explicit_bzero.c b/sys/libkern/explicit_bzero.c
new file mode 100644
index 0000000..2468c55
--- /dev/null
+++ b/sys/libkern/explicit_bzero.c
@@ -0,0 +1,24 @@
+/* $OpenBSD: explicit_bzero.c,v 1.3 2014/06/21 02:34:26 matthew Exp $ */
+/*
+ * Public domain.
+ * Written by Matthew Dempsky.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/libkern.h>
+
+__attribute__((weak)) void __explicit_bzero_hook(void *, size_t);
+
+__attribute__((weak)) void
+__explicit_bzero_hook(void *buf, size_t len)
+{
+}
+
+void
+explicit_bzero(void *buf, size_t len)
+{
+ memset(buf, 0, len);
+ __explicit_bzero_hook(buf, len);
+}
diff --git a/sys/mips/include/md_var.h b/sys/mips/include/md_var.h
index 71a78e7..f3778a8 100644
--- a/sys/mips/include/md_var.h
+++ b/sys/mips/include/md_var.h
@@ -79,5 +79,5 @@ void busdma_swi(void);
struct dumperinfo;
void dump_add_page(vm_paddr_t);
void dump_drop_page(vm_paddr_t);
-void minidumpsys(struct dumperinfo *);
+int minidumpsys(struct dumperinfo *);
#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/mips/mips/dump_machdep.c b/sys/mips/mips/dump_machdep.c
index 26f93ff..fa96e79 100644
--- a/sys/mips/mips/dump_machdep.c
+++ b/sys/mips/mips/dump_machdep.c
@@ -266,10 +266,8 @@ dumpsys(struct dumperinfo *di)
size_t hdrsz;
int error;
- if (do_minidump) {
- minidumpsys(di);
- return (0);
- }
+ if (do_minidump)
+ return (minidumpsys(di));
bzero(&ehdr, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = ELFMAG0;
diff --git a/sys/mips/mips/minidump_machdep.c b/sys/mips/mips/minidump_machdep.c
index cded3ae..2122e00 100644
--- a/sys/mips/mips/minidump_machdep.c
+++ b/sys/mips/mips/minidump_machdep.c
@@ -153,7 +153,7 @@ write_buffer(struct dumperinfo *di, char *ptr, size_t sz)
return (0);
}
-void
+int
minidumpsys(struct dumperinfo *di)
{
struct minidumphdr mdhdr;
@@ -325,7 +325,7 @@ minidumpsys(struct dumperinfo *di)
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
- return;
+ return (0);
fail:
if (error < 0)
@@ -337,4 +337,5 @@ fail:
printf("\nDump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
+ return (error);
}
diff --git a/sys/modules/mrsas/Makefile b/sys/modules/mrsas/Makefile
index 7ff6c81..5976b98 100644
--- a/sys/modules/mrsas/Makefile
+++ b/sys/modules/mrsas/Makefile
@@ -1,14 +1,24 @@
+# Makefile for mrsas driver
# $FreeBSD$
-.PATH: ${.CURDIR}/../../dev/mrsas
+KMOD=mrsas
+.PATH: ${.CURDIR}/../../dev/${KMOD}
-KMOD= mrsas
-SRCS= mrsas.c mrsas_cam.c mrsas_ioctl.c mrsas_fp.c
-SRCS+= device_if.h bus_if.h pci_if.h opt_cam.h opt_scsi.h
+.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "amd64"
+SUBDIR+= mrsas_linux
+.endif
+
+SRCS=mrsas.c mrsas_cam.c mrsas_ioctl.c mrsas_fp.c
+SRCS+= device_if.h bus_if.h pci_if.h opt_cam.h opt_scsi.h
#CFLAGS+= -MRSAS_DEBUG
.include <bsd.kmod.mk>
-#CFLAGS+= -fgnu89-inline
+CFLAGS+= -fgnu89-inline
+
+TARGET_ARCH = ${MACHINE_ARCH}
+.if ${TARGET_ARCH} == "amd64"
+CFLAGS+= -DCOMPAT_FREEBSD32 -D_STANDALONE
+.endif
clean_cscope:
rm -f cscope*
diff --git a/sys/modules/mrsas/mrsas_linux/Makefile b/sys/modules/mrsas/mrsas_linux/Makefile
new file mode 100644
index 0000000..3563892
--- /dev/null
+++ b/sys/modules/mrsas/mrsas_linux/Makefile
@@ -0,0 +1,10 @@
+# Makefile for mrsas driver
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/../../../dev/mrsas
+
+KMOD= mrsas_linux
+SRCS= mrsas_linux.c
+SRCS+= device_if.h bus_if.h pci_if.h
+
+.include <bsd.kmod.mk>
diff --git a/sys/net/if_enc.c b/sys/net/if_enc.c
index fc78769..b43b7d2 100644
--- a/sys/net/if_enc.c
+++ b/sys/net/if_enc.c
@@ -230,6 +230,7 @@ ipsec_filter(struct mbuf **mp, int dir, int flags)
{
int error, i;
struct ip *ip;
+ struct ifnet *rcvif;
KASSERT(encif != NULL, ("%s: encif is null", __func__));
KASSERT(flags & (ENC_IN|ENC_OUT),
@@ -268,6 +269,8 @@ ipsec_filter(struct mbuf **mp, int dir, int flags)
}
error = 0;
+ rcvif = (*mp)->m_pkthdr.rcvif;
+ (*mp)->m_pkthdr.rcvif = encif;
ip = mtod(*mp, struct ip *);
switch (ip->ip_v) {
#ifdef INET
@@ -298,6 +301,7 @@ ipsec_filter(struct mbuf **mp, int dir, int flags)
if (error != 0)
goto bad;
+ (*mp)->m_pkthdr.rcvif = rcvif;
return (error);
bad:
diff --git a/sys/net/if_gif.c b/sys/net/if_gif.c
index f4cc3d8..5ae9ed3 100644
--- a/sys/net/if_gif.c
+++ b/sys/net/if_gif.c
@@ -547,6 +547,7 @@ gif_input(struct mbuf *m, int af, struct ifnet *ifp)
}
sc = ifp->if_softc;
m->m_pkthdr.rcvif = ifp;
+ m_clrprotoflags(m);
#ifdef MAC
mac_ifnet_create_mbuf(ifp, m);
diff --git a/sys/netinet/ip_gre.c b/sys/netinet/ip_gre.c
index 897adac..fc87327 100644
--- a/sys/netinet/ip_gre.c
+++ b/sys/netinet/ip_gre.c
@@ -205,7 +205,7 @@ gre_input2(struct mbuf *m ,int hlen, u_char proto)
}
m->m_pkthdr.rcvif = GRE2IFP(sc);
-
+ m_clrprotoflags(m);
netisr_queue(isr, m);
/* Packet is done, no further processing needed. */
diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c
index 8260ba7..7cbb30f 100644
--- a/sys/netinet/sctp_output.c
+++ b/sys/netinet/sctp_output.c
@@ -11846,7 +11846,7 @@ sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
- int number_entries, uint16_t * list,
+ uint16_t number_entries, uint16_t * list,
uint8_t send_out_req,
uint8_t send_in_req,
uint8_t send_tsn_req,
@@ -11879,6 +11879,14 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
return (EINVAL);
}
+ if (number_entries > (MCLBYTES -
+ SCTP_MIN_OVERHEAD -
+ sizeof(struct sctp_chunkhdr) -
+ sizeof(struct sctp_stream_reset_out_request)) /
+ sizeof(uint16_t)) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
sctp_alloc_a_chunk(stcb, chk);
if (chk == NULL) {
SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
diff --git a/sys/netinet/sctp_output.h b/sys/netinet/sctp_output.h
index 59af5af..8789df9 100644
--- a/sys/netinet/sctp_output.h
+++ b/sys/netinet/sctp_output.h
@@ -181,8 +181,8 @@ sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *,
uint32_t, uint32_t, uint32_t, uint32_t);
int
-sctp_send_str_reset_req(struct sctp_tcb *, int, uint16_t *, uint8_t, uint8_t,
- uint8_t, uint8_t, uint16_t, uint16_t, uint8_t);
+sctp_send_str_reset_req(struct sctp_tcb *, uint16_t, uint16_t *, uint8_t,
+ uint8_t, uint8_t, uint8_t, uint16_t, uint16_t, uint8_t);
void
sctp_send_abort(struct mbuf *, int, struct sockaddr *, struct sockaddr *,
diff --git a/sys/netinet/sctp_syscalls.c b/sys/netinet/sctp_syscalls.c
new file mode 100644
index 0000000..3d0f549
--- /dev/null
+++ b/sys/netinet/sctp_syscalls.c
@@ -0,0 +1,588 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_capsicum.h"
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_sctp.h"
+#include "opt_compat.h"
+#include "opt_ktrace.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/capsicum.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sysproto.h>
+#include <sys/malloc.h>
+#include <sys/filedesc.h>
+#include <sys/event.h>
+#include <sys/proc.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filio.h>
+#include <sys/jail.h>
+#include <sys/mount.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/sf_buf.h>
+#include <sys/sysent.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/signalvar.h>
+#include <sys/syscall.h>
+#include <sys/syscallsubr.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+#include <sys/vnode.h>
+#ifdef KTRACE
+#include <sys/ktrace.h>
+#endif
+#ifdef COMPAT_FREEBSD32
+#include <compat/freebsd32/freebsd32_util.h>
+#endif
+
+#include <net/vnet.h>
+
+#include <security/audit/audit.h>
+#include <security/mac/mac_framework.h>
+
+#include <netinet/sctp.h>
+#include <netinet/sctp_peeloff.h>
+
+static struct syscall_helper_data sctp_syscalls[] = {
+ SYSCALL_INIT_HELPER(sctp_peeloff),
+ SYSCALL_INIT_HELPER(sctp_generic_sendmsg),
+ SYSCALL_INIT_HELPER(sctp_generic_sendmsg_iov),
+ SYSCALL_INIT_HELPER(sctp_generic_recvmsg),
+ SYSCALL_INIT_LAST
+};
+
+static void
+sctp_syscalls_init(void *unused __unused)
+{
+ int error;
+
+ error = syscall_helper_register(sctp_syscalls);
+ KASSERT((error == 0),
+ ("%s: syscall_helper_register failed for sctp syscalls", __func__));
+#ifdef COMPAT_FREEBSD32
+ error = syscall32_helper_register(sctp_syscalls);
+ KASSERT((error == 0),
+ ("%s: syscall32_helper_register failed for sctp syscalls",
+ __func__));
+#endif
+}
+SYSINIT(sctp_syscalls, SI_SUB_SYSCALLS, SI_ORDER_ANY, sctp_syscalls_init, NULL);
+
+/*
+ * SCTP syscalls.
+ * Functionality only compiled in if SCTP is defined in the kernel Makefile,
+ * otherwise all return EOPNOTSUPP.
+ * XXX: We should make this loadable one day.
+ */
+int
+sys_sctp_peeloff(td, uap)
+ struct thread *td;
+ struct sctp_peeloff_args /* {
+ int sd;
+ caddr_t name;
+ } */ *uap;
+{
+#if (defined(INET) || defined(INET6)) && defined(SCTP)
+ struct file *nfp = NULL;
+ struct socket *head, *so;
+ cap_rights_t rights;
+ u_int fflag;
+ int error, fd;
+
+ AUDIT_ARG_FD(uap->sd);
+ error = fgetsock(td, uap->sd, cap_rights_init(&rights, CAP_PEELOFF),
+ &head, &fflag);
+ if (error != 0)
+ goto done2;
+ if (head->so_proto->pr_protocol != IPPROTO_SCTP) {
+ error = EOPNOTSUPP;
+ goto done;
+ }
+ error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
+ if (error != 0)
+ goto done;
+ /*
+ * At this point we know we do have a assoc to pull
+ * we proceed to get the fd setup. This may block
+ * but that is ok.
+ */
+
+ error = falloc(td, &nfp, &fd, 0);
+ if (error != 0)
+ goto done;
+ td->td_retval[0] = fd;
+
+ CURVNET_SET(head->so_vnet);
+ so = sonewconn(head, SS_ISCONNECTED);
+ if (so == NULL) {
+ error = ENOMEM;
+ goto noconnection;
+ }
+ /*
+ * Before changing the flags on the socket, we have to bump the
+ * reference count. Otherwise, if the protocol calls sofree(),
+ * the socket will be released due to a zero refcount.
+ */
+ SOCK_LOCK(so);
+ soref(so); /* file descriptor reference */
+ SOCK_UNLOCK(so);
+
+ ACCEPT_LOCK();
+
+ TAILQ_REMOVE(&head->so_comp, so, so_list);
+ head->so_qlen--;
+ so->so_state |= (head->so_state & SS_NBIO);
+ so->so_state &= ~SS_NOFDREF;
+ so->so_qstate &= ~SQ_COMP;
+ so->so_head = NULL;
+ ACCEPT_UNLOCK();
+ finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
+ error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
+ if (error != 0)
+ goto noconnection;
+ if (head->so_sigio != NULL)
+ fsetown(fgetown(&head->so_sigio), &so->so_sigio);
+
+noconnection:
+ /*
+ * close the new descriptor, assuming someone hasn't ripped it
+ * out from under us.
+ */
+ if (error != 0)
+ fdclose(td->td_proc->p_fd, nfp, fd, td);
+
+ /*
+ * Release explicitly held references before returning.
+ */
+ CURVNET_RESTORE();
+done:
+ if (nfp != NULL)
+ fdrop(nfp, td);
+ fputsock(head);
+done2:
+ return (error);
+#else /* SCTP */
+ return (EOPNOTSUPP);
+#endif /* SCTP */
+}
+
+int
+sys_sctp_generic_sendmsg (td, uap)
+ struct thread *td;
+ struct sctp_generic_sendmsg_args /* {
+ int sd,
+ caddr_t msg,
+ int mlen,
+ caddr_t to,
+ __socklen_t tolen,
+ struct sctp_sndrcvinfo *sinfo,
+ int flags
+ } */ *uap;
+{
+#if (defined(INET) || defined(INET6)) && defined(SCTP)
+ struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
+ struct socket *so;
+ struct file *fp = NULL;
+ struct sockaddr *to = NULL;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+ struct uio auio;
+ struct iovec iov[1];
+ cap_rights_t rights;
+ int error = 0, len;
+
+ if (uap->sinfo != NULL) {
+ error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
+ if (error != 0)
+ return (error);
+ u_sinfo = &sinfo;
+ }
+
+ cap_rights_init(&rights, CAP_SEND);
+ if (uap->tolen != 0) {
+ error = getsockaddr(&to, uap->to, uap->tolen);
+ if (error != 0) {
+ to = NULL;
+ goto sctp_bad2;
+ }
+ cap_rights_set(&rights, CAP_CONNECT);
+ }
+
+ AUDIT_ARG_FD(uap->sd);
+ error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
+ if (error != 0)
+ goto sctp_bad;
+#ifdef KTRACE
+ if (to && (KTRPOINT(td, KTR_STRUCT)))
+ ktrsockaddr(to);
+#endif
+
+ iov[0].iov_base = uap->msg;
+ iov[0].iov_len = uap->mlen;
+
+ so = (struct socket *)fp->f_data;
+ if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
+ error = EOPNOTSUPP;
+ goto sctp_bad;
+ }
+#ifdef MAC
+ error = mac_socket_check_send(td->td_ucred, so);
+ if (error != 0)
+ goto sctp_bad;
+#endif /* MAC */
+
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = 1;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ len = auio.uio_resid = uap->mlen;
+ CURVNET_SET(so->so_vnet);
+ error = sctp_lower_sosend(so, to, &auio, (struct mbuf *)NULL,
+ (struct mbuf *)NULL, uap->flags, u_sinfo, td);
+ CURVNET_RESTORE();
+ if (error != 0) {
+ if (auio.uio_resid != len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ /* Generation of SIGPIPE can be controlled per socket. */
+ if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
+ !(uap->flags & MSG_NOSIGNAL)) {
+ PROC_LOCK(td->td_proc);
+ tdsignal(td, SIGPIPE);
+ PROC_UNLOCK(td->td_proc);
+ }
+ }
+ if (error == 0)
+ td->td_retval[0] = len - auio.uio_resid;
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = td->td_retval[0];
+ ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
+ }
+#endif /* KTRACE */
+sctp_bad:
+ if (fp != NULL)
+ fdrop(fp, td);
+sctp_bad2:
+ free(to, M_SONAME);
+ return (error);
+#else /* SCTP */
+ return (EOPNOTSUPP);
+#endif /* SCTP */
+}
+
+int
+sys_sctp_generic_sendmsg_iov(td, uap)
+ struct thread *td;
+ struct sctp_generic_sendmsg_iov_args /* {
+ int sd,
+ struct iovec *iov,
+ int iovlen,
+ caddr_t to,
+ __socklen_t tolen,
+ struct sctp_sndrcvinfo *sinfo,
+ int flags
+ } */ *uap;
+{
+#if (defined(INET) || defined(INET6)) && defined(SCTP)
+ struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
+ struct socket *so;
+ struct file *fp = NULL;
+ struct sockaddr *to = NULL;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+ struct uio auio;
+ struct iovec *iov, *tiov;
+ cap_rights_t rights;
+ ssize_t len;
+ int error, i;
+
+ if (uap->sinfo != NULL) {
+ error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
+ if (error != 0)
+ return (error);
+ u_sinfo = &sinfo;
+ }
+ cap_rights_init(&rights, CAP_SEND);
+ if (uap->tolen != 0) {
+ error = getsockaddr(&to, uap->to, uap->tolen);
+ if (error != 0) {
+ to = NULL;
+ goto sctp_bad2;
+ }
+ cap_rights_set(&rights, CAP_CONNECT);
+ }
+
+ AUDIT_ARG_FD(uap->sd);
+ error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
+ if (error != 0)
+ goto sctp_bad1;
+
+#ifdef COMPAT_FREEBSD32
+ if (SV_CURPROC_FLAG(SV_ILP32))
+ error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
+ uap->iovlen, &iov, EMSGSIZE);
+ else
+#endif
+ error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
+ if (error != 0)
+ goto sctp_bad1;
+#ifdef KTRACE
+ if (to && (KTRPOINT(td, KTR_STRUCT)))
+ ktrsockaddr(to);
+#endif
+
+ so = (struct socket *)fp->f_data;
+ if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
+ error = EOPNOTSUPP;
+ goto sctp_bad;
+ }
+#ifdef MAC
+ error = mac_socket_check_send(td->td_ucred, so);
+ if (error != 0)
+ goto sctp_bad;
+#endif /* MAC */
+
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = uap->iovlen;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ tiov = iov;
+ for (i = 0; i < uap->iovlen; i++, tiov++) {
+ if ((auio.uio_resid += tiov->iov_len) < 0) {
+ error = EINVAL;
+ goto sctp_bad;
+ }
+ }
+ len = auio.uio_resid;
+ CURVNET_SET(so->so_vnet);
+ error = sctp_lower_sosend(so, to, &auio,
+ (struct mbuf *)NULL, (struct mbuf *)NULL,
+ uap->flags, u_sinfo, td);
+ CURVNET_RESTORE();
+ if (error != 0) {
+ if (auio.uio_resid != len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ /* Generation of SIGPIPE can be controlled per socket */
+ if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
+ !(uap->flags & MSG_NOSIGNAL)) {
+ PROC_LOCK(td->td_proc);
+ tdsignal(td, SIGPIPE);
+ PROC_UNLOCK(td->td_proc);
+ }
+ }
+ if (error == 0)
+ td->td_retval[0] = len - auio.uio_resid;
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = td->td_retval[0];
+ ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
+ }
+#endif /* KTRACE */
+sctp_bad:
+ free(iov, M_IOV);
+sctp_bad1:
+ if (fp != NULL)
+ fdrop(fp, td);
+sctp_bad2:
+ free(to, M_SONAME);
+ return (error);
+#else /* SCTP */
+ return (EOPNOTSUPP);
+#endif /* SCTP */
+}
+
+int
+sys_sctp_generic_recvmsg(td, uap)
+ struct thread *td;
+ struct sctp_generic_recvmsg_args /* {
+ int sd,
+ struct iovec *iov,
+ int iovlen,
+ struct sockaddr *from,
+ __socklen_t *fromlenaddr,
+ struct sctp_sndrcvinfo *sinfo,
+ int *msg_flags
+ } */ *uap;
+{
+#if (defined(INET) || defined(INET6)) && defined(SCTP)
+ uint8_t sockbufstore[256];
+ struct uio auio;
+ struct iovec *iov, *tiov;
+ struct sctp_sndrcvinfo sinfo;
+ struct socket *so;
+ struct file *fp = NULL;
+ struct sockaddr *fromsa;
+ cap_rights_t rights;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+ ssize_t len;
+ int error, fromlen, i, msg_flags;
+
+ AUDIT_ARG_FD(uap->sd);
+ error = getsock_cap(td->td_proc->p_fd, uap->sd,
+ cap_rights_init(&rights, CAP_RECV), &fp, NULL);
+ if (error != 0)
+ return (error);
+#ifdef COMPAT_FREEBSD32
+ if (SV_CURPROC_FLAG(SV_ILP32))
+ error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
+ uap->iovlen, &iov, EMSGSIZE);
+ else
+#endif
+ error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
+ if (error != 0)
+ goto out1;
+
+ so = fp->f_data;
+ if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+#ifdef MAC
+ error = mac_socket_check_receive(td->td_ucred, so);
+ if (error != 0)
+ goto out;
+#endif /* MAC */
+
+ if (uap->fromlenaddr != NULL) {
+ error = copyin(uap->fromlenaddr, &fromlen, sizeof (fromlen));
+ if (error != 0)
+ goto out;
+ } else {
+ fromlen = 0;
+ }
+ if (uap->msg_flags) {
+ error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
+ if (error != 0)
+ goto out;
+ } else {
+ msg_flags = 0;
+ }
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = uap->iovlen;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_READ;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ tiov = iov;
+ for (i = 0; i < uap->iovlen; i++, tiov++) {
+ if ((auio.uio_resid += tiov->iov_len) < 0) {
+ error = EINVAL;
+ goto out;
+ }
+ }
+ len = auio.uio_resid;
+ fromsa = (struct sockaddr *)sockbufstore;
+
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_GENIO))
+ ktruio = cloneuio(&auio);
+#endif /* KTRACE */
+ memset(&sinfo, 0, sizeof(struct sctp_sndrcvinfo));
+ CURVNET_SET(so->so_vnet);
+ error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
+ fromsa, fromlen, &msg_flags,
+ (struct sctp_sndrcvinfo *)&sinfo, 1);
+ CURVNET_RESTORE();
+ if (error != 0) {
+ if (auio.uio_resid != len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ } else {
+ if (uap->sinfo)
+ error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
+ }
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = len - auio.uio_resid;
+ ktrgenio(uap->sd, UIO_READ, ktruio, error);
+ }
+#endif /* KTRACE */
+ if (error != 0)
+ goto out;
+ td->td_retval[0] = len - auio.uio_resid;
+
+ if (fromlen && uap->from) {
+ len = fromlen;
+ if (len <= 0 || fromsa == 0)
+ len = 0;
+ else {
+ len = MIN(len, fromsa->sa_len);
+ error = copyout(fromsa, uap->from, (size_t)len);
+ if (error != 0)
+ goto out;
+ }
+ error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
+ if (error != 0)
+ goto out;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(fromsa);
+#endif
+ if (uap->msg_flags) {
+ error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
+ if (error != 0)
+ goto out;
+ }
+out:
+ free(iov, M_IOV);
+out1:
+ if (fp != NULL)
+ fdrop(fp, td);
+
+ return (error);
+#else /* SCTP */
+ return (EOPNOTSUPP);
+#endif /* SCTP */
+}
diff --git a/sys/netinet/sctp_usrreq.c b/sys/netinet/sctp_usrreq.c
index e1fa351..e1ad178 100644
--- a/sys/netinet/sctp_usrreq.c
+++ b/sys/netinet/sctp_usrreq.c
@@ -4431,6 +4431,12 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
SCTP_TCB_UNLOCK(stcb);
break;
}
+ if (sizeof(struct sctp_reset_streams) +
+ strrst->srs_number_streams * sizeof(uint16_t) > optsize) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
if (stcb->asoc.stream_reset_outstanding) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
error = EALREADY;
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index 07dd5c3..7db0e50 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -675,6 +675,12 @@ just_return:
send:
SOCKBUF_LOCK_ASSERT(&so->so_snd);
+ if (len > 0) {
+ if (len >= tp->t_maxseg)
+ tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
+ else
+ tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
+ }
/*
* Before ESTABLISHED, force sending of initial options
* unless TCP set not to do any options.
@@ -1303,8 +1309,12 @@ send:
*
* NB: Don't set DF on small MTU/MSS to have a safe fallback.
*/
- if (V_path_mtu_discovery && tp->t_maxopd > V_tcp_minmss)
+ if (V_path_mtu_discovery && tp->t_maxopd > V_tcp_minmss) {
ip->ip_off |= htons(IP_DF);
+ tp->t_flags2 |= TF2_PLPMTU_PMTUD;
+ } else {
+ tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
+ }
if (tp->t_state == TCPS_SYN_SENT)
TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
diff --git a/sys/netinet/tcp_timer.c b/sys/netinet/tcp_timer.c
index 1767e1e..62a9b6d 100644
--- a/sys/netinet/tcp_timer.c
+++ b/sys/netinet/tcp_timer.c
@@ -66,6 +66,9 @@ __FBSDID("$FreeBSD$");
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
+#ifdef INET6
+#include <netinet6/tcp6_var.h>
+#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
@@ -127,6 +130,54 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW,
&tcp_rexmit_drop_options, 0,
"Drop TCP options from 3rd and later retransmitted SYN");
+static VNET_DEFINE(int, tcp_pmtud_blackhole_detect);
+#define V_tcp_pmtud_blackhole_detect VNET(tcp_pmtud_blackhole_detect)
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
+ CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_VNET,
+ &VNET_NAME(tcp_pmtud_blackhole_detect), 0,
+ "Path MTU Discovery Black Hole Detection Enabled");
+
+static VNET_DEFINE(int, tcp_pmtud_blackhole_activated);
+#define V_tcp_pmtud_blackhole_activated \
+ VNET(tcp_pmtud_blackhole_activated)
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_activated,
+ CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_VNET,
+ &VNET_NAME(tcp_pmtud_blackhole_activated), 0,
+ "Path MTU Discovery Black Hole Detection, Activation Count");
+
+static VNET_DEFINE(int, tcp_pmtud_blackhole_activated_min_mss);
+#define V_tcp_pmtud_blackhole_activated_min_mss \
+ VNET(tcp_pmtud_blackhole_activated_min_mss)
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_activated_min_mss,
+ CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_VNET,
+ &VNET_NAME(tcp_pmtud_blackhole_activated_min_mss), 0,
+ "Path MTU Discovery Black Hole Detection, Activation Count at min MSS");
+
+static VNET_DEFINE(int, tcp_pmtud_blackhole_failed);
+#define V_tcp_pmtud_blackhole_failed VNET(tcp_pmtud_blackhole_failed)
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_failed,
+ CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_VNET,
+ &VNET_NAME(tcp_pmtud_blackhole_failed), 0,
+ "Path MTU Discovery Black Hole Detection, Failure Count");
+
+#ifdef INET
+static VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200;
+#define V_tcp_pmtud_blackhole_mss VNET(tcp_pmtud_blackhole_mss)
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
+ CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_VNET,
+ &VNET_NAME(tcp_pmtud_blackhole_mss), 0,
+ "Path MTU Discovery Black Hole Detection lowered MSS");
+#endif
+
+#ifdef INET6
+static VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220;
+#define V_tcp_v6pmtud_blackhole_mss VNET(tcp_v6pmtud_blackhole_mss)
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss,
+ CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_VNET,
+ &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0,
+ "Path MTU Discovery IPv6 Black Hole Detection lowered MSS");
+#endif
+
#ifdef RSS
static int per_cpu_timers = 1;
#else
@@ -539,6 +590,7 @@ tcp_timer_rexmt(void * xtp)
ostate = tp->t_state;
#endif
+
INP_INFO_RLOCK(&V_tcbinfo);
inp = tp->t_inpcb;
/*
@@ -640,6 +692,102 @@ tcp_timer_rexmt(void * xtp)
rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
TCPT_RANGESET(tp->t_rxtcur, rexmt,
tp->t_rttmin, TCPTV_REXMTMAX);
+
+ if (V_tcp_pmtud_blackhole_detect && (tp->t_state == TCPS_ESTABLISHED)) {
+ int optlen;
+#ifdef INET6
+ int isipv6;
+#endif
+
+ if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
+ (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
+ (tp->t_rxtshift <= 2)) {
+ /*
+ * Enter Path MTU Black-hole Detection mechanism:
+ * - Disable Path MTU Discovery (IP "DF" bit).
+ * - Reduce MTU to lower value than what we
+ * negotiated with peer.
+ */
+ /* Record that we may have found a black hole. */
+ tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
+
+ /* Keep track of previous MSS. */
+ optlen = tp->t_maxopd - tp->t_maxseg;
+ tp->t_pmtud_saved_maxopd = tp->t_maxopd;
+
+ /*
+ * Reduce the MSS to blackhole value or to the default
+ * in an attempt to retransmit.
+ */
+#ifdef INET6
+ isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0;
+ if (isipv6 &&
+ tp->t_maxopd > V_tcp_v6pmtud_blackhole_mss) {
+ /* Use the sysctl tuneable blackhole MSS. */
+ tp->t_maxopd = V_tcp_v6pmtud_blackhole_mss;
+ V_tcp_pmtud_blackhole_activated++;
+ } else if (isipv6) {
+ /* Use the default MSS. */
+ tp->t_maxopd = V_tcp_v6mssdflt;
+ /*
+ * Disable Path MTU Discovery when we switch to
+ * minmss.
+ */
+ tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
+ V_tcp_pmtud_blackhole_activated_min_mss++;
+ }
+#endif
+#if defined(INET6) && defined(INET)
+ else
+#endif
+#ifdef INET
+ if (tp->t_maxopd > V_tcp_pmtud_blackhole_mss) {
+ /* Use the sysctl tuneable blackhole MSS. */
+ tp->t_maxopd = V_tcp_pmtud_blackhole_mss;
+ V_tcp_pmtud_blackhole_activated++;
+ } else {
+ /* Use the default MSS. */
+ tp->t_maxopd = V_tcp_mssdflt;
+ /*
+ * Disable Path MTU Discovery when we switch to
+ * minmss.
+ */
+ tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
+ V_tcp_pmtud_blackhole_activated_min_mss++;
+ }
+#endif
+ tp->t_maxseg = tp->t_maxopd - optlen;
+ /*
+ * Reset the slow-start flight size
+ * as it may depend on the new MSS.
+ */
+ if (CC_ALGO(tp)->conn_init != NULL)
+ CC_ALGO(tp)->conn_init(tp->ccv);
+ } else {
+ /*
+ * If further retransmissions are still unsuccessful
+ * with a lowered MTU, maybe this isn't a blackhole and
+ * we restore the previous MSS and blackhole detection
+ * flags.
+ */
+ if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
+ (tp->t_rxtshift > 4)) {
+ tp->t_flags2 |= TF2_PLPMTU_PMTUD;
+ tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
+ optlen = tp->t_maxopd - tp->t_maxseg;
+ tp->t_maxopd = tp->t_pmtud_saved_maxopd;
+ tp->t_maxseg = tp->t_maxopd - optlen;
+ V_tcp_pmtud_blackhole_failed++;
+ /*
+ * Reset the slow-start flight size as it
+ * may depend on the new MSS.
+ */
+ if (CC_ALGO(tp)->conn_init != NULL)
+ CC_ALGO(tp)->conn_init(tp->ccv);
+ }
+ }
+ }
+
/*
* Disable RFC1323 and SACK if we haven't got any response to
* our third SYN to work-around some broken terminal servers
diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
index ac87f88..9fe6447 100644
--- a/sys/netinet/tcp_var.h
+++ b/sys/netinet/tcp_var.h
@@ -200,6 +200,8 @@ struct tcpcb {
u_int t_keepcnt; /* number of keepalives before close */
u_int t_tsomax; /* TSO total burst length limit in bytes */
+ u_int t_pmtud_saved_maxopd; /* pre-blackhole MSS */
+ u_int t_flags2; /* More tcpcb flags storage */
uint32_t t_ispare[6]; /* 5 UTO, 1 TBD */
uint32_t t_tsomaxsegcount; /* TSO maximum segment count */
@@ -278,6 +280,13 @@ struct tcpcb {
#endif /* TCP_SIGNATURE */
/*
+ * Flags for PLPMTU handling, t_flags2
+ */
+#define TF2_PLPMTU_BLACKHOLE 0x00000001 /* Possible PLPMTUD Black Hole. */
+#define TF2_PLPMTU_PMTUD 0x00000002 /* Allowed to attempt PLPMTUD. */
+#define TF2_PLPMTU_MAXSEGSNT 0x00000004 /* Last seg sent was full seg. */
+
+/*
* Structure to hold TCP options that are only used during segment
* processing (in tcp_input), but not held in the tcpcb.
* It's basically used to reduce the number of parameters
diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c
index 92edcd1..9c3918b 100644
--- a/sys/netinet/udp_usrreq.c
+++ b/sys/netinet/udp_usrreq.c
@@ -316,9 +316,6 @@ udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
return;
}
- if (n == NULL)
- return;
-
off += sizeof(struct udphdr);
#ifdef IPSEC
@@ -578,8 +575,10 @@ udp_input(struct mbuf **mp, int *offp, int proto)
if (last != NULL) {
struct mbuf *n;
- n = m_copy(m, 0, M_COPYALL);
- udp_append(last, ip, n, iphlen, &udp_in);
+ if ((n = m_copy(m, 0, M_COPYALL)) != NULL) {
+ udp_append(last, ip, n, iphlen,
+ &udp_in);
+ }
INP_RUNLOCK(last);
}
last = inp;
diff --git a/sys/netinet6/udp6_usrreq.c b/sys/netinet6/udp6_usrreq.c
index e36f778..9ee75a0 100644
--- a/sys/netinet6/udp6_usrreq.c
+++ b/sys/netinet6/udp6_usrreq.c
@@ -141,9 +141,18 @@ udp6_append(struct inpcb *inp, struct mbuf *n, int off,
{
struct socket *so;
struct mbuf *opts;
+ struct udpcb *up;
INP_LOCK_ASSERT(inp);
+ /*
+ * Engage the tunneling protocol.
+ */
+ up = intoudpcb(inp);
+ if (up->u_tun_func != NULL) {
+ (*up->u_tun_func)(n, off, inp);
+ return;
+ }
#ifdef IPSEC
/* Check AH/ESP integrity. */
if (ipsec6_in_reject(n, inp)) {
@@ -265,7 +274,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto)
if (uh_sum != 0) {
UDPSTAT_INC(udps_badsum);
- /*goto badunlocked;*/
+ goto badunlocked;
}
/*
@@ -361,20 +370,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto)
if ((n = m_copy(m, 0, M_COPYALL)) != NULL) {
INP_RLOCK(last);
- up = intoudpcb(last);
- if (up->u_tun_func == NULL) {
- udp6_append(last, n, off, &fromsa);
- } else {
- /*
- * Engage the tunneling
- * protocol we will have to
- * leave the info_lock up,
- * since we are hunting
- * through multiple UDP's.
- *
- */
- (*up->u_tun_func)(n, off, last);
- }
+ udp6_append(last, n, off, &fromsa);
INP_RUNLOCK(last);
}
}
@@ -404,16 +400,8 @@ udp6_input(struct mbuf **mp, int *offp, int proto)
}
INP_RLOCK(last);
INP_INFO_RUNLOCK(pcbinfo);
- up = intoudpcb(last);
UDP_PROBE(receive, NULL, last, ip6, last, uh);
- if (up->u_tun_func == NULL) {
- udp6_append(last, m, off, &fromsa);
- } else {
- /*
- * Engage the tunneling protocol.
- */
- (*up->u_tun_func)(m, off, last);
- }
+ udp6_append(last, m, off, &fromsa);
INP_RUNLOCK(last);
return (IPPROTO_DONE);
}
@@ -492,15 +480,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto)
}
}
UDP_PROBE(receive, NULL, inp, ip6, inp, uh);
- if (up->u_tun_func == NULL) {
- udp6_append(inp, m, off, &fromsa);
- } else {
- /*
- * Engage the tunneling protocol.
- */
-
- (*up->u_tun_func)(m, off, inp);
- }
+ udp6_append(inp, m, off, &fromsa);
INP_RUNLOCK(inp);
return (IPPROTO_DONE);
diff --git a/sys/netpfil/ipfw/ip_fw2.c b/sys/netpfil/ipfw/ip_fw2.c
index 8771663..4fb3e3a 100644
--- a/sys/netpfil/ipfw/ip_fw2.c
+++ b/sys/netpfil/ipfw/ip_fw2.c
@@ -1774,12 +1774,10 @@ do { \
break;
case O_TCPOPTS:
- if (proto == IPPROTO_TCP && ulp != NULL) {
+ if (proto == IPPROTO_TCP && offset == 0 && ulp){
PULLUP_LEN(hlen, ulp,
(TCP(ulp)->th_off << 2));
- match = (proto == IPPROTO_TCP &&
- offset == 0 &&
- tcpopts_match(TCP(ulp), cmd));
+ match = tcpopts_match(TCP(ulp), cmd);
}
break;
diff --git a/sys/ofed/drivers/net/mlx4/mlx4.h b/sys/ofed/drivers/net/mlx4/mlx4.h
index 624a61c..47fde25 100644
--- a/sys/ofed/drivers/net/mlx4/mlx4.h
+++ b/sys/ofed/drivers/net/mlx4/mlx4.h
@@ -1234,6 +1234,7 @@ int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], u8 port,
int block_mcast_loopback,
enum mlx4_protocol prot, u64 *reg_id);
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/sys/ofed/include/linux/mlx4/device.h b/sys/ofed/include/linux/mlx4/device.h
index a1beedb..c1095a5 100644
--- a/sys/ofed/include/linux/mlx4/device.h
+++ b/sys/ofed/include/linux/mlx4/device.h
@@ -1208,7 +1208,6 @@ int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
diff --git a/sys/sys/bus.h b/sys/sys/bus.h
index f457905..7a2e56b5 100644
--- a/sys/sys/bus.h
+++ b/sys/sys/bus.h
@@ -331,6 +331,7 @@ struct resource_list *
bus_generic_get_resource_list (device_t, device_t);
void bus_generic_new_pass(device_t dev);
int bus_print_child_header(device_t dev, device_t child);
+int bus_print_child_domain(device_t dev, device_t child);
int bus_print_child_footer(device_t dev, device_t child);
int bus_generic_print_child(device_t dev, device_t child);
int bus_generic_probe(device_t dev);
@@ -364,6 +365,8 @@ int bus_generic_teardown_intr(device_t dev, device_t child,
int bus_generic_write_ivar(device_t dev, device_t child, int which,
uintptr_t value);
+int bus_generic_get_domain(device_t dev, device_t child, int *domain);
+
/*
* Wrapper functions for the BUS_*_RESOURCE methods to make client code
* a little simpler.
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index d3e6ce0..a2411317 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -248,7 +248,7 @@ struct mbuf {
* Flags preserved when copying m_pkthdr.
*/
#define M_COPYFLAGS \
- (M_PKTHDR|M_EOR|M_RDONLY|M_BCAST|M_MCAST|M_VLANTAG|M_PROMISC| \
+ (M_PKTHDR|M_EOR|M_RDONLY|M_BCAST|M_MCAST|M_PROMISC|M_VLANTAG|M_FLOWID| \
M_PROTOFLAGS)
/*
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 18d92b2..eabc134 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -58,7 +58,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1100036 /* Master, propagated to newvers */
+#define __FreeBSD_version 1100037 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h
index 5c3933b..bfdae0d 100644
--- a/sys/sys/socketvar.h
+++ b/sys/sys/socketvar.h
@@ -43,6 +43,7 @@
#include <sys/sockbuf.h>
#include <sys/sockstate.h>
#ifdef _KERNEL
+#include <sys/caprights.h>
#include <sys/sockopt.h>
#endif
@@ -318,6 +319,8 @@ extern int maxsockets;
extern u_long sb_max;
extern so_gen_t so_gencnt;
+struct file;
+struct filedesc;
struct mbuf;
struct sockaddr;
struct ucred;
@@ -336,6 +339,8 @@ struct uio;
*/
int sockargs(struct mbuf **mp, caddr_t buf, int buflen, int type);
int getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len);
+int getsock_cap(struct filedesc *fdp, int fd, cap_rights_t *rightsp,
+ struct file **fpp, u_int *fflagp);
void soabort(struct socket *so);
int soaccept(struct socket *so, struct sockaddr **nam);
int socheckuid(struct socket *so, uid_t uid);
diff --git a/sys/sys/syscall.h b/sys/sys/syscall.h
index a495008..576b420 100644
--- a/sys/sys/syscall.h
+++ b/sys/sys/syscall.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/kern/syscalls.master 263318 2014-03-18 21:32:03Z attilio
+ * created from FreeBSD: head/sys/kern/syscalls.master 272823 2014-10-09 15:16:52Z marcel
*/
#define SYS_syscall 0
diff --git a/sys/sys/syscall.mk b/sys/sys/syscall.mk
index 7095556..47d2046 100644
--- a/sys/sys/syscall.mk
+++ b/sys/sys/syscall.mk
@@ -1,7 +1,7 @@
# FreeBSD system call names.
# DO NOT EDIT-- this file is automatically generated.
# $FreeBSD$
-# created from FreeBSD: head/sys/kern/syscalls.master 263318 2014-03-18 21:32:03Z attilio
+# created from FreeBSD: head/sys/kern/syscalls.master 272823 2014-10-09 15:16:52Z marcel
MIASM = \
syscall.o \
exit.o \
diff --git a/sys/sys/sysproto.h b/sys/sys/sysproto.h
index 5087a4c..3d203ad 100644
--- a/sys/sys/sysproto.h
+++ b/sys/sys/sysproto.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: head/sys/kern/syscalls.master 263318 2014-03-18 21:32:03Z attilio
+ * created from FreeBSD: head/sys/kern/syscalls.master 272823 2014-10-09 15:16:52Z marcel
*/
#ifndef _SYS_SYSPROTO_H_
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index c484b7b..eae7272 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -232,6 +232,7 @@ void hexdump(const void *ptr, int length, const char *hdr, int flags);
#define ovbcopy(f, t, l) bcopy((f), (t), (l))
void bcopy(const void *from, void *to, size_t len) __nonnull(1) __nonnull(2);
void bzero(void *buf, size_t len) __nonnull(1);
+void explicit_bzero(void *, size_t) __nonnull(1);
void *memcpy(void *to, const void *from, size_t len) __nonnull(1) __nonnull(2);
void *memmove(void *dest, const void *src, size_t n) __nonnull(1) __nonnull(2);
diff --git a/sys/ufs/ufs/dir.h b/sys/ufs/ufs/dir.h
index 65f3ab1..82b9e77 100644
--- a/sys/ufs/ufs/dir.h
+++ b/sys/ufs/ufs/dir.h
@@ -110,7 +110,7 @@ struct direct {
*
*/
#define DIRECTSIZ(namlen) \
- (((uintptr_t)&((struct direct *)0)->d_name + \
+ ((offsetof(struct direct, d_name) + \
((namlen)+1)*sizeof(((struct direct *)0)->d_name[0]) + 3) & ~3)
#if (BYTE_ORDER == LITTLE_ENDIAN)
#define DIRSIZ(oldfmt, dp) \
diff --git a/sys/x86/acpica/srat.c b/sys/x86/acpica/srat.c
index a275220..c4d205b 100644
--- a/sys/x86/acpica/srat.c
+++ b/sys/x86/acpica/srat.c
@@ -62,6 +62,8 @@ int num_mem;
static ACPI_TABLE_SRAT *srat;
static vm_paddr_t srat_physaddr;
+static int vm_domains[VM_PHYSSEG_MAX];
+
static void srat_walk_table(acpi_subtable_handler *handler, void *arg);
/*
@@ -247,7 +249,6 @@ check_phys_avail(void)
static int
renumber_domains(void)
{
- int domains[VM_PHYSSEG_MAX];
int i, j, slot;
/* Enumerate all the domains. */
@@ -255,17 +256,17 @@ renumber_domains(void)
for (i = 0; i < num_mem; i++) {
/* See if this domain is already known. */
for (j = 0; j < vm_ndomains; j++) {
- if (domains[j] >= mem_info[i].domain)
+ if (vm_domains[j] >= mem_info[i].domain)
break;
}
- if (j < vm_ndomains && domains[j] == mem_info[i].domain)
+ if (j < vm_ndomains && vm_domains[j] == mem_info[i].domain)
continue;
/* Insert the new domain at slot 'j'. */
slot = j;
for (j = vm_ndomains; j > slot; j--)
- domains[j] = domains[j - 1];
- domains[slot] = mem_info[i].domain;
+ vm_domains[j] = vm_domains[j - 1];
+ vm_domains[slot] = mem_info[i].domain;
vm_ndomains++;
if (vm_ndomains > MAXMEMDOM) {
vm_ndomains = 1;
@@ -280,15 +281,15 @@ renumber_domains(void)
* If the domain is already the right value, no need
* to renumber.
*/
- if (domains[i] == i)
+ if (vm_domains[i] == i)
continue;
/* Walk the cpu[] and mem_info[] arrays to renumber. */
for (j = 0; j < num_mem; j++)
- if (mem_info[j].domain == domains[i])
+ if (mem_info[j].domain == vm_domains[i])
mem_info[j].domain = i;
for (j = 0; j <= MAX_APIC_ID; j++)
- if (cpus[j].enabled && cpus[j].domain == domains[i])
+ if (cpus[j].enabled && cpus[j].domain == vm_domains[i])
cpus[j].domain = i;
}
KASSERT(vm_ndomains > 0,
@@ -368,4 +369,23 @@ srat_set_cpus(void *dummy)
}
}
SYSINIT(srat_set_cpus, SI_SUB_CPU, SI_ORDER_ANY, srat_set_cpus, NULL);
+
+/*
+ * Map a _PXM value to a VM domain ID.
+ *
+ * Returns the domain ID, or -1 if no domain ID was found.
+ */
+int
+acpi_map_pxm_to_vm_domainid(int pxm)
+{
+ int i;
+
+ for (i = 0; i < vm_ndomains; i++) {
+ if (vm_domains[i] == pxm)
+ return (i);
+ }
+
+ return (-1);
+}
+
#endif /* MAXMEMDOM > 1 */
diff --git a/sys/x86/x86/dump_machdep.c b/sys/x86/x86/dump_machdep.c
index 940c519..4e048bf 100644
--- a/sys/x86/x86/dump_machdep.c
+++ b/sys/x86/x86/dump_machdep.c
@@ -275,10 +275,9 @@ dumpsys(struct dumperinfo *di)
size_t hdrsz;
int error;
- if (do_minidump) {
- minidumpsys(di);
- return (0);
- }
+ if (do_minidump)
+ return (minidumpsys(di));
+
bzero(&ehdr, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = ELFMAG0;
ehdr.e_ident[EI_MAG1] = ELFMAG1;
diff --git a/sys/x86/x86/local_apic.c b/sys/x86/x86/local_apic.c
index 2aa18f5..e3228ce 100644
--- a/sys/x86/x86/local_apic.c
+++ b/sys/x86/x86/local_apic.c
@@ -1366,7 +1366,7 @@ apic_init(void *dummy __unused)
printf("APIC: Using the %s enumerator.\n",
best_enum->apic_name);
-#ifndef __amd64__
+#ifdef I686_CPU
/*
* To work around an errata, we disable the local APIC on some
* CPUs during early startup. We need to turn the local APIC back
OpenPOWER on IntegriCloud