author    Renato Botelho <renato@netgate.com>    2015-12-17 11:13:55 -0200
committer Renato Botelho <renato@netgate.com>    2015-12-17 11:13:55 -0200
commit    02eb529700650bb0a6247479d6c83d206e0a5985 (patch)
tree      f2dd3e19d51cd578d33f2c231100e5248a937ad2
parent    4e66c4599e6123e69ca54036271685a3dde9a929 (diff)
parent    5952006acb1f0577732ac15f481976227292807f (diff)
Merge remote-tracking branch 'origin/stable/10' into devel
-rw-r--r--  Makefile.inc1 | 8
-rw-r--r--  UPDATING | 5
-rw-r--r--  bin/cat/Makefile | 6
-rw-r--r--  bin/cat/tests/Makefile | 18
-rw-r--r--  bin/freebsd-version/Makefile | 2
-rw-r--r--  contrib/binutils/gas/config/tc-mips.c | 15
-rw-r--r--  contrib/binutils/gas/config/tc-mips.h | 4
-rw-r--r--  etc/defaults/rc.conf | 1
-rw-r--r--  etc/mtree/BSD.tests.dist | 2
-rwxr-xr-x  etc/rc.d/nfsd | 6
-rwxr-xr-x  etc/rc.d/nfsuserd | 9
-rw-r--r--  lib/Makefile | 12
-rw-r--r--  sbin/geom/class/multipath/geom_multipath.c | 11
-rw-r--r--  sbin/pfctl/pfctl.c | 1
-rw-r--r--  sbin/swapon/swapon.c | 1
-rw-r--r--  share/man/man4/arcmsr.4 | 6
-rw-r--r--  share/man/man4/pass.4 | 132
-rw-r--r--  share/mk/bsd.README | 8
-rw-r--r--  share/mk/bsd.obj.mk | 3
-rw-r--r--  share/mk/bsd.test.mk | 3
-rw-r--r--  share/mk/local.sys.mk | 13
-rw-r--r--  share/mk/netbsd-tests.test.mk | 12
-rw-r--r--  sys/boot/amd64/efi/main.c | 14
-rw-r--r--  sys/boot/efi/include/efiapi.h | 54
-rw-r--r--  sys/cam/ata/ata_da.c | 25
-rw-r--r--  sys/cam/cam_ccb.h | 3
-rw-r--r--  sys/cam/cam_xpt.c | 11
-rw-r--r--  sys/cam/cam_xpt.h | 4
-rw-r--r--  sys/cam/scsi/scsi_da.c | 29
-rw-r--r--  sys/cam/scsi/scsi_pass.c | 1604
-rw-r--r--  sys/cam/scsi/scsi_pass.h | 8
-rw-r--r--  sys/dev/arcmsr/arcmsr.c | 860
-rw-r--r--  sys/dev/arcmsr/arcmsr.h | 883
-rw-r--r--  sys/dev/md/md.c | 307
-rw-r--r--  sys/dev/netmap/ixgbe_netmap.h | 3
-rw-r--r--  sys/fs/nfs/nfs.h | 18
-rw-r--r--  sys/fs/nfs/nfs_commonport.c | 24
-rw-r--r--  sys/fs/nfs/nfs_commonsubs.c | 730
-rw-r--r--  sys/fs/nfs/nfs_var.h | 2
-rw-r--r--  sys/fs/nfs/nfsrvstate.h | 17
-rw-r--r--  sys/fs/nfsserver/nfs_nfsdport.c | 26
-rw-r--r--  sys/geom/geom_disk.c | 188
-rw-r--r--  sys/geom/geom_io.c | 9
-rw-r--r--  sys/i386/conf/XEN | 3
-rw-r--r--  sys/ia64/include/bus.h | 3
-rw-r--r--  sys/kern/subr_bus_dma.c | 69
-rw-r--r--  sys/kern/subr_uio.c | 54
-rw-r--r--  sys/nfs/nfssvc.h | 1
-rw-r--r--  sys/pc98/include/bus.h | 6
-rw-r--r--  sys/sparc64/include/bus.h | 4
-rw-r--r--  sys/sys/bio.h | 1
-rw-r--r--  sys/sys/param.h | 2
-rw-r--r--  sys/sys/uio.h | 5
-rw-r--r--  sys/vm/vm_object.c | 181
-rw-r--r--  tools/build/mk/OptionalObsoleteFiles.inc | 2
-rw-r--r--  tools/build/options/WITHOUT_RCS | 4
-rwxr-xr-x  tools/regression/mac/mac_portacl/misc.sh | 3
-rw-r--r--  usr.bin/bmake/Makefile | 1
-rw-r--r--  usr.bin/mkimg/ebr.c | 4
-rw-r--r--  usr.bin/mkimg/mbr.c | 4
-rw-r--r--  usr.bin/mkimg/scheme.c | 1
-rw-r--r--  usr.bin/mkimg/scheme.h | 1
-rw-r--r--  usr.sbin/Makefile | 6
-rw-r--r--  usr.sbin/camdd/Makefile | 11
-rw-r--r--  usr.sbin/camdd/camdd.8 | 283
-rw-r--r--  usr.sbin/camdd/camdd.c | 3428
-rw-r--r--  usr.sbin/nfsuserd/nfsuserd.8 | 22
-rw-r--r--  usr.sbin/nfsuserd/nfsuserd.c | 60
68 files changed, 7815 insertions, 1441 deletions
diff --git a/Makefile.inc1 b/Makefile.inc1
index 83c682d..51bd461 100644
--- a/Makefile.inc1
+++ b/Makefile.inc1
@@ -1615,13 +1615,7 @@ _prereq_libs= gnu/lib/libssp/libssp_nonshared gnu/lib/libgcc lib/libcompiler_rt
# all shared libraries for ELF.
#
_startup_libs= gnu/lib/csu
-.if exists(${.CURDIR}/lib/csu/${MACHINE_ARCH}-elf)
-_startup_libs+= lib/csu/${MACHINE_ARCH}-elf
-.elif exists(${.CURDIR}/lib/csu/${MACHINE_ARCH})
-_startup_libs+= lib/csu/${MACHINE_ARCH}
-.else
-_startup_libs+= lib/csu/${MACHINE_CPUARCH}
-.endif
+_startup_libs+= lib/csu
_startup_libs+= gnu/lib/libgcc
_startup_libs+= lib/libcompiler_rt
_startup_libs+= lib/libc
diff --git a/UPDATING b/UPDATING
index 954c303..4daf9c7 100644
--- a/UPDATING
+++ b/UPDATING
@@ -16,6 +16,11 @@ from older versions of FreeBSD, try WITHOUT_CLANG to bootstrap to the tip of
stable/10, and then rebuild without this option. The bootstrap process from
older version of current is a bit fragile.
+20151214:
+ r292223 changed the internal interface between the nfsd.ko and
+	nfscommon.ko modules. As such, they must both be upgraded together.
+ __FreeBSD_version has been bumped because of this.
+
20151113:
Qlogic 24xx/25xx firmware images were updated from 5.5.0 to 7.3.0.
Kernel modules isp_2400_multi and isp_2500_multi were removed and
diff --git a/bin/cat/Makefile b/bin/cat/Makefile
index 672a4ee..3d6a7f0 100644
--- a/bin/cat/Makefile
+++ b/bin/cat/Makefile
@@ -1,6 +1,12 @@
# @(#)Makefile 8.1 (Berkeley) 5/31/93
# $FreeBSD$
+.include <bsd.own.mk>
+
PROG= cat
+.if ${MK_TESTS} != "no"
+SUBDIR+= tests
+.endif
+
.include <bsd.prog.mk>
diff --git a/bin/cat/tests/Makefile b/bin/cat/tests/Makefile
new file mode 100644
index 0000000..73f82e1
--- /dev/null
+++ b/bin/cat/tests/Makefile
@@ -0,0 +1,18 @@
+# $FreeBSD$
+
+OBJTOP= ${.OBJDIR}/../../..
+SRCTOP= ${.CURDIR}/../../..
+TESTSRC= ${SRCTOP}/contrib/netbsd-tests/bin/cat
+
+TESTSDIR= ${TESTSBASE}/bin/cat
+
+NETBSD_ATF_TESTS_SH= cat_test
+
+FILESDIR= ${TESTSDIR}
+
+FILES= d_align.in
+FILES+= d_align.out
+
+.include <netbsd-tests.test.mk>
+
+.include <bsd.test.mk>
diff --git a/bin/freebsd-version/Makefile b/bin/freebsd-version/Makefile
index e515d0c..91945ac 100644
--- a/bin/freebsd-version/Makefile
+++ b/bin/freebsd-version/Makefile
@@ -2,7 +2,7 @@
SCRIPTS = freebsd-version
MAN = freebsd-version.1
-CLEANFILES = freebsd-version.sh
+CLEANFILES = freebsd-version freebsd-version.sh
NEWVERS = ${.CURDIR}/../../sys/conf/newvers.sh
freebsd-version.sh: ${.CURDIR}/freebsd-version.sh.in ${NEWVERS}
diff --git a/contrib/binutils/gas/config/tc-mips.c b/contrib/binutils/gas/config/tc-mips.c
index e97193f..80fdfd0 100644
--- a/contrib/binutils/gas/config/tc-mips.c
+++ b/contrib/binutils/gas/config/tc-mips.c
@@ -15420,21 +15420,6 @@ MIPS options:\n\
-mno-octeon-useun generate MIPS unaligned load/store instructions\n"));
}
-enum dwarf2_format
-mips_dwarf2_format (void)
-{
- if (HAVE_64BIT_SYMBOLS)
- {
-#ifdef TE_IRIX
- return dwarf2_format_64bit_irix;
-#else
- return dwarf2_format_64bit;
-#endif
- }
- else
- return dwarf2_format_32bit;
-}
-
int
mips_dwarf2_addr_size (void)
{
diff --git a/contrib/binutils/gas/config/tc-mips.h b/contrib/binutils/gas/config/tc-mips.h
index 117417c..f246f68 100644
--- a/contrib/binutils/gas/config/tc-mips.h
+++ b/contrib/binutils/gas/config/tc-mips.h
@@ -155,10 +155,6 @@ extern void mips_emit_delays (void);
extern void mips_enable_auto_align (void);
#define md_elf_section_change_hook() mips_enable_auto_align()
-enum dwarf2_format;
-extern enum dwarf2_format mips_dwarf2_format (void);
-#define DWARF2_FORMAT() mips_dwarf2_format ()
-
extern int mips_dwarf2_addr_size (void);
#define DWARF2_ADDR_SIZE(bfd) mips_dwarf2_addr_size ()
diff --git a/etc/defaults/rc.conf b/etc/defaults/rc.conf
index f455bc3..d4e9701 100644
--- a/etc/defaults/rc.conf
+++ b/etc/defaults/rc.conf
@@ -330,6 +330,7 @@ nfs_access_cache="60" # Client cache timeout in seconds
nfs_server_enable="NO" # This host is an NFS server (or NO).
oldnfs_server_enable="NO" # Run the old NFS server (YES/NO).
nfs_server_flags="-u -t" # Flags to nfsd (if enabled).
+nfs_server_managegids="NO" # The NFS server maps gids for AUTH_SYS (or NO).
mountd_enable="NO" # Run mountd (or NO).
mountd_flags="-r" # Flags to mountd (if NFS server enabled).
weak_mountd_authentication="NO" # Allow non-root mount requests to be served.
diff --git a/etc/mtree/BSD.tests.dist b/etc/mtree/BSD.tests.dist
index b73ab33..41e8421 100644
--- a/etc/mtree/BSD.tests.dist
+++ b/etc/mtree/BSD.tests.dist
@@ -6,6 +6,8 @@
/set type=dir uname=root gname=wheel mode=0755
.
bin
+ cat
+ ..
chown
..
date
diff --git a/etc/rc.d/nfsd b/etc/rc.d/nfsd
index e067ad1..64e5a73 100755
--- a/etc/rc.d/nfsd
+++ b/etc/rc.d/nfsd
@@ -46,9 +46,13 @@ nfsd_precmd()
sysctl vfs.nfsd.nfs_privport=0 > /dev/null
fi
+ if checkyesno nfsv4_server_enable || \
+ checkyesno nfs_server_managegids; then
+ force_depend nfsuserd || err 1 "Cannot run nfsuserd"
+ fi
+
if checkyesno nfsv4_server_enable; then
sysctl vfs.nfsd.server_max_nfsvers=4 > /dev/null
- force_depend nfsuserd || err 1 "Cannot run nfsuserd"
else
echo 'NFSv4 is disabled'
sysctl vfs.nfsd.server_max_nfsvers=3 > /dev/null
diff --git a/etc/rc.d/nfsuserd b/etc/rc.d/nfsuserd
index 52246bb..d98d16e 100755
--- a/etc/rc.d/nfsuserd
+++ b/etc/rc.d/nfsuserd
@@ -15,5 +15,14 @@ command="/usr/sbin/${name}"
sig_stop="USR1"
load_rc_config $name
+start_precmd="nfsuserd_precmd"
+
+nfsuserd_precmd()
+{
+ if checkyesno nfs_server_managegids; then
+ rc_flags="-manage-gids ${nfsuserd_flags}"
+ fi
+ return 0
+}
run_rc_command "$1"
diff --git a/lib/Makefile b/lib/Makefile
index 572d374..72d3633 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -8,7 +8,7 @@
# and the main list to avoid needing a SUBDIR_DEPEND line on every library
# naming just these few items.
-SUBDIR_ORDERED= ${_csu} \
+SUBDIR_ORDERED= csu \
.WAIT \
libc \
libc_nonshared \
@@ -142,16 +142,6 @@ SUBDIR_DEPEND_libtacplus= libmd
SUBDIR_DEPEND_libulog= libmd
SUBDIR_DEPEND_libunbound= ${_libldns}
-.if exists(${.CURDIR}/csu/${MACHINE_ARCH}-elf)
-_csu=csu/${MACHINE_ARCH}-elf
-.elif exists(${.CURDIR}/csu/${MACHINE_ARCH})
-_csu=csu/${MACHINE_ARCH}
-.elif exists(${.CURDIR}/csu/${MACHINE_CPUARCH}/Makefile)
-_csu=csu/${MACHINE_CPUARCH}
-.else
-_csu=csu
-.endif
-
# NB: keep these sorted by MK_* knobs
.if ${MK_ATM} != "no"
diff --git a/sbin/geom/class/multipath/geom_multipath.c b/sbin/geom/class/multipath/geom_multipath.c
index cdf35d0..5743911 100644
--- a/sbin/geom/class/multipath/geom_multipath.c
+++ b/sbin/geom/class/multipath/geom_multipath.c
@@ -221,17 +221,15 @@ mp_label(struct gctl_req *req)
/*
* Allocate a sector to write as metadata.
*/
- sector = malloc(secsize);
+ sector = calloc(1, secsize);
if (sector == NULL) {
gctl_error(req, "unable to allocate metadata buffer");
return;
}
- memset(sector, 0, secsize);
rsector = malloc(secsize);
if (rsector == NULL) {
- free(sector);
gctl_error(req, "unable to allocate metadata buffer");
- return;
+ goto done;
}
/*
@@ -246,7 +244,7 @@ mp_label(struct gctl_req *req)
error = g_metadata_store(name, sector, secsize);
if (error != 0) {
gctl_error(req, "cannot store metadata on %s: %s.", name, strerror(error));
- return;
+ goto done;
}
/*
@@ -274,6 +272,9 @@ mp_label(struct gctl_req *req)
name2, name);
}
}
+done:
+ free(rsector);
+ free(sector);
}
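
The rewritten mp_label() above swaps malloc(3)+memset(3) for calloc(3) and routes every exit through a single "done:" label, which also plugs the leak of the read-back buffer on the early-return paths. A minimal sketch of the idiom follows (illustrative only, not the committed code; the GEOM request handling is elided):

/*
 * Illustrative sketch of the cleanup idiom adopted above.  calloc(3)
 * returns zeroed memory, so the separate memset(3) goes away, and the
 * single "done:" label frees both buffers on every path.  free(NULL)
 * is defined to be a no-op, so the label is safe even when the second
 * allocation failed -- provided rsector starts out NULL.
 */
#include <errno.h>
#include <stdlib.h>

static int
label_sketch(size_t secsize)
{
	unsigned char *sector, *rsector = NULL;
	int error = 0;

	sector = calloc(1, secsize);		/* zeroed metadata buffer */
	if (sector == NULL)
		return (ENOMEM);
	rsector = malloc(secsize);		/* read-back buffer */
	if (rsector == NULL) {
		error = ENOMEM;
		goto done;
	}
	/* ... store metadata, then re-read it into rsector to verify ... */
done:
	free(rsector);				/* no-op when NULL */
	free(sector);
	return (error);
}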
diff --git a/sbin/pfctl/pfctl.c b/sbin/pfctl/pfctl.c
index 6fd5020..d606b62b 100644
--- a/sbin/pfctl/pfctl.c
+++ b/sbin/pfctl/pfctl.c
@@ -1873,6 +1873,7 @@ pfctl_set_debug(struct pfctl *pf, char *d)
}
pf->debug_set = 1;
+ level = pf->debug;
if ((pf->opts & PF_OPT_NOACTION) == 0)
if (ioctl(dev, DIOCSETDEBUG, &level))
diff --git a/sbin/swapon/swapon.c b/sbin/swapon/swapon.c
index 033c40a..6351053 100644
--- a/sbin/swapon/swapon.c
+++ b/sbin/swapon/swapon.c
@@ -640,6 +640,7 @@ run_cmd(int *ofd, const char *cmdline, ...)
rv = vasprintf(&cmd, cmdline, ap);
if (rv == -1) {
warn("%s", __func__);
+ va_end(ap);
return (rv);
}
va_end(ap);
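
The one-line swapon.c change closes a missing va_end(3) on the vasprintf(3) failure path. The rule it enforces, sketched with a hypothetical stand-in for run_cmd(): every va_start(3) must be balanced by va_end(3) on every return path, and hoisting the va_end() above the error branch makes that pairing unconditional.

/*
 * Hypothetical wrapper illustrating the va_list rule behind the
 * swapon.c fix.  Calling va_end() once, before testing for failure,
 * guarantees the va_start/va_end pairing on both paths.
 */
#include <stdarg.h>
#include <stdio.h>

static int
format_cmd(char **cmdp, const char *fmt, ...)
{
	va_list ap;
	int rv;

	va_start(ap, fmt);
	rv = vasprintf(cmdp, fmt, ap);
	va_end(ap);			/* reached on success and failure */
	if (rv == -1)
		*cmdp = NULL;		/* contents undefined on error */
	return (rv);
}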
diff --git a/share/man/man4/arcmsr.4 b/share/man/man4/arcmsr.4
index 87c7050..5fd9453 100644
--- a/share/man/man4/arcmsr.4
+++ b/share/man/man4/arcmsr.4
@@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd February 27, 2013
+.Dd December 4, 2015
.Dt ARCMSR 4
.Os
.Sh NAME
@@ -94,6 +94,8 @@ ARC-1200
.It
ARC-1201
.It
+ARC-1203
+.It
ARC-1210
.It
ARC-1212
@@ -143,6 +145,8 @@ ARC-1681
ARC-1880
.It
ARC-1882
+.It
+ARC-1883
.El
.Sh FILES
.Bl -tag -width ".Pa /dev/arcmsr?" -compact
diff --git a/share/man/man4/pass.4 b/share/man/man4/pass.4
index 7819ea3..00b9ccd 100644
--- a/share/man/man4/pass.4
+++ b/share/man/man4/pass.4
@@ -27,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd October 10, 1998
+.Dd March 17, 2015
.Dt PASS 4
.Os
.Sh NAME
@@ -53,9 +53,13 @@ The
.Nm
driver attaches to every
.Tn SCSI
+and
+.Tn ATA
device found in the system.
Since it attaches to every device, it provides a generic means of accessing
.Tn SCSI
+and
+.Tn ATA
devices, and allows the user to access devices which have no
"standard" peripheral driver associated with them.
.Sh KERNEL CONFIGURATION
@@ -65,10 +69,12 @@ device in the kernel;
.Nm
devices are automatically allocated as
.Tn SCSI
+and
+.Tn ATA
devices are found.
.Sh IOCTLS
-.Bl -tag -width 012345678901234
-.It CAMIOCOMMAND
+.Bl -tag -width 5n
+.It CAMIOCOMMAND union ccb *
This ioctl takes most kinds of CAM CCBs and passes them through to the CAM
transport layer for action.
Note that some CCB types are not allowed
@@ -79,7 +85,7 @@ Some examples of xpt-only CCBs are XPT_SCAN_BUS,
XPT_DEV_MATCH, XPT_RESET_BUS, XPT_SCAN_LUN, XPT_ENG_INQ, and XPT_ENG_EXEC.
These CCB types have various attributes that make it illogical or
impossible to service them through the passthrough interface.
-.It CAMGETPASSTHRU
+.It CAMGETPASSTHRU union ccb *
This ioctl takes an XPT_GDEVLIST CCB, and returns the passthrough device
corresponding to the device in question.
Although this ioctl is available through the
@@ -90,6 +96,109 @@ ioctl.
It is probably more useful to issue this ioctl through the
.Xr xpt 4
device.
+.It CAMIOQUEUE union ccb *
+Queue a CCB to the
+.Xr pass 4
+driver to be executed asynchronously.
+The caller may use
+.Xr select 2 ,
+.Xr poll 2
+or
+.Xr kevent 2
+to receive notification when the CCB has completed.
+.Pp
+This ioctl takes most CAM CCBs, but some CCB types are not allowed through
+the pass device, and must be sent through the
+.Xr xpt 4
+device instead.
+Some examples of xpt-only CCBs are XPT_SCAN_BUS,
+XPT_DEV_MATCH, XPT_RESET_BUS, XPT_SCAN_LUN, XPT_ENG_INQ, and XPT_ENG_EXEC.
+These CCB types have various attributes that make it illogical or
+impossible to service them through the passthrough interface.
+.Pp
+Although the
+.Dv CAMIOQUEUE
+ioctl is not defined to take an argument, it does require a
+pointer to a union ccb.
+It is not defined to take an argument to avoid an extra malloc and copy
+inside the generic
+.Xr ioctl 2
+handler.
+.Pp
+The completed CCB will be returned via the
+.Dv CAMIOGET
+ioctl.
+An error will only be returned from the
+.Dv CAMIOQUEUE
+ioctl if there is an error allocating memory for the request or copying
+memory from userland.
+All other errors will be reported as standard CAM CCB status errors.
+Since the CCB is not copied back to the user process from the pass driver
+in the
+.Dv CAMIOQUEUE
+ioctl, the user's passed-in CCB will not be modified.
+This is the case even with immediate CCBs.
+Instead, the completed CCB must be retrieved via the
+.Dv CAMIOGET
+ioctl and the status examined.
+.Pp
+Multiple CCBs may be queued via the
+.Dv CAMIOQUEUE
+ioctl at any given time, and they may complete in a different order than
+the order that they were submitted.
+The caller must take steps to identify CCBs that are queued and completed.
+The
+.Dv periph_priv
+structure inside struct ccb_hdr is available for userland use with the
+.Dv CAMIOQUEUE
+and
+.Dv CAMIOGET
+ioctls, and will be preserved across calls.
+Also, the periph_links linked list pointers inside struct ccb_hdr are
+available for userland use with the
+.Dv CAMIOQUEUE
+and
+.Dv CAMIOGET
+ioctls and will be preserved across calls.
+.It CAMIOGET union ccb *
+Retrieve completed CAM CCBs queued via the
+.Dv CAMIOQUEUE
+ioctl.
+An error will only be returned from the
+.Dv CAMIOGET
+ioctl if the
+.Xr pass 4
+driver fails to copy data to the user process or if there are no completed
+CCBs available to retrieve.
+If no CCBs are available to retrieve,
+errno will be set to
+.Dv ENOENT .
+.Pp
+All other errors will be reported as standard CAM CCB status errors.
+.Pp
+Although the
+.Dv CAMIOGET
+ioctl is not defined to take an argument, it does require a
+pointer to a union ccb.
+It is not defined to take an argument to avoid an extra malloc and copy
+inside the generic
+.Xr ioctl 2
+handler.
+.Pp
+The pass driver will report via
+.Xr select 2 ,
+.Xr poll 2
+or
+.Xr kevent 2
+when a CCB has completed.
+One CCB may be retrieved per
+.Dv CAMIOGET
+call.
+CCBs may be returned in an order different than the order they were
+submitted.
+So the caller should use the
+.Dv periph_priv
+area inside the CCB header to store pointers to identifying information.
.El
.Sh FILES
.Bl -tag -width /dev/passn -compact
@@ -103,18 +212,21 @@ CAM subsystem.
.Sh DIAGNOSTICS
None.
.Sh SEE ALSO
+.Xr kqueue 2 ,
+.Xr poll 2 ,
+.Xr select 2 ,
.Xr cam 3 ,
.Xr cam 4 ,
.Xr cam_cdbparse 3 ,
+.Xr cd 4 ,
+.Xr ctl 4 ,
+.Xr da 4 ,
+.Xr sa 4 ,
.Xr xpt 4 ,
-.Xr camcontrol 8
+.Xr camcontrol 8 ,
+.Xr camdd 8
.Sh HISTORY
The CAM passthrough driver first appeared in
.Fx 3.0 .
.Sh AUTHORS
.An Kenneth Merry Aq ken@FreeBSD.org
-.Sh BUGS
-It might be nice to have a way to asynchronously send CCBs through the
-passthrough driver.
-This would probably require some sort of read/write
-interface or an asynchronous ioctl interface.
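
Together, CAMIOQUEUE and CAMIOGET give pass(4) the asynchronous interface the old BUGS section asked for, which is why that section is removed above. Below is a hedged userland sketch of the flow the new text describes, assuming libcam's cam_open_device()/cam_getccb() helpers and using a SCSI TEST UNIT READY as a stand-in command; "/dev/pass0" is a placeholder and error handling is abbreviated. This is an illustration, not the committed camdd(8) code.

/*
 * Queue a CCB with CAMIOQUEUE, wait for completion with select(2),
 * then collect the completed CCB with CAMIOGET.
 */
#include <sys/select.h>
#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

#include <camlib.h>			/* cam_open_device, cam_getccb */
#include <cam/scsi/scsi_message.h>	/* MSG_SIMPLE_Q_TAG */
#include <cam/scsi/scsi_pass.h>		/* CAMIOQUEUE, CAMIOGET */

int
main(void)
{
	struct cam_device *dev;
	union ccb *ccb, done_ccb;
	fd_set rfds;

	if ((dev = cam_open_device("/dev/pass0", O_RDWR)) == NULL)
		errx(1, "%s", cam_errbuf);
	if ((ccb = cam_getccb(dev)) == NULL)
		errx(1, "cam_getccb failed");

	/* Build a TEST UNIT READY and queue it asynchronously. */
	scsi_test_unit_ready(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ NULL,
	    MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/ 5000);
	if (ioctl(dev->fd, CAMIOQUEUE, ccb) == -1)
		err(1, "CAMIOQUEUE");

	/* Wait until the pass driver reports a completed CCB. */
	FD_ZERO(&rfds);
	FD_SET(dev->fd, &rfds);
	if (select(dev->fd + 1, &rfds, NULL, NULL, NULL) == -1)
		err(1, "select");

	/* Fetch the completed CCB and examine its status. */
	if (ioctl(dev->fd, CAMIOGET, &done_ccb) == -1)
		err(1, "CAMIOGET");
	printf("status 0x%x\n", done_ccb.ccb_h.status & CAM_STATUS_MASK);

	cam_freeccb(ccb);
	cam_close_device(dev);
	return (0);
}

A kevent(2)-based loop works the same way; the driver wakes the caller whenever a completed CCB lands on its done queue, and the periph_priv area mentioned above is how a caller with many outstanding CCBs tells the completions apart.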
diff --git a/share/mk/bsd.README b/share/mk/bsd.README
index d8a707f..179bcba 100644
--- a/share/mk/bsd.README
+++ b/share/mk/bsd.README
@@ -425,10 +425,16 @@ It has seven targets:
It sets/uses the following variables, among many others:
-TESTDIR Path to the installed tests. Must be a subdirectory of
+TESTSBASE Installation prefix for tests. Defaults to /usr/tests
+
+TESTSDIR Path to the installed tests. Must be a subdirectory of
TESTSBASE and the subpath should match the relative
location of the tests within the src tree.
+ The value of TESTSDIR defaults to
+ ${TESTSBASE}/${RELDIR:H} , e.g. /usr/tests/bin/ls when
+ included from bin/ls/tests .
+
KYUAFILE If 'auto' (the default), generate a Kyuafile out of the
test programs defined in the Makefile. If 'yes', then a
manually-crafted Kyuafile must be supplied with the
diff --git a/share/mk/bsd.obj.mk b/share/mk/bsd.obj.mk
index a5e8e73..3745f1b 100644
--- a/share/mk/bsd.obj.mk
+++ b/share/mk/bsd.obj.mk
@@ -46,10 +46,13 @@ __<bsd.obj.mk>__:
CANONICALOBJDIR:=${MAKEOBJDIRPREFIX}${.CURDIR}
.elif defined(MAKEOBJDIR) && ${MAKEOBJDIR:M/*} != ""
CANONICALOBJDIR:=${MAKEOBJDIR}
+OBJTOP?= ${MAKEOBJDIR}
.else
CANONICALOBJDIR:=/usr/obj${.CURDIR}
.endif
+OBJTOP?= ${.OBJDIR:S,${.CURDIR},,}${SRCTOP}
+
#
# Warn of unorthodox object directory.
#
diff --git a/share/mk/bsd.test.mk b/share/mk/bsd.test.mk
index af12418..6f20d3d 100644
--- a/share/mk/bsd.test.mk
+++ b/share/mk/bsd.test.mk
@@ -10,6 +10,9 @@
__<bsd.test.mk>__:
+# Tests install directory
+TESTSDIR?= ${TESTSBASE}/${RELDIR:H}
+
# List of subdirectories containing tests into which to recurse. This has the
# same semantics as SUBDIR at build-time. However, the directories listed here
# get registered into the run-time test suite definitions so that the test
diff --git a/share/mk/local.sys.mk b/share/mk/local.sys.mk
new file mode 100644
index 0000000..d379e8f
--- /dev/null
+++ b/share/mk/local.sys.mk
@@ -0,0 +1,13 @@
+# $FreeBSD$
+
+.if defined(.PARSEDIR)
+SRCTOP:= ${.PARSEDIR:tA:H:H}
+.else
+SRCTOP:= ${.MAKE.MAKEFILES:M*/local.sys.mk:H:H:H}
+.endif
+
+.if ${.CURDIR} == ${SRCTOP}
+RELDIR = .
+.elif ${.CURDIR:M${SRCTOP}/*}
+RELDIR := ${.CURDIR:S,${SRCTOP}/,,}
+.endif
diff --git a/share/mk/netbsd-tests.test.mk b/share/mk/netbsd-tests.test.mk
index 5c31d77..92a401b 100644
--- a/share/mk/netbsd-tests.test.mk
+++ b/share/mk/netbsd-tests.test.mk
@@ -3,16 +3,12 @@
.if !target(__netbsd_tests.test.mk__)
__netbsd_tests.test.mk__:
-.if !defined(OBJTOP)
-.error "Please define OBJTOP to the absolute path of the top of the object tree"
-.endif
+OBJTOP?= ${.OBJDIR:S/${RELDIR}//}
-.if !defined(SRCTOP)
-.error "Please define SRCTOP to the absolute path of the top of the source tree"
-.endif
+TESTSRC?= ${SRCTOP}/contrib/netbsd-tests/${RELDIR:H}
-.if !defined(TESTSRC)
-.error "Please define TESTSRC to the absolute path of the test sources, e.g. contrib/netbsd-tests/lib/libc/stdio"
+.if !exists(${TESTSRC}/)
+.error "Please define TESTSRC to the absolute path of the test sources, e.g. $${SRCTOP}/contrib/netbsd-tests/lib/libc/stdio"
.endif
.PATH: ${TESTSRC}
diff --git a/sys/boot/amd64/efi/main.c b/sys/boot/amd64/efi/main.c
index eb6e69a..ea4c3e7 100644
--- a/sys/boot/amd64/efi/main.c
+++ b/sys/boot/amd64/efi/main.c
@@ -54,6 +54,10 @@ EFI_GUID imgid = LOADED_IMAGE_PROTOCOL;
EFI_GUID mps = MPS_TABLE_GUID;
EFI_GUID netid = EFI_SIMPLE_NETWORK_PROTOCOL;
EFI_GUID smbios = SMBIOS_TABLE_GUID;
+EFI_GUID dxe = DXE_SERVICES_TABLE_GUID;
+EFI_GUID hoblist = HOB_LIST_TABLE_GUID;
+EFI_GUID memtype = MEMORY_TYPE_INFORMATION_TABLE_GUID;
+EFI_GUID debugimg = DEBUG_IMAGE_INFO_TABLE_GUID;
EFI_STATUS
main(int argc, CHAR16 *argv[])
@@ -274,6 +278,14 @@ command_configuration(int argc, char *argv[])
printf("ACPI 2.0 Table");
else if (!memcmp(guid, &smbios, sizeof(EFI_GUID)))
printf("SMBIOS Table");
+ else if (!memcmp(guid, &dxe, sizeof(EFI_GUID)))
+ printf("DXE Table");
+ else if (!memcmp(guid, &hoblist, sizeof(EFI_GUID)))
+ printf("HOB List Table");
+ else if (!memcmp(guid, &memtype, sizeof(EFI_GUID)))
+ printf("Memory Type Information Table");
+ else if (!memcmp(guid, &debugimg, sizeof(EFI_GUID)))
+ printf("Debug Image Info Table");
else
printf("Unknown Table (%s)", guid_to_string(guid));
printf(" at %p\n", ST->ConfigurationTable[i].VendorTable);
@@ -329,7 +341,7 @@ command_mode(int argc, char *argv[])
}
if (i != 0)
- printf("Choose the mode with \"col <mode number>\"\n");
+ printf("Choose the mode with \"col <mode number>\"\n");
return (CMD_OK);
}
diff --git a/sys/boot/efi/include/efiapi.h b/sys/boot/efi/include/efiapi.h
index 187fe29..c8dbf9e 100644
--- a/sys/boot/efi/include/efiapi.h
+++ b/sys/boot/efi/include/efiapi.h
@@ -88,7 +88,7 @@ EFI_STATUS
IN VOID *Buffer
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_SET_VIRTUAL_ADDRESS_MAP) (
IN UINTN MemoryMapSize,
@@ -103,7 +103,7 @@ EFI_STATUS
#define EFI_INTERNAL_PTR 0x00000004 // Pointer to internal runtime data
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_CONVERT_POINTER) (
IN UINTN DebugDisposition,
@@ -168,7 +168,7 @@ EFI_STATUS
IN EFI_EVENT Event
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_WAIT_FOR_EVENT) (
IN UINTN NumberOfEvents,
@@ -194,8 +194,8 @@ EFI_STATUS
#define TPL_APPLICATION 4
#define TPL_CALLBACK 8
-#define TPL_NOTIFY 16
-#define TPL_HIGH_LEVEL 31
+#define TPL_NOTIFY 16
+#define TPL_HIGH_LEVEL 31
typedef
EFI_TPL
@@ -324,14 +324,14 @@ EFI_STATUS
// Image Entry prototype
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_IMAGE_ENTRY_POINT) (
IN EFI_HANDLE ImageHandle,
IN struct _EFI_SYSTEM_TABLE *SystemTable
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_IMAGE_LOAD) (
IN BOOLEAN BootPolicy,
@@ -342,7 +342,7 @@ EFI_STATUS
OUT EFI_HANDLE *ImageHandle
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_IMAGE_START) (
IN EFI_HANDLE ImageHandle,
@@ -359,7 +359,7 @@ EFI_STATUS
IN CHAR16 *ExitData OPTIONAL
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_IMAGE_UNLOAD) (
IN EFI_HANDLE ImageHandle
@@ -495,7 +495,7 @@ EFI_STATUS
);
typedef
-EFI_STATUS
+EFI_STATUS
(EFIAPI *EFI_REGISTER_PROTOCOL_NOTIFY) (
IN EFI_GUID *Protocol,
IN EFI_EVENT Event,
@@ -539,7 +539,7 @@ EFI_STATUS
);
typedef
-EFI_STATUS
+EFI_STATUS
(EFIAPI *EFI_CONNECT_CONTROLLER) (
IN EFI_HANDLE ControllerHandle,
IN EFI_HANDLE *DriverImageHandle OPTIONAL,
@@ -548,19 +548,19 @@ EFI_STATUS
);
typedef
-EFI_STATUS
+EFI_STATUS
(EFIAPI *EFI_DISCONNECT_CONTROLLER)(
IN EFI_HANDLE ControllerHandle,
IN EFI_HANDLE DriverImageHandle, OPTIONAL
IN EFI_HANDLE ChildHandle OPTIONAL
- );
+ );
-#define EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL 0x00000001
-#define EFI_OPEN_PROTOCOL_GET_PROTOCOL 0x00000002
-#define EFI_OPEN_PROTOCOL_TEST_PROTOCOL 0x00000004
-#define EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER 0x00000008
-#define EFI_OPEN_PROTOCOL_BY_DRIVER 0x00000010
-#define EFI_OPEN_PROTOCOL_EXCLUSIVE 0x00000020
+#define EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL 0x00000001
+#define EFI_OPEN_PROTOCOL_GET_PROTOCOL 0x00000002
+#define EFI_OPEN_PROTOCOL_TEST_PROTOCOL 0x00000004
+#define EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER 0x00000008
+#define EFI_OPEN_PROTOCOL_BY_DRIVER 0x00000010
+#define EFI_OPEN_PROTOCOL_EXCLUSIVE 0x00000020
typedef
EFI_STATUS
@@ -808,7 +808,7 @@ typedef struct {
//
EFI_PROTOCOLS_PER_HANDLE ProtocolsPerHandle;
EFI_LOCATE_HANDLE_BUFFER LocateHandleBuffer;
- EFI_LOCATE_PROTOCOL LocateProtocol;
+ EFI_LOCATE_PROTOCOL LocateProtocol;
EFI_INSTALL_MULTIPLE_PROTOCOL_INTERFACES InstallMultipleProtocolInterfaces;
EFI_UNINSTALL_MULTIPLE_PROTOCOL_INTERFACES UninstallMultipleProtocolInterfaces;
@@ -846,6 +846,20 @@ typedef struct {
#define SAL_SYSTEM_TABLE_GUID \
{ 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d }
+#define FDT_TABLE_GUID \
+ { 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 }
+
+#define DXE_SERVICES_TABLE_GUID \
+ { 0x5ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9 }
+
+#define HOB_LIST_TABLE_GUID \
+ { 0x7739f24c, 0x93d7, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d }
+
+#define MEMORY_TYPE_INFORMATION_TABLE_GUID \
+ { 0x4c19049f, 0x4137, 0x4dd3, 0x9c, 0x10, 0x8b, 0x97, 0xa8, 0x3f, 0xfd, 0xfa }
+
+#define DEBUG_IMAGE_INFO_TABLE_GUID \
+ { 0x49152e77, 0x1ada, 0x4764, 0xb7, 0xa2, 0x7a, 0xfe, 0xfe, 0xd9, 0x5e, 0x8b }
typedef struct _EFI_CONFIGURATION_TABLE {
EFI_GUID VendorGuid;
diff --git a/sys/cam/ata/ata_da.c b/sys/cam/ata/ata_da.c
index f88899e..005c684 100644
--- a/sys/cam/ata/ata_da.c
+++ b/sys/cam/ata/ata_da.c
@@ -1573,12 +1573,26 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
}
switch (bp->bio_cmd) {
case BIO_WRITE:
- softc->flags |= ADA_FLAG_DIRTY;
- /* FALLTHROUGH */
case BIO_READ:
{
uint64_t lba = bp->bio_pblkno;
uint16_t count = bp->bio_bcount / softc->params.secsize;
+ void *data_ptr;
+ int rw_op;
+
+ if (bp->bio_cmd == BIO_WRITE) {
+ softc->flags |= ADA_FLAG_DIRTY;
+ rw_op = CAM_DIR_OUT;
+ } else {
+ rw_op = CAM_DIR_IN;
+ }
+
+ data_ptr = bp->bio_data;
+ if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
+ rw_op |= CAM_DATA_BIO;
+ data_ptr = bp;
+ }
+
#ifdef ADA_TEST_FAILURE
int fail = 0;
@@ -1623,12 +1637,9 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
cam_fill_ataio(ataio,
ada_retry_count,
adadone,
- (bp->bio_cmd == BIO_READ ? CAM_DIR_IN :
- CAM_DIR_OUT) | ((bp->bio_flags & BIO_UNMAPPED)
- != 0 ? CAM_DATA_BIO : 0),
+ rw_op,
tag_code,
- ((bp->bio_flags & BIO_UNMAPPED) != 0) ? (void *)bp :
- bp->bio_data,
+ data_ptr,
bp->bio_bcount,
ada_default_timeout*1000);
diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h
index 98bb9ea..12d3803 100644
--- a/sys/cam/cam_ccb.h
+++ b/sys/cam/cam_ccb.h
@@ -111,6 +111,9 @@ typedef enum {
typedef enum {
CAM_EXTLUN_VALID = 0x00000001,/* 64bit lun field is valid */
+ CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */
+ CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/
+ CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */
} ccb_xflags;
/* XPT Opcodes for xpt_action */
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index ba0863a..6773829 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -3337,7 +3337,8 @@ xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
}
void
-xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
+xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
+ u_int32_t priority, u_int32_t flags)
{
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
@@ -3355,10 +3356,16 @@ xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
ccb_h->target_lun = CAM_TARGET_WILDCARD;
}
ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
- ccb_h->flags = 0;
+ ccb_h->flags = flags;
ccb_h->xflags = 0;
}
+void
+xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
+{
+ xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
+}
+
/* Path manipulation functions */
cam_status
xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
diff --git a/sys/cam/cam_xpt.h b/sys/cam/cam_xpt.h
index 1d983c9..ca7dccc 100644
--- a/sys/cam/cam_xpt.h
+++ b/sys/cam/cam_xpt.h
@@ -70,6 +70,10 @@ void xpt_action_default(union ccb *new_ccb);
union ccb *xpt_alloc_ccb(void);
union ccb *xpt_alloc_ccb_nowait(void);
void xpt_free_ccb(union ccb *free_ccb);
+void xpt_setup_ccb_flags(struct ccb_hdr *ccb_h,
+ struct cam_path *path,
+ u_int32_t priority,
+ u_int32_t flags);
void xpt_setup_ccb(struct ccb_hdr *ccb_h,
struct cam_path *path,
u_int32_t priority);
diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c
index 4e3fe76..1cd687a 100644
--- a/sys/cam/scsi/scsi_da.c
+++ b/sys/cam/scsi/scsi_da.c
@@ -2332,29 +2332,40 @@ skipstate:
switch (bp->bio_cmd) {
case BIO_WRITE:
- softc->flags |= DA_FLAG_DIRTY;
- /* FALLTHROUGH */
case BIO_READ:
+ {
+ void *data_ptr;
+ int rw_op;
+
+ if (bp->bio_cmd == BIO_WRITE) {
+ softc->flags |= DA_FLAG_DIRTY;
+ rw_op = SCSI_RW_WRITE;
+ } else {
+ rw_op = SCSI_RW_READ;
+ }
+
+ data_ptr = bp->bio_data;
+ if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
+ rw_op |= SCSI_RW_BIO;
+ data_ptr = bp;
+ }
+
scsi_read_write(&start_ccb->csio,
/*retries*/da_retry_count,
/*cbfcnp*/dadone,
/*tag_action*/tag_code,
- /*read_op*/(bp->bio_cmd == BIO_READ ?
- SCSI_RW_READ : SCSI_RW_WRITE) |
- ((bp->bio_flags & BIO_UNMAPPED) != 0 ?
- SCSI_RW_BIO : 0),
+ rw_op,
/*byte2*/0,
softc->minimum_cmd_size,
/*lba*/bp->bio_pblkno,
/*block_count*/bp->bio_bcount /
softc->params.secsize,
- /*data_ptr*/ (bp->bio_flags &
- BIO_UNMAPPED) != 0 ? (void *)bp :
- bp->bio_data,
+ data_ptr,
/*dxfer_len*/ bp->bio_bcount,
/*sense_len*/SSD_FULL_SIZE,
da_default_timeout * 1000);
break;
+ }
case BIO_FLUSH:
/*
* BIO_FLUSH doesn't currently communicate
diff --git a/sys/cam/scsi/scsi_pass.c b/sys/cam/scsi/scsi_pass.c
index 174151e..09cda5b 100644
--- a/sys/cam/scsi/scsi_pass.c
+++ b/sys/cam/scsi/scsi_pass.c
@@ -28,27 +28,39 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_kdtrace.h"
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
+#include <sys/conf.h>
#include <sys/types.h>
#include <sys/bio.h>
-#include <sys/malloc.h>
-#include <sys/fcntl.h>
-#include <sys/conf.h>
-#include <sys/errno.h>
+#include <sys/bus.h>
#include <sys/devicestat.h>
+#include <sys/errno.h>
+#include <sys/fcntl.h>
+#include <sys/malloc.h>
#include <sys/proc.h>
+#include <sys/poll.h>
+#include <sys/selinfo.h>
+#include <sys/sdt.h>
#include <sys/taskqueue.h>
+#include <vm/uma.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
+#include <machine/bus.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
+#include <cam/cam_xpt.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
-#include <cam/cam_sim.h>
#include <cam/cam_compat.h>
+#include <cam/cam_xpt_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_pass.h>
@@ -57,7 +69,11 @@ typedef enum {
PASS_FLAG_OPEN = 0x01,
PASS_FLAG_LOCKED = 0x02,
PASS_FLAG_INVALID = 0x04,
- PASS_FLAG_INITIAL_PHYSPATH = 0x08
+ PASS_FLAG_INITIAL_PHYSPATH = 0x08,
+ PASS_FLAG_ZONE_INPROG = 0x10,
+ PASS_FLAG_ZONE_VALID = 0x20,
+ PASS_FLAG_UNMAPPED_CAPABLE = 0x40,
+ PASS_FLAG_ABANDONED_REF_SET = 0x80
} pass_flags;
typedef enum {
@@ -65,38 +81,104 @@ typedef enum {
} pass_state;
typedef enum {
- PASS_CCB_BUFFER_IO
+ PASS_CCB_BUFFER_IO,
+ PASS_CCB_QUEUED_IO
} pass_ccb_types;
#define ccb_type ppriv_field0
-#define ccb_bp ppriv_ptr1
+#define ccb_ioreq ppriv_ptr1
-struct pass_softc {
- pass_state state;
- pass_flags flags;
- u_int8_t pd_type;
- union ccb saved_ccb;
- int open_count;
- u_int maxio;
- struct devstat *device_stats;
- struct cdev *dev;
- struct cdev *alias_dev;
- struct task add_physpath_task;
+/*
+ * The maximum number of memory segments we preallocate.
+ */
+#define PASS_MAX_SEGS 16
+
+typedef enum {
+ PASS_IO_NONE = 0x00,
+ PASS_IO_USER_SEG_MALLOC = 0x01,
+ PASS_IO_KERN_SEG_MALLOC = 0x02,
+ PASS_IO_ABANDONED = 0x04
+} pass_io_flags;
+
+struct pass_io_req {
+ union ccb ccb;
+ union ccb *alloced_ccb;
+ union ccb *user_ccb_ptr;
+ camq_entry user_periph_links;
+ ccb_ppriv_area user_periph_priv;
+ struct cam_periph_map_info mapinfo;
+ pass_io_flags flags;
+ ccb_flags data_flags;
+ int num_user_segs;
+ bus_dma_segment_t user_segs[PASS_MAX_SEGS];
+ int num_kern_segs;
+ bus_dma_segment_t kern_segs[PASS_MAX_SEGS];
+ bus_dma_segment_t *user_segptr;
+ bus_dma_segment_t *kern_segptr;
+ int num_bufs;
+ uint32_t dirs[CAM_PERIPH_MAXMAPS];
+ uint32_t lengths[CAM_PERIPH_MAXMAPS];
+ uint8_t *user_bufs[CAM_PERIPH_MAXMAPS];
+ uint8_t *kern_bufs[CAM_PERIPH_MAXMAPS];
+ struct bintime start_time;
+ TAILQ_ENTRY(pass_io_req) links;
};
+struct pass_softc {
+ pass_state state;
+ pass_flags flags;
+ u_int8_t pd_type;
+ union ccb saved_ccb;
+ int open_count;
+ u_int maxio;
+ struct devstat *device_stats;
+ struct cdev *dev;
+ struct cdev *alias_dev;
+ struct task add_physpath_task;
+ struct task shutdown_kqueue_task;
+ struct selinfo read_select;
+ TAILQ_HEAD(, pass_io_req) incoming_queue;
+ TAILQ_HEAD(, pass_io_req) active_queue;
+ TAILQ_HEAD(, pass_io_req) abandoned_queue;
+ TAILQ_HEAD(, pass_io_req) done_queue;
+ struct cam_periph *periph;
+ char zone_name[12];
+ char io_zone_name[12];
+ uma_zone_t pass_zone;
+ uma_zone_t pass_io_zone;
+ size_t io_zone_size;
+};
static d_open_t passopen;
static d_close_t passclose;
static d_ioctl_t passioctl;
static d_ioctl_t passdoioctl;
+static d_poll_t passpoll;
+static d_kqfilter_t passkqfilter;
+static void passreadfiltdetach(struct knote *kn);
+static int passreadfilt(struct knote *kn, long hint);
static periph_init_t passinit;
static periph_ctor_t passregister;
static periph_oninv_t passoninvalidate;
static periph_dtor_t passcleanup;
-static void pass_add_physpath(void *context, int pending);
+static periph_start_t passstart;
+static void pass_shutdown_kqueue(void *context, int pending);
+static void pass_add_physpath(void *context, int pending);
static void passasync(void *callback_arg, u_int32_t code,
struct cam_path *path, void *arg);
+static void passdone(struct cam_periph *periph,
+ union ccb *done_ccb);
+static int passcreatezone(struct cam_periph *periph);
+static void passiocleanup(struct pass_softc *softc,
+ struct pass_io_req *io_req);
+static int passcopysglist(struct cam_periph *periph,
+ struct pass_io_req *io_req,
+ ccb_flags direction);
+static int passmemsetup(struct cam_periph *periph,
+ struct pass_io_req *io_req);
+static int passmemdone(struct cam_periph *periph,
+ struct pass_io_req *io_req);
static int passerror(union ccb *ccb, u_int32_t cam_flags,
u_int32_t sense_flags);
static int passsendccb(struct cam_periph *periph, union ccb *ccb,
@@ -116,9 +198,19 @@ static struct cdevsw pass_cdevsw = {
.d_open = passopen,
.d_close = passclose,
.d_ioctl = passioctl,
+ .d_poll = passpoll,
+ .d_kqfilter = passkqfilter,
.d_name = "pass",
};
+static struct filterops passread_filtops = {
+ .f_isfd = 1,
+ .f_detach = passreadfiltdetach,
+ .f_event = passreadfilt
+};
+
+static MALLOC_DEFINE(M_SCSIPASS, "scsi_pass", "scsi passthrough buffers");
+
static void
passinit(void)
{
@@ -138,6 +230,60 @@ passinit(void)
}
static void
+passrejectios(struct cam_periph *periph)
+{
+ struct pass_io_req *io_req, *io_req2;
+ struct pass_softc *softc;
+
+ softc = (struct pass_softc *)periph->softc;
+
+ /*
+ * The user can no longer get status for I/O on the done queue, so
+ * clean up all outstanding I/O on the done queue.
+ */
+ TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
+ TAILQ_REMOVE(&softc->done_queue, io_req, links);
+ passiocleanup(softc, io_req);
+ uma_zfree(softc->pass_zone, io_req);
+ }
+
+ /*
+ * The underlying device is gone, so we can't issue these I/Os.
+ * The devfs node has been shut down, so we can't return status to
+ * the user. Free any I/O left on the incoming queue.
+ */
+ TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) {
+ TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
+ passiocleanup(softc, io_req);
+ uma_zfree(softc->pass_zone, io_req);
+ }
+
+ /*
+ * Normally we would put I/Os on the abandoned queue and acquire a
+ * reference when we saw the final close. But, the device went
+ * away and devfs may have moved everything off to deadfs by the
+ * time the I/O done callback is called; as a result, we won't see
+ * any more closes. So, if we have any active I/Os, we need to put
+ * them on the abandoned queue. When the abandoned queue is empty,
+ * we'll release the remaining reference (see below) to the peripheral.
+ */
+ TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) {
+ TAILQ_REMOVE(&softc->active_queue, io_req, links);
+ io_req->flags |= PASS_IO_ABANDONED;
+ TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links);
+ }
+
+ /*
+ * If we put any I/O on the abandoned queue, acquire a reference.
+ */
+ if ((!TAILQ_EMPTY(&softc->abandoned_queue))
+ && ((softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0)) {
+ cam_periph_doacquire(periph);
+ softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
+ }
+}
+
+static void
passdevgonecb(void *arg)
{
struct cam_periph *periph;
@@ -165,17 +311,26 @@ passdevgonecb(void *arg)
/*
* Release the reference held for the device node, it is gone now.
+ * Accordingly, inform all queued I/Os of their fate.
*/
cam_periph_release_locked(periph);
+ passrejectios(periph);
/*
- * We reference the lock directly here, instead of using
+ * We reference the SIM lock directly here, instead of using
* cam_periph_unlock(). The reason is that the final call to
* cam_periph_release_locked() above could result in the periph
* getting freed. If that is the case, dereferencing the periph
* with a cam_periph_unlock() call would cause a page fault.
*/
mtx_unlock(mtx);
+
+ /*
+ * We have to remove our kqueue context from a thread because it
+ * may sleep. It would be nice if we could get a callback from
+ * kqueue when it is done cleaning up resources.
+ */
+ taskqueue_enqueue(taskqueue_thread, &softc->shutdown_kqueue_task);
}
static void
@@ -197,12 +352,6 @@ passoninvalidate(struct cam_periph *periph)
* when it has cleaned up its state.
*/
destroy_dev_sched_cb(softc->dev, passdevgonecb, periph);
-
- /*
- * XXX Return all queued I/O with ENXIO.
- * XXX Handle any transactions queued to the card
- * with XPT_ABORT_CCB.
- */
}
static void
@@ -212,9 +361,40 @@ passcleanup(struct cam_periph *periph)
softc = (struct pass_softc *)periph->softc;
+ cam_periph_assert(periph, MA_OWNED);
+ KASSERT(TAILQ_EMPTY(&softc->active_queue),
+ ("%s called when there are commands on the active queue!\n",
+ __func__));
+ KASSERT(TAILQ_EMPTY(&softc->abandoned_queue),
+ ("%s called when there are commands on the abandoned queue!\n",
+ __func__));
+ KASSERT(TAILQ_EMPTY(&softc->incoming_queue),
+ ("%s called when there are commands on the incoming queue!\n",
+ __func__));
+ KASSERT(TAILQ_EMPTY(&softc->done_queue),
+ ("%s called when there are commands on the done queue!\n",
+ __func__));
+
devstat_remove_entry(softc->device_stats);
cam_periph_unlock(periph);
+
+ /*
+ * We call taskqueue_drain() for the physpath task to make sure it
+ * is complete. We drop the lock because this can potentially
+ * sleep. XXX KDM that is bad. Need a way to get a callback when
+ * a taskqueue is drained.
+ *
+ * Note that we don't drain the kqueue shutdown task queue. This
+ * is because we hold a reference on the periph for kqueue, and
+ * release that reference from the kqueue shutdown task queue. So
+ * we cannot come into this routine unless we've released that
+ * reference. Also, because that could be the last reference, we
+ * could be called from the cam_periph_release() call in
+ * pass_shutdown_kqueue(). In that case, the taskqueue_drain()
+ * would deadlock. It would be preferable if we had a way to
+ * get a callback when a taskqueue is done.
+ */
taskqueue_drain(taskqueue_thread, &softc->add_physpath_task);
cam_periph_lock(periph);
@@ -223,10 +403,29 @@ passcleanup(struct cam_periph *periph)
}
static void
+pass_shutdown_kqueue(void *context, int pending)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+
+ periph = context;
+ softc = periph->softc;
+
+ knlist_clear(&softc->read_select.si_note, /*is_locked*/ 0);
+ knlist_destroy(&softc->read_select.si_note);
+
+ /*
+ * Release the reference we held for kqueue.
+ */
+ cam_periph_release(periph);
+}
+
+static void
pass_add_physpath(void *context, int pending)
{
struct cam_periph *periph;
struct pass_softc *softc;
+ struct mtx *mtx;
char *physpath;
/*
@@ -236,34 +435,38 @@ pass_add_physpath(void *context, int pending)
periph = context;
softc = periph->softc;
physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK);
- cam_periph_lock(periph);
- if (periph->flags & CAM_PERIPH_INVALID) {
- cam_periph_unlock(periph);
+ mtx = cam_periph_mtx(periph);
+ mtx_lock(mtx);
+
+ if (periph->flags & CAM_PERIPH_INVALID)
goto out;
- }
+
if (xpt_getattr(physpath, MAXPATHLEN,
"GEOM::physpath", periph->path) == 0
&& strlen(physpath) != 0) {
- cam_periph_unlock(periph);
+ mtx_unlock(mtx);
make_dev_physpath_alias(MAKEDEV_WAITOK, &softc->alias_dev,
softc->dev, softc->alias_dev, physpath);
- cam_periph_lock(periph);
+ mtx_lock(mtx);
}
+out:
/*
* Now that we've made our alias, we no longer have to have a
* reference to the device.
*/
- if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0) {
+ if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0)
softc->flags |= PASS_FLAG_INITIAL_PHYSPATH;
- cam_periph_unlock(periph);
- dev_rel(softc->dev);
- }
- else
- cam_periph_unlock(periph);
-out:
+ /*
+ * We always acquire a reference to the periph before queueing this
+ * task queue function, so it won't go away before we run.
+ */
+ while (pending-- > 0)
+ cam_periph_release_locked(periph);
+ mtx_unlock(mtx);
+
free(physpath, M_DEVBUF);
}
@@ -291,7 +494,7 @@ passasync(void *callback_arg, u_int32_t code,
* process.
*/
status = cam_periph_alloc(passregister, passoninvalidate,
- passcleanup, NULL, "pass",
+ passcleanup, passstart, "pass",
CAM_PERIPH_BIO, path,
passasync, AC_FOUND_DEVICE, cgd);
@@ -315,8 +518,19 @@ passasync(void *callback_arg, u_int32_t code,
buftype = (uintptr_t)arg;
if (buftype == CDAI_TYPE_PHYS_PATH) {
struct pass_softc *softc;
+ cam_status status;
softc = (struct pass_softc *)periph->softc;
+ /*
+ * Acquire a reference to the periph before we
+ * start the taskqueue, so that we don't run into
+ * a situation where the periph goes away before
+ * the task queue has a chance to run.
+ */
+ status = cam_periph_acquire(periph);
+ if (status != CAM_REQ_CMP)
+ break;
+
taskqueue_enqueue(taskqueue_thread,
&softc->add_physpath_task);
}
@@ -361,6 +575,17 @@ passregister(struct cam_periph *periph, void *arg)
softc->pd_type = T_DIRECT;
periph->softc = softc;
+ softc->periph = periph;
+ TAILQ_INIT(&softc->incoming_queue);
+ TAILQ_INIT(&softc->active_queue);
+ TAILQ_INIT(&softc->abandoned_queue);
+ TAILQ_INIT(&softc->done_queue);
+ snprintf(softc->zone_name, sizeof(softc->zone_name), "%s%d",
+ periph->periph_name, periph->unit_number);
+ snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO",
+ periph->periph_name, periph->unit_number);
+ softc->io_zone_size = MAXPHYS;
+ knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph));
bzero(&cpi, sizeof(cpi));
xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
@@ -374,6 +599,9 @@ passregister(struct cam_periph *periph, void *arg)
else
softc->maxio = cpi.maxio; /* real value */
+ if (cpi.hba_misc & PIM_UNMAPPED)
+ softc->flags |= PASS_FLAG_UNMAPPED_CAPABLE;
+
/*
* We pass in 0 for a blocksize, since we don't
* know what the blocksize of this device is, if
@@ -391,6 +619,23 @@ passregister(struct cam_periph *periph, void *arg)
DEVSTAT_PRIORITY_PASS);
/*
+ * Initialize the taskqueue handler for shutting down kqueue.
+ */
+ TASK_INIT(&softc->shutdown_kqueue_task, /*priority*/ 0,
+ pass_shutdown_kqueue, periph);
+
+ /*
+ * Acquire a reference to the periph that we can release once we've
+ * cleaned up the kqueue.
+ */
+ if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
+ xpt_print(periph->path, "%s: lost periph during "
+ "registration!\n", __func__);
+ cam_periph_lock(periph);
+ return (CAM_REQ_CMP_ERR);
+ }
+
+ /*
* Acquire a reference to the periph before we create the devfs
* instance for it. We'll release this reference once the devfs
* instance has been freed.
@@ -408,12 +653,15 @@ passregister(struct cam_periph *periph, void *arg)
periph->periph_name, periph->unit_number);
/*
- * Now that we have made the devfs instance, hold a reference to it
- * until the task queue has run to setup the physical path alias.
- * That way devfs won't get rid of the device before we add our
- * alias.
+ * Hold a reference to the periph before we create the physical
+ * path alias so it can't go away.
*/
- dev_ref(softc->dev);
+ if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
+ xpt_print(periph->path, "%s: lost periph during "
+ "registration!\n", __func__);
+ cam_periph_lock(periph);
+ return (CAM_REQ_CMP_ERR);
+ }
cam_periph_lock(periph);
softc->dev->si_drv1 = periph;
@@ -514,6 +762,55 @@ passclose(struct cdev *dev, int flag, int fmt, struct thread *td)
softc = periph->softc;
softc->open_count--;
+ if (softc->open_count == 0) {
+ struct pass_io_req *io_req, *io_req2;
+ int need_unlock;
+
+ need_unlock = 0;
+
+ TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
+ TAILQ_REMOVE(&softc->done_queue, io_req, links);
+ passiocleanup(softc, io_req);
+ uma_zfree(softc->pass_zone, io_req);
+ }
+
+ TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links,
+ io_req2) {
+ TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
+ passiocleanup(softc, io_req);
+ uma_zfree(softc->pass_zone, io_req);
+ }
+
+ /*
+ * If there are any active I/Os, we need to forcibly acquire a
+ * reference to the peripheral so that we don't go away
+ * before they complete. We'll release the reference when
+ * the abandoned queue is empty.
+ */
+ io_req = TAILQ_FIRST(&softc->active_queue);
+ if ((io_req != NULL)
+ && (softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0) {
+ cam_periph_doacquire(periph);
+ softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
+ }
+
+ /*
+ * Since the I/O in the active queue is not under our
+ * control, just set a flag so that we can clean it up when
+ * it completes and put it on the abandoned queue. This
+ * will prevent our sending spurious completions in the
+ * event that the device is opened again before these I/Os
+ * complete.
+ */
+ TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links,
+ io_req2) {
+ TAILQ_REMOVE(&softc->active_queue, io_req, links);
+ io_req->flags |= PASS_IO_ABANDONED;
+ TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req,
+ links);
+ }
+ }
+
cam_periph_release_locked(periph);
/*
@@ -533,6 +830,915 @@ passclose(struct cdev *dev, int flag, int fmt, struct thread *td)
return (0);
}
+
+static void
+passstart(struct cam_periph *periph, union ccb *start_ccb)
+{
+ struct pass_softc *softc;
+
+ softc = (struct pass_softc *)periph->softc;
+
+ switch (softc->state) {
+ case PASS_STATE_NORMAL: {
+ struct pass_io_req *io_req;
+
+ /*
+ * Check for any queued I/O requests that require an
+ * allocated slot.
+ */
+ io_req = TAILQ_FIRST(&softc->incoming_queue);
+ if (io_req == NULL) {
+ xpt_release_ccb(start_ccb);
+ break;
+ }
+ TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
+ TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);
+ /*
+ * Merge the user's CCB into the allocated CCB.
+ */
+ xpt_merge_ccb(start_ccb, &io_req->ccb);
+ start_ccb->ccb_h.ccb_type = PASS_CCB_QUEUED_IO;
+ start_ccb->ccb_h.ccb_ioreq = io_req;
+ start_ccb->ccb_h.cbfcnp = passdone;
+ io_req->alloced_ccb = start_ccb;
+ binuptime(&io_req->start_time);
+ devstat_start_transaction(softc->device_stats,
+ &io_req->start_time);
+
+ xpt_action(start_ccb);
+
+ /*
+ * If we have any more I/O waiting, schedule ourselves again.
+ */
+ if (!TAILQ_EMPTY(&softc->incoming_queue))
+ xpt_schedule(periph, CAM_PRIORITY_NORMAL);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static void
+passdone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct pass_softc *softc;
+ struct ccb_scsiio *csio;
+
+ softc = (struct pass_softc *)periph->softc;
+
+ cam_periph_assert(periph, MA_OWNED);
+
+ csio = &done_ccb->csio;
+ switch (csio->ccb_h.ccb_type) {
+ case PASS_CCB_QUEUED_IO: {
+ struct pass_io_req *io_req;
+
+ io_req = done_ccb->ccb_h.ccb_ioreq;
+#if 0
+ xpt_print(periph->path, "%s: called for user CCB %p\n",
+ __func__, io_req->user_ccb_ptr);
+#endif
+ if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
+ && (done_ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER)
+ && ((io_req->flags & PASS_IO_ABANDONED) == 0)) {
+ int error;
+
+ error = passerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA | SF_NO_PRINT);
+
+ if (error == ERESTART) {
+ /*
+ * A retry was scheduled, so
+ * just return.
+ */
+ return;
+ }
+ }
+
+ /*
+ * Copy the allocated CCB contents back to the malloced CCB
+ * so we can give status back to the user when he requests it.
+ */
+ bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb));
+
+ /*
+ * Log data/transaction completion with devstat(9).
+ */
+ switch (done_ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO:
+ devstat_end_transaction(softc->device_stats,
+ done_ccb->csio.dxfer_len - done_ccb->csio.resid,
+ done_ccb->csio.tag_action & 0x3,
+ ((done_ccb->ccb_h.flags & CAM_DIR_MASK) ==
+ CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
+ (done_ccb->ccb_h.flags & CAM_DIR_OUT) ?
+ DEVSTAT_WRITE : DEVSTAT_READ, NULL,
+ &io_req->start_time);
+ break;
+ case XPT_ATA_IO:
+ devstat_end_transaction(softc->device_stats,
+ done_ccb->ataio.dxfer_len - done_ccb->ataio.resid,
+ done_ccb->ataio.tag_action & 0x3,
+ ((done_ccb->ccb_h.flags & CAM_DIR_MASK) ==
+ CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
+ (done_ccb->ccb_h.flags & CAM_DIR_OUT) ?
+ DEVSTAT_WRITE : DEVSTAT_READ, NULL,
+ &io_req->start_time);
+ break;
+ case XPT_SMP_IO:
+ /*
+ * XXX KDM this isn't quite right, but there isn't
+ * currently an easy way to represent a bidirectional
+ * transfer in devstat. The only way to do it
+ * and have the byte counts come out right would
+ * mean that we would have to record two
+ * transactions, one for the request and one for the
+ * response. For now, so that we report something,
+ * just treat the entire thing as a read.
+ */
+ devstat_end_transaction(softc->device_stats,
+ done_ccb->smpio.smp_request_len +
+ done_ccb->smpio.smp_response_len,
+ DEVSTAT_TAG_SIMPLE, DEVSTAT_READ, NULL,
+ &io_req->start_time);
+ break;
+ default:
+ devstat_end_transaction(softc->device_stats, 0,
+ DEVSTAT_TAG_NONE, DEVSTAT_NO_DATA, NULL,
+ &io_req->start_time);
+ break;
+ }
+
+ /*
+ * In the normal case, take the completed I/O off of the
+	 * active queue and put it on the done queue.  Notify the
+ * user that we have a completed I/O.
+ */
+ if ((io_req->flags & PASS_IO_ABANDONED) == 0) {
+ TAILQ_REMOVE(&softc->active_queue, io_req, links);
+ TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
+ selwakeuppri(&softc->read_select, PRIBIO);
+ KNOTE_LOCKED(&softc->read_select.si_note, 0);
+ } else {
+ /*
+ * In the case of an abandoned I/O (final close
+ * without fetching the I/O), take it off of the
+ * abandoned queue and free it.
+ */
+ TAILQ_REMOVE(&softc->abandoned_queue, io_req, links);
+ passiocleanup(softc, io_req);
+ uma_zfree(softc->pass_zone, io_req);
+
+ /*
+ * Release the done_ccb here, since we may wind up
+ * freeing the peripheral when we decrement the
+ * reference count below.
+ */
+ xpt_release_ccb(done_ccb);
+
+ /*
+ * If the abandoned queue is empty, we can release
+ * our reference to the periph since we won't have
+ * any more completions coming.
+ */
+ if ((TAILQ_EMPTY(&softc->abandoned_queue))
+ && (softc->flags & PASS_FLAG_ABANDONED_REF_SET)) {
+ softc->flags &= ~PASS_FLAG_ABANDONED_REF_SET;
+ cam_periph_release_locked(periph);
+ }
+
+ /*
+ * We have already released the CCB, so we can
+ * return.
+ */
+ return;
+ }
+ break;
+ }
+ }
+ xpt_release_ccb(done_ccb);
+}
+
+static int
+passcreatezone(struct cam_periph *periph)
+{
+ struct pass_softc *softc;
+ int error;
+
+ error = 0;
+ softc = (struct pass_softc *)periph->softc;
+
+ cam_periph_assert(periph, MA_OWNED);
+ KASSERT(((softc->flags & PASS_FLAG_ZONE_VALID) == 0),
+ ("%s called when the pass(4) zone is valid!\n", __func__));
+ KASSERT((softc->pass_zone == NULL),
+ ("%s called when the pass(4) zone is allocated!\n", __func__));
+
+ if ((softc->flags & PASS_FLAG_ZONE_INPROG) == 0) {
+
+ /*
+ * We're the first context through, so we need to create
+ * the pass(4) UMA zone for I/O requests.
+ */
+ softc->flags |= PASS_FLAG_ZONE_INPROG;
+
+ /*
+ * uma_zcreate() does a blocking (M_WAITOK) allocation,
+ * so we cannot hold a mutex while we call it.
+ */
+ cam_periph_unlock(periph);
+
+ softc->pass_zone = uma_zcreate(softc->zone_name,
+ sizeof(struct pass_io_req), NULL, NULL, NULL, NULL,
+ /*align*/ 0, /*flags*/ 0);
+
+ softc->pass_io_zone = uma_zcreate(softc->io_zone_name,
+ softc->io_zone_size, NULL, NULL, NULL, NULL,
+ /*align*/ 0, /*flags*/ 0);
+
+ cam_periph_lock(periph);
+
+ if ((softc->pass_zone == NULL)
+ || (softc->pass_io_zone == NULL)) {
+ if (softc->pass_zone == NULL)
+ xpt_print(periph->path, "unable to allocate "
+ "IO Req UMA zone\n");
+ else
+ xpt_print(periph->path, "unable to allocate "
+ "IO UMA zone\n");
+ softc->flags &= ~PASS_FLAG_ZONE_INPROG;
+ goto bailout;
+ }
+
+ /*
+ * Set the flags appropriately and notify any other waiters.
+ */
+		softc->flags &= ~PASS_FLAG_ZONE_INPROG;
+ softc->flags |= PASS_FLAG_ZONE_VALID;
+ wakeup(&softc->pass_zone);
+ } else {
+ /*
+ * In this case, the UMA zone has not yet been created, but
+ * another context is in the process of creating it. We
+ * need to sleep until the creation is either done or has
+ * failed.
+ */
+ while ((softc->flags & PASS_FLAG_ZONE_INPROG)
+ && ((softc->flags & PASS_FLAG_ZONE_VALID) == 0)) {
+ error = msleep(&softc->pass_zone,
+ cam_periph_mtx(periph), PRIBIO,
+ "paszon", 0);
+ if (error != 0)
+ goto bailout;
+ }
+ /*
+ * If the zone creation failed, no luck for the user.
+ */
+ if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0){
+ error = ENOMEM;
+ goto bailout;
+ }
+ }
+bailout:
+ return (error);
+}
+
+static void
+passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req)
+{
+ union ccb *ccb;
+ u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
+ int i, numbufs;
+
+ ccb = &io_req->ccb;
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_DEV_MATCH:
+ numbufs = min(io_req->num_bufs, 2);
+
+ if (numbufs == 1) {
+ data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
+ } else {
+ data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
+ data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
+ }
+ break;
+ case XPT_SCSI_IO:
+ case XPT_CONT_TARGET_IO:
+ data_ptrs[0] = &ccb->csio.data_ptr;
+ numbufs = min(io_req->num_bufs, 1);
+ break;
+ case XPT_ATA_IO:
+ data_ptrs[0] = &ccb->ataio.data_ptr;
+ numbufs = min(io_req->num_bufs, 1);
+ break;
+ case XPT_SMP_IO:
+ numbufs = min(io_req->num_bufs, 2);
+ data_ptrs[0] = &ccb->smpio.smp_request;
+ data_ptrs[1] = &ccb->smpio.smp_response;
+ break;
+ case XPT_DEV_ADVINFO:
+ numbufs = min(io_req->num_bufs, 1);
+ data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
+ break;
+ default:
+ /* allow ourselves to be swapped once again */
+ return;
+ break; /* NOTREACHED */
+ }
+
+ if (io_req->flags & PASS_IO_USER_SEG_MALLOC) {
+ free(io_req->user_segptr, M_SCSIPASS);
+ io_req->user_segptr = NULL;
+ }
+
+ /*
+ * We only want to free memory we malloced.
+ */
+ if (io_req->data_flags == CAM_DATA_VADDR) {
+ for (i = 0; i < io_req->num_bufs; i++) {
+ if (io_req->kern_bufs[i] == NULL)
+ continue;
+
+ free(io_req->kern_bufs[i], M_SCSIPASS);
+ io_req->kern_bufs[i] = NULL;
+ }
+ } else if (io_req->data_flags == CAM_DATA_SG) {
+ for (i = 0; i < io_req->num_kern_segs; i++) {
+ if ((uint8_t *)(uintptr_t)
+ io_req->kern_segptr[i].ds_addr == NULL)
+ continue;
+
+ uma_zfree(softc->pass_io_zone, (uint8_t *)(uintptr_t)
+ io_req->kern_segptr[i].ds_addr);
+ io_req->kern_segptr[i].ds_addr = 0;
+ }
+ }
+
+ if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) {
+ free(io_req->kern_segptr, M_SCSIPASS);
+ io_req->kern_segptr = NULL;
+ }
+
+ if (io_req->data_flags != CAM_DATA_PADDR) {
+ for (i = 0; i < numbufs; i++) {
+ /*
+ * Restore the user's buffer pointers to their
+ * previous values.
+ */
+ if (io_req->user_bufs[i] != NULL)
+ *data_ptrs[i] = io_req->user_bufs[i];
+ }
+ }
+
+}
+
+static int
+passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req,
+ ccb_flags direction)
+{
+ bus_size_t kern_watermark, user_watermark, len_copied, len_to_copy;
+ bus_dma_segment_t *user_sglist, *kern_sglist;
+ int i, j, error;
+
+ error = 0;
+ kern_watermark = 0;
+ user_watermark = 0;
+ len_to_copy = 0;
+ len_copied = 0;
+ user_sglist = io_req->user_segptr;
+ kern_sglist = io_req->kern_segptr;
+
+ for (i = 0, j = 0; i < io_req->num_user_segs &&
+ j < io_req->num_kern_segs;) {
+ uint8_t *user_ptr, *kern_ptr;
+
+		len_to_copy = min(user_sglist[i].ds_len - user_watermark,
+ kern_sglist[j].ds_len - kern_watermark);
+
+ user_ptr = (uint8_t *)(uintptr_t)user_sglist[i].ds_addr;
+ user_ptr = user_ptr + user_watermark;
+ kern_ptr = (uint8_t *)(uintptr_t)kern_sglist[j].ds_addr;
+ kern_ptr = kern_ptr + kern_watermark;
+
+ user_watermark += len_to_copy;
+ kern_watermark += len_to_copy;
+
+ if (!useracc(user_ptr, len_to_copy,
+ (direction == CAM_DIR_IN) ? VM_PROT_WRITE : VM_PROT_READ)) {
+ xpt_print(periph->path, "%s: unable to access user "
+ "S/G list element %p len %zu\n", __func__,
+ user_ptr, len_to_copy);
+ error = EFAULT;
+ goto bailout;
+ }
+
+ if (direction == CAM_DIR_IN) {
+ error = copyout(kern_ptr, user_ptr, len_to_copy);
+ if (error != 0) {
+ xpt_print(periph->path, "%s: copyout of %u "
+ "bytes from %p to %p failed with "
+ "error %d\n", __func__, len_to_copy,
+ kern_ptr, user_ptr, error);
+ goto bailout;
+ }
+ } else {
+ error = copyin(user_ptr, kern_ptr, len_to_copy);
+ if (error != 0) {
+ xpt_print(periph->path, "%s: copyin of %u "
+ "bytes from %p to %p failed with "
+ "error %d\n", __func__, len_to_copy,
+ user_ptr, kern_ptr, error);
+ goto bailout;
+ }
+ }
+
+ len_copied += len_to_copy;
+
+ if (user_sglist[i].ds_len == user_watermark) {
+ i++;
+ user_watermark = 0;
+ }
+
+ if (kern_sglist[j].ds_len == kern_watermark) {
+ j++;
+ kern_watermark = 0;
+ }
+ }
+
+bailout:
+
+ return (error);
+}
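
[Editor's note: passcopysglist() above walks two segment lists whose boundaries need not line up, advancing a watermark into the current user and kernel segment and copying the overlap on each pass. A self-contained userland sketch of the same watermark walk, with memcpy() standing in for copyin()/copyout() and a toy struct seg in place of bus_dma_segment_t:]

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct seg { uint8_t *addr; size_t len; };

    static void
    copy_sglists(struct seg *dst, int ndst, struct seg *src, int nsrc)
    {
    	size_t dmark = 0, smark = 0;	/* watermarks into current segments */
    	int i, j;

    	for (i = 0, j = 0; i < ndst && j < nsrc; ) {
    		/* copy however much of the two current segments overlaps */
    		size_t n = dst[i].len - dmark < src[j].len - smark ?
    		    dst[i].len - dmark : src[j].len - smark;

    		memcpy(dst[i].addr + dmark, src[j].addr + smark, n);
    		dmark += n;
    		smark += n;
    		if (dmark == dst[i].len) { i++; dmark = 0; }
    		if (smark == src[j].len) { j++; smark = 0; }
    	}
    }

    int
    main(void)
    {
    	uint8_t a[6] = "hello", b[6] = {0};
    	struct seg src[2] = { { a, 2 }, { a + 2, 4 } };	/* "he" + "llo\0" */
    	struct seg dst[1] = { { b, 6 } };

    	copy_sglists(dst, 1, src, 2);
    	printf("%s\n", b);	/* prints "hello" */
    	return (0);
    }
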
+
+static int
+passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req)
+{
+ union ccb *ccb;
+ struct pass_softc *softc;
+ int numbufs, i;
+ uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
+ uint32_t lengths[CAM_PERIPH_MAXMAPS];
+ uint32_t dirs[CAM_PERIPH_MAXMAPS];
+ uint32_t num_segs;
+ uint16_t *seg_cnt_ptr;
+ size_t maxmap;
+ int error;
+
+ cam_periph_assert(periph, MA_NOTOWNED);
+
+ softc = periph->softc;
+
+ error = 0;
+ ccb = &io_req->ccb;
+ maxmap = 0;
+ num_segs = 0;
+ seg_cnt_ptr = NULL;
+
+ switch(ccb->ccb_h.func_code) {
+ case XPT_DEV_MATCH:
+ if (ccb->cdm.match_buf_len == 0) {
+ printf("%s: invalid match buffer length 0\n", __func__);
+ return(EINVAL);
+ }
+ if (ccb->cdm.pattern_buf_len > 0) {
+ data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
+ lengths[0] = ccb->cdm.pattern_buf_len;
+ dirs[0] = CAM_DIR_OUT;
+ data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
+ lengths[1] = ccb->cdm.match_buf_len;
+ dirs[1] = CAM_DIR_IN;
+ numbufs = 2;
+ } else {
+ data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
+ lengths[0] = ccb->cdm.match_buf_len;
+ dirs[0] = CAM_DIR_IN;
+ numbufs = 1;
+ }
+ io_req->data_flags = CAM_DATA_VADDR;
+ break;
+ case XPT_SCSI_IO:
+ case XPT_CONT_TARGET_IO:
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
+ return(0);
+
+ /*
+ * The user shouldn't be able to supply a bio.
+ */
+ if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
+ return (EINVAL);
+
+ io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK;
+
+ data_ptrs[0] = &ccb->csio.data_ptr;
+ lengths[0] = ccb->csio.dxfer_len;
+ dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
+ num_segs = ccb->csio.sglist_cnt;
+ seg_cnt_ptr = &ccb->csio.sglist_cnt;
+ numbufs = 1;
+ maxmap = softc->maxio;
+ break;
+ case XPT_ATA_IO:
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
+ return(0);
+
+ /*
+ * We only support a single virtual address for ATA I/O.
+ */
+ if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
+ return (EINVAL);
+
+ io_req->data_flags = CAM_DATA_VADDR;
+
+ data_ptrs[0] = &ccb->ataio.data_ptr;
+ lengths[0] = ccb->ataio.dxfer_len;
+ dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
+ numbufs = 1;
+ maxmap = softc->maxio;
+ break;
+ case XPT_SMP_IO:
+ io_req->data_flags = CAM_DATA_VADDR;
+
+ data_ptrs[0] = &ccb->smpio.smp_request;
+ lengths[0] = ccb->smpio.smp_request_len;
+ dirs[0] = CAM_DIR_OUT;
+ data_ptrs[1] = &ccb->smpio.smp_response;
+ lengths[1] = ccb->smpio.smp_response_len;
+ dirs[1] = CAM_DIR_IN;
+ numbufs = 2;
+ maxmap = softc->maxio;
+ break;
+ case XPT_DEV_ADVINFO:
+ if (ccb->cdai.bufsiz == 0)
+ return (0);
+
+ io_req->data_flags = CAM_DATA_VADDR;
+
+ data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
+ lengths[0] = ccb->cdai.bufsiz;
+ dirs[0] = CAM_DIR_IN;
+ numbufs = 1;
+ break;
+ default:
+ return(EINVAL);
+ break; /* NOTREACHED */
+ }
+
+ io_req->num_bufs = numbufs;
+
+ /*
+ * If there is a maximum, check to make sure that the user's
+ * request fits within the limit. In general, we should only have
+ * a maximum length for requests that go to hardware. Otherwise it
+ * is whatever we're able to malloc.
+ */
+ for (i = 0; i < numbufs; i++) {
+ io_req->user_bufs[i] = *data_ptrs[i];
+ io_req->dirs[i] = dirs[i];
+ io_req->lengths[i] = lengths[i];
+
+ if (maxmap == 0)
+ continue;
+
+ if (lengths[i] <= maxmap)
+ continue;
+
+ xpt_print(periph->path, "%s: data length %u > max allowed %u "
+ "bytes\n", __func__, lengths[i], maxmap);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ switch (io_req->data_flags) {
+ case CAM_DATA_VADDR:
+ /* Map or copy the buffer into kernel address space */
+ for (i = 0; i < numbufs; i++) {
+ uint8_t *tmp_buf;
+
+ /*
+ * If for some reason no length is specified, we
+ * don't need to allocate anything.
+ */
+ if (io_req->lengths[i] == 0)
+ continue;
+
+			/*
+			 * Make sure that the user's buffer is accessible
+			 * to the calling process.
+			 */
+ if (!useracc(io_req->user_bufs[i], io_req->lengths[i],
+ (io_req->dirs[i] == CAM_DIR_IN) ? VM_PROT_WRITE :
+ VM_PROT_READ)) {
+ xpt_print(periph->path, "%s: user address %p "
+ "length %u is not accessible\n", __func__,
+ io_req->user_bufs[i], io_req->lengths[i]);
+ error = EFAULT;
+ goto bailout;
+ }
+
+ tmp_buf = malloc(lengths[i], M_SCSIPASS,
+ M_WAITOK | M_ZERO);
+ io_req->kern_bufs[i] = tmp_buf;
+ *data_ptrs[i] = tmp_buf;
+
+#if 0
+ xpt_print(periph->path, "%s: malloced %p len %u, user "
+ "buffer %p, operation: %s\n", __func__,
+ tmp_buf, lengths[i], io_req->user_bufs[i],
+ (dirs[i] == CAM_DIR_IN) ? "read" : "write");
+#endif
+ /*
+ * We only need to copy in if the user is writing.
+ */
+ if (dirs[i] != CAM_DIR_OUT)
+ continue;
+
+ error = copyin(io_req->user_bufs[i],
+ io_req->kern_bufs[i], lengths[i]);
+ if (error != 0) {
+ xpt_print(periph->path, "%s: copy of user "
+ "buffer from %p to %p failed with "
+ "error %d\n", __func__,
+ io_req->user_bufs[i],
+ io_req->kern_bufs[i], error);
+ goto bailout;
+ }
+ }
+ break;
+ case CAM_DATA_PADDR:
+ /* Pass down the pointer as-is */
+ break;
+ case CAM_DATA_SG: {
+ size_t sg_length, size_to_go, alloc_size;
+ uint32_t num_segs_needed;
+
+ /*
+ * Copy the user S/G list in, and then copy in the
+ * individual segments.
+ */
+ /*
+ * We shouldn't see this, but check just in case.
+ */
+ if (numbufs != 1) {
+ xpt_print(periph->path, "%s: cannot currently handle "
+ "more than one S/G list per CCB\n", __func__);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ /*
+ * We have to have at least one segment.
+ */
+ if (num_segs == 0) {
+ xpt_print(periph->path, "%s: CAM_DATA_SG flag set, "
+ "but sglist_cnt=0!\n", __func__);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ /*
+ * Make sure the user specified the total length and didn't
+ * just leave it to us to decode the S/G list.
+ */
+ if (lengths[0] == 0) {
+ xpt_print(periph->path, "%s: no dxfer_len specified, "
+ "but CAM_DATA_SG flag is set!\n", __func__);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ /*
+ * We allocate buffers in io_zone_size increments for an
+ * S/G list. This will generally be MAXPHYS.
+ */
+ if (lengths[0] <= softc->io_zone_size)
+ num_segs_needed = 1;
+ else {
+ num_segs_needed = lengths[0] / softc->io_zone_size;
+ if ((lengths[0] % softc->io_zone_size) != 0)
+ num_segs_needed++;
+ }
+
+ /* Figure out the size of the S/G list */
+ sg_length = num_segs * sizeof(bus_dma_segment_t);
+ io_req->num_user_segs = num_segs;
+ io_req->num_kern_segs = num_segs_needed;
+
+ /* Save the user's S/G list pointer for later restoration */
+ io_req->user_bufs[0] = *data_ptrs[0];
+
+		/*
+		 * If we have enough segments allocated by default to handle
+		 * the length of the user's S/G list, use the preallocated
+		 * array; otherwise allocate a larger one.
+		 */
+ if (num_segs > PASS_MAX_SEGS) {
+ io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
+ num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
+ io_req->flags |= PASS_IO_USER_SEG_MALLOC;
+ } else
+ io_req->user_segptr = io_req->user_segs;
+
+ if (!useracc(*data_ptrs[0], sg_length, VM_PROT_READ)) {
+ xpt_print(periph->path, "%s: unable to access user "
+ "S/G list at %p\n", __func__, *data_ptrs[0]);
+ error = EFAULT;
+ goto bailout;
+ }
+
+ error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
+ if (error != 0) {
+ xpt_print(periph->path, "%s: copy of user S/G list "
+ "from %p to %p failed with error %d\n",
+ __func__, *data_ptrs[0], io_req->user_segptr,
+ error);
+ goto bailout;
+ }
+
+ if (num_segs_needed > PASS_MAX_SEGS) {
+ io_req->kern_segptr = malloc(sizeof(bus_dma_segment_t) *
+ num_segs_needed, M_SCSIPASS, M_WAITOK | M_ZERO);
+ io_req->flags |= PASS_IO_KERN_SEG_MALLOC;
+ } else {
+ io_req->kern_segptr = io_req->kern_segs;
+ }
+
+ /*
+ * Allocate the kernel S/G list.
+ */
+ for (size_to_go = lengths[0], i = 0;
+ size_to_go > 0 && i < num_segs_needed;
+ i++, size_to_go -= alloc_size) {
+ uint8_t *kern_ptr;
+
+ alloc_size = min(size_to_go, softc->io_zone_size);
+ kern_ptr = uma_zalloc(softc->pass_io_zone, M_WAITOK);
+ io_req->kern_segptr[i].ds_addr =
+ (bus_addr_t)(uintptr_t)kern_ptr;
+ io_req->kern_segptr[i].ds_len = alloc_size;
+ }
+ if (size_to_go > 0) {
+ printf("%s: size_to_go = %zu, software error!\n",
+ __func__, size_to_go);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ *data_ptrs[0] = (uint8_t *)io_req->kern_segptr;
+ *seg_cnt_ptr = io_req->num_kern_segs;
+
+ /*
+ * We only need to copy data here if the user is writing.
+ */
+ if (dirs[0] == CAM_DIR_OUT)
+ error = passcopysglist(periph, io_req, dirs[0]);
+ break;
+ }
+ case CAM_DATA_SG_PADDR: {
+ size_t sg_length;
+
+ /*
+ * We shouldn't see this, but check just in case.
+ */
+ if (numbufs != 1) {
+ printf("%s: cannot currently handle more than one "
+ "S/G list per CCB\n", __func__);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ /*
+ * We have to have at least one segment.
+ */
+ if (num_segs == 0) {
+ xpt_print(periph->path, "%s: CAM_DATA_SG_PADDR flag "
+ "set, but sglist_cnt=0!\n", __func__);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ /*
+ * Make sure the user specified the total length and didn't
+ * just leave it to us to decode the S/G list.
+ */
+ if (lengths[0] == 0) {
+ xpt_print(periph->path, "%s: no dxfer_len specified, "
+			    "but CAM_DATA_SG_PADDR flag is set!\n", __func__);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ /* Figure out the size of the S/G list */
+ sg_length = num_segs * sizeof(bus_dma_segment_t);
+ io_req->num_user_segs = num_segs;
+ io_req->num_kern_segs = io_req->num_user_segs;
+
+ /* Save the user's S/G list pointer for later restoration */
+ io_req->user_bufs[0] = *data_ptrs[0];
+
+ if (num_segs > PASS_MAX_SEGS) {
+ io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
+ num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
+ io_req->flags |= PASS_IO_USER_SEG_MALLOC;
+ } else
+ io_req->user_segptr = io_req->user_segs;
+
+ io_req->kern_segptr = io_req->user_segptr;
+
+ error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
+ if (error != 0) {
+ xpt_print(periph->path, "%s: copy of user S/G list "
+ "from %p to %p failed with error %d\n",
+ __func__, *data_ptrs[0], io_req->user_segptr,
+ error);
+ goto bailout;
+ }
+ break;
+ }
+ default:
+ case CAM_DATA_BIO:
+ /*
+ * A user shouldn't be attaching a bio to the CCB. It
+ * isn't a user-accessible structure.
+ */
+ error = EINVAL;
+ break;
+ }
+
+bailout:
+ if (error != 0)
+ passiocleanup(softc, io_req);
+
+ return (error);
+}
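
[Editor's note: for CAM_DATA_SG, passmemsetup() above sizes the kernel segment list by rounding the transfer length up to whole io_zone_size chunks and carving the final chunk short. The arithmetic in isolation; the 300 KiB length and 128 KiB zone size are made-up values:]

    #include <stdio.h>
    #include <stddef.h>

    int
    main(void)
    {
    	size_t len  = 300 * 1024;	/* the user's dxfer_len */
    	size_t zone = 128 * 1024;	/* softc->io_zone_size, often MAXPHYS */
    	size_t nsegs, left, chunk;

    	nsegs = len / zone;
    	if (len % zone != 0)
    		nsegs++;		/* round up: 3 segments here */
    	printf("%zu bytes -> %zu kernel segments\n", len, nsegs);

    	for (left = len; left > 0; left -= chunk) {
    		chunk = left < zone ? left : zone;
    		printf("  segment of %zu bytes\n", chunk);	/* 128K, 128K, 44K */
    	}
    	return (0);
    }
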
+
+static int
+passmemdone(struct cam_periph *periph, struct pass_io_req *io_req)
+{
+ struct pass_softc *softc;
+ union ccb *ccb;
+ int error;
+ int i;
+
+ error = 0;
+ softc = (struct pass_softc *)periph->softc;
+ ccb = &io_req->ccb;
+
+ switch (io_req->data_flags) {
+ case CAM_DATA_VADDR:
+ /*
+ * Copy back to the user buffer if this was a read.
+ */
+ for (i = 0; i < io_req->num_bufs; i++) {
+ if (io_req->dirs[i] != CAM_DIR_IN)
+ continue;
+
+ error = copyout(io_req->kern_bufs[i],
+ io_req->user_bufs[i], io_req->lengths[i]);
+ if (error != 0) {
+ xpt_print(periph->path, "Unable to copy %u "
+ "bytes from %p to user address %p\n",
+ io_req->lengths[i],
+ io_req->kern_bufs[i],
+ io_req->user_bufs[i]);
+ goto bailout;
+ }
+
+ }
+ break;
+ case CAM_DATA_PADDR:
+ /* Do nothing. The pointer is a physical address already */
+ break;
+ case CAM_DATA_SG:
+ /*
+ * Copy back to the user buffer if this was a read.
+ * Restore the user's S/G list buffer pointer.
+ */
+ if (io_req->dirs[0] == CAM_DIR_IN)
+ error = passcopysglist(periph, io_req, io_req->dirs[0]);
+ break;
+ case CAM_DATA_SG_PADDR:
+ /*
+ * Restore the user's S/G list buffer pointer. No need to
+ * copy.
+ */
+ break;
+ default:
+ case CAM_DATA_BIO:
+ error = EINVAL;
+ break;
+ }
+
+bailout:
+ /*
+ * Reset the user's pointers to their original values and free
+ * allocated memory.
+ */
+ passiocleanup(softc, io_req);
+
+ return (error);
+}
+
static int
passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
@@ -622,15 +1828,317 @@ passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread
break;
}
+ case CAMIOQUEUE:
+ {
+ struct pass_io_req *io_req;
+ union ccb **user_ccb, *ccb;
+ xpt_opcode fc;
+
+ if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) {
+ error = passcreatezone(periph);
+ if (error != 0)
+ goto bailout;
+ }
+
+ /*
+ * We're going to do a blocking allocation for this I/O
+ * request, so we have to drop the lock.
+ */
+ cam_periph_unlock(periph);
+
+ io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO);
+ ccb = &io_req->ccb;
+ user_ccb = (union ccb **)addr;
+
+ /*
+ * Unlike the CAMIOCOMMAND ioctl above, we only have a
+ * pointer to the user's CCB, so we have to copy the whole
+ * thing in to a buffer we have allocated (above) instead
+ * of allowing the ioctl code to malloc a buffer and copy
+ * it in.
+ *
+ * This is an advantage for this asynchronous interface,
+ * since we don't want the memory to get freed while the
+ * CCB is outstanding.
+ */
+#if 0
+ xpt_print(periph->path, "Copying user CCB %p to "
+ "kernel address %p\n", *user_ccb, ccb);
+#endif
+ error = copyin(*user_ccb, ccb, sizeof(*ccb));
+ if (error != 0) {
+ xpt_print(periph->path, "Copy of user CCB %p to "
+ "kernel address %p failed with error %d\n",
+ *user_ccb, ccb, error);
+ uma_zfree(softc->pass_zone, io_req);
+ cam_periph_lock(periph);
+ break;
+ }
+
+ /*
+		 * Some CCB types, like scan bus and scan lun, can only go
+ * through the transport layer device.
+ */
+ if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) {
+ xpt_print(periph->path, "CCB function code %#x is "
+ "restricted to the XPT device\n",
+ ccb->ccb_h.func_code);
+ uma_zfree(softc->pass_zone, io_req);
+ cam_periph_lock(periph);
+ error = ENODEV;
+ break;
+ }
+
+ /*
+ * Save the user's CCB pointer as well as his linked list
+ * pointers and peripheral private area so that we can
+ * restore these later.
+ */
+ io_req->user_ccb_ptr = *user_ccb;
+ io_req->user_periph_links = ccb->ccb_h.periph_links;
+ io_req->user_periph_priv = ccb->ccb_h.periph_priv;
+
+ /*
+ * Now that we've saved the user's values, we can set our
+ * own peripheral private entry.
+ */
+ ccb->ccb_h.ccb_ioreq = io_req;
+
+ /* Compatibility for RL/priority-unaware code. */
+ priority = ccb->ccb_h.pinfo.priority;
+ if (priority <= CAM_PRIORITY_OOB)
+ priority += CAM_PRIORITY_OOB + 1;
+
+ /*
+ * Setup fields in the CCB like the path and the priority.
+ * The path in particular cannot be done in userland, since
+ * it is a pointer to a kernel data structure.
+ */
+ xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, priority,
+ ccb->ccb_h.flags);
+
+ /*
+ * Setup our done routine. There is no way for the user to
+ * have a valid pointer here.
+ */
+ ccb->ccb_h.cbfcnp = passdone;
+
+ fc = ccb->ccb_h.func_code;
+ /*
+ * If this function code has memory that can be mapped in
+ * or out, we need to call passmemsetup().
+ */
+ if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO)
+ || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH)
+ || (fc == XPT_DEV_ADVINFO)) {
+ error = passmemsetup(periph, io_req);
+ if (error != 0) {
+ uma_zfree(softc->pass_zone, io_req);
+ cam_periph_lock(periph);
+ break;
+ }
+ } else
+ io_req->mapinfo.num_bufs_used = 0;
+
+ cam_periph_lock(periph);
+
+ /*
+ * Everything goes on the incoming queue initially.
+ */
+ TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links);
+
+ /*
+ * If the CCB is queued, and is not a user CCB, then
+ * we need to allocate a slot for it. Call xpt_schedule()
+ * so that our start routine will get called when a CCB is
+ * available.
+ */
+ if ((fc & XPT_FC_QUEUED)
+ && ((fc & XPT_FC_USER_CCB) == 0)) {
+ xpt_schedule(periph, priority);
+ break;
+ }
+
+ /*
+ * At this point, the CCB in question is either an
+ * immediate CCB (like XPT_DEV_ADVINFO) or it is a user CCB
+ * and therefore should be malloced, not allocated via a slot.
+ * Remove the CCB from the incoming queue and add it to the
+ * active queue.
+ */
+ TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
+ TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);
+
+ xpt_action(ccb);
+
+ /*
+ * If this is not a queued CCB (i.e. it is an immediate CCB),
+ * then it is already done. We need to put it on the done
+ * queue for the user to fetch.
+ */
+ if ((fc & XPT_FC_QUEUED) == 0) {
+ TAILQ_REMOVE(&softc->active_queue, io_req, links);
+ TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
+ }
+ break;
+ }
+ case CAMIOGET:
+ {
+ union ccb **user_ccb;
+ struct pass_io_req *io_req;
+ int old_error;
+
+ user_ccb = (union ccb **)addr;
+ old_error = 0;
+
+ io_req = TAILQ_FIRST(&softc->done_queue);
+ if (io_req == NULL) {
+ error = ENOENT;
+ break;
+ }
+
+ /*
+ * Remove the I/O from the done queue.
+ */
+ TAILQ_REMOVE(&softc->done_queue, io_req, links);
+
+ /*
+ * We have to drop the lock during the copyout because the
+ * copyout can result in VM faults that require sleeping.
+ */
+ cam_periph_unlock(periph);
+
+ /*
+ * Do any needed copies (e.g. for reads) and revert the
+ * pointers in the CCB back to the user's pointers.
+ */
+ error = passmemdone(periph, io_req);
+
+ old_error = error;
+
+ io_req->ccb.ccb_h.periph_links = io_req->user_periph_links;
+ io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv;
+
+#if 0
+ xpt_print(periph->path, "Copying to user CCB %p from "
+ "kernel address %p\n", *user_ccb, &io_req->ccb);
+#endif
+
+ error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb));
+ if (error != 0) {
+ xpt_print(periph->path, "Copy to user CCB %p from "
+ "kernel address %p failed with error %d\n",
+ *user_ccb, &io_req->ccb, error);
+ }
+
+ /*
+ * Prefer the first error we got back, and make sure we
+ * don't overwrite bad status with good.
+ */
+ if (old_error != 0)
+ error = old_error;
+
+ cam_periph_lock(periph);
+
+ /*
+ * At this point, if there was an error, we could potentially
+ * re-queue the I/O and try again. But why? The error
+ * would almost certainly happen again. We might as well
+ * not leak memory.
+ */
+ uma_zfree(softc->pass_zone, io_req);
+ break;
+ }
default:
error = cam_periph_ioctl(periph, cmd, addr, passerror);
break;
}
+bailout:
cam_periph_unlock(periph);
+
return(error);
}
+static int
+passpoll(struct cdev *dev, int poll_events, struct thread *td)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+ int revents;
+
+ periph = (struct cam_periph *)dev->si_drv1;
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct pass_softc *)periph->softc;
+
+ revents = poll_events & (POLLOUT | POLLWRNORM);
+ if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
+ cam_periph_lock(periph);
+
+ if (!TAILQ_EMPTY(&softc->done_queue)) {
+ revents |= poll_events & (POLLIN | POLLRDNORM);
+ }
+ cam_periph_unlock(periph);
+ if (revents == 0)
+ selrecord(td, &softc->read_select);
+ }
+
+ return (revents);
+}
+
+static int
+passkqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+
+ periph = (struct cam_periph *)dev->si_drv1;
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct pass_softc *)periph->softc;
+
+ kn->kn_hook = (caddr_t)periph;
+ kn->kn_fop = &passread_filtops;
+ knlist_add(&softc->read_select.si_note, kn, 0);
+
+ return (0);
+}
+
+static void
+passreadfiltdetach(struct knote *kn)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+
+ periph = (struct cam_periph *)kn->kn_hook;
+ softc = (struct pass_softc *)periph->softc;
+
+ knlist_remove(&softc->read_select.si_note, kn, 0);
+}
+
+static int
+passreadfilt(struct knote *kn, long hint)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+ int retval;
+
+ periph = (struct cam_periph *)kn->kn_hook;
+ softc = (struct pass_softc *)periph->softc;
+
+ cam_periph_assert(periph, MA_OWNED);
+
+ if (TAILQ_EMPTY(&softc->done_queue))
+ retval = 0;
+ else
+ retval = 1;
+
+ return (retval);
+}
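
[Editor's note: passreadfilt() reports the device readable whenever the done queue is non-empty, so a consumer can block in kqueue instead of poll(2). A minimal userland sketch, assuming fd is an already-open pass(4) descriptor:]

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <err.h>

    static void
    wait_for_done_ccb(int fd)
    {
    	struct kevent ev;
    	int kq;

    	if ((kq = kqueue()) == -1)
    		err(1, "kqueue");
    	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
    	/* one kevent() call both registers the filter and waits for it */
    	if (kevent(kq, &ev, 1, &ev, 1, NULL) == -1)
    		err(1, "kevent");
    	/* a completed CCB is now waiting; collect it with CAMIOGET */
    }
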
+
/*
* Generally, "ccb" should be the CCB supplied by the kernel. "inccb"
* should be the CCB that is copied in from the user.
@@ -652,6 +2160,10 @@ passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
xpt_merge_ccb(ccb, inccb);
/*
+	 * There's no way for the user to have a completion
+	 * function, so we put our own completion function in here.
+	 */
+ ccb->ccb_h.cbfcnp = passdone;
+
+ /*
* Let cam_periph_mapmem do a sanity check on the data pointer format.
* Even if no data transfer is needed, it's a cheap check and it
* simplifies the code.
diff --git a/sys/cam/scsi/scsi_pass.h b/sys/cam/scsi/scsi_pass.h
index ae0e058..797ef08 100644
--- a/sys/cam/scsi/scsi_pass.h
+++ b/sys/cam/scsi/scsi_pass.h
@@ -39,4 +39,12 @@
#define CAMIOCOMMAND _IOWR(CAM_VERSION, 2, union ccb)
#define CAMGETPASSTHRU _IOWR(CAM_VERSION, 3, union ccb)
+/*
+ * These two ioctls take a union ccb *, but that is not explicitly declared
+ * to avoid having the ioctl handling code malloc and free their own copy
+ * of the CCB or the CCB pointer.
+ */
+#define CAMIOQUEUE _IO(CAM_VERSION, 4)
+#define CAMIOGET _IO(CAM_VERSION, 5)
+
#endif
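
[Editor's note: taken together, CAMIOQUEUE and CAMIOGET give userland an asynchronous submit/reap pair. A minimal sketch of the intended flow, assuming libcam, a pass(4) unit at /dev/pass0, and a TEST UNIT READY command; the device path and parameter values are illustrative:]

    #include <sys/ioctl.h>
    #include <err.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <camlib.h>
    #include <cam/scsi/scsi_message.h>
    #include <cam/scsi/scsi_pass.h>

    int
    main(void)
    {
    	struct cam_device *dev;
    	union ccb *ccb;
    	struct pollfd pfd;

    	if ((dev = cam_open_device("/dev/pass0", O_RDWR)) == NULL)
    		errx(1, "%s", cam_errbuf);
    	if ((ccb = cam_getccb(dev)) == NULL)
    		errx(1, "cam_getccb failed");
    	scsi_test_unit_ready(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ NULL,
    	    MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/ 5000);

    	/* submit: the kernel copies the CCB in and returns immediately */
    	if (ioctl(dev->fd, CAMIOQUEUE, &ccb) == -1)
    		err(1, "CAMIOQUEUE");

    	/* block until passdone() puts the request on the done queue */
    	pfd.fd = dev->fd;
    	pfd.events = POLLIN;
    	if (poll(&pfd, 1, -1) == -1)
    		err(1, "poll");

    	/* reap: the kernel copies the completed CCB back over ours */
    	if (ioctl(dev->fd, CAMIOGET, &ccb) == -1)
    		err(1, "CAMIOGET");
    	printf("status %#x\n", ccb->ccb_h.status & CAM_STATUS_MASK);

    	cam_freeccb(ccb);
    	cam_close_device(dev);
    	return (0);
    }
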
diff --git a/sys/dev/arcmsr/arcmsr.c b/sys/dev/arcmsr/arcmsr.c
index 5d46ad3..b4e6ce5 100644
--- a/sys/dev/arcmsr/arcmsr.c
+++ b/sys/dev/arcmsr/arcmsr.c
@@ -75,6 +75,8 @@
** 1.20.00.26 12/14/2012 Ching Huang Added support ARC1214,1224,1264,1284
** 1.20.00.27 05/06/2013 Ching Huang Fixed out standing cmd full on ARC-12x4
** 1.20.00.28 09/13/2013 Ching Huang Removed recursive mutex in arcmsr_abort_dr_ccbs
+** 1.20.00.29 12/18/2013 Ching Huang Change simq allocation number, support ARC1883
+** 1.30.00.00 11/30/2015 Ching Huang Added support ARC1203
******************************************************************************************
*/
@@ -125,15 +127,15 @@ __FBSDID("$FreeBSD$");
**************************************************************************
*/
#if __FreeBSD_version >= 500005
- #include <sys/selinfo.h>
- #include <sys/mutex.h>
- #include <sys/endian.h>
- #include <dev/pci/pcivar.h>
- #include <dev/pci/pcireg.h>
+ #include <sys/selinfo.h>
+ #include <sys/mutex.h>
+ #include <sys/endian.h>
+ #include <dev/pci/pcivar.h>
+ #include <dev/pci/pcireg.h>
#else
- #include <sys/select.h>
- #include <pci/pcivar.h>
- #include <pci/pcireg.h>
+ #include <sys/select.h>
+ #include <pci/pcivar.h>
+ #include <pci/pcireg.h>
#endif
#if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025
@@ -146,7 +148,7 @@ __FBSDID("$FreeBSD$");
#define arcmsr_callout_init(a) callout_init(a);
#endif
-#define ARCMSR_DRIVER_VERSION "arcmsr version 1.20.00.28 2013-09-13"
+#define ARCMSR_DRIVER_VERSION "arcmsr version 1.30.00.00 2015-11-30"
#include <dev/arcmsr/arcmsr.h>
/*
**************************************************************************
@@ -180,8 +182,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
static int arcmsr_resume(device_t dev);
static int arcmsr_suspend(device_t dev);
static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
-static void arcmsr_polling_devmap(void *arg);
-static void arcmsr_srb_timeout(void *arg);
+static void arcmsr_polling_devmap(void *arg);
+static void arcmsr_srb_timeout(void *arg);
static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb);
#ifdef ARCMSR_DEBUG1
static void arcmsr_dump_data(struct AdapterControlBlock *acb);
@@ -219,11 +221,11 @@ static device_method_t arcmsr_methods[]={
{ 0, 0 }
#endif
};
-
+
static driver_t arcmsr_driver={
"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
};
-
+
static devclass_t arcmsr_devclass;
DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0);
MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
@@ -246,38 +248,38 @@ static struct cdevsw arcmsr_cdevsw={
};
#else
#define ARCMSR_CDEV_MAJOR 180
-
+
static struct cdevsw arcmsr_cdevsw = {
- arcmsr_open, /* open */
- arcmsr_close, /* close */
- noread, /* read */
- nowrite, /* write */
- arcmsr_ioctl, /* ioctl */
- nopoll, /* poll */
- nommap, /* mmap */
- nostrategy, /* strategy */
- "arcmsr", /* name */
- ARCMSR_CDEV_MAJOR, /* major */
- nodump, /* dump */
- nopsize, /* psize */
- 0 /* flags */
+ arcmsr_open, /* open */
+ arcmsr_close, /* close */
+ noread, /* read */
+ nowrite, /* write */
+ arcmsr_ioctl, /* ioctl */
+ nopoll, /* poll */
+ nommap, /* mmap */
+ nostrategy, /* strategy */
+ "arcmsr", /* name */
+ ARCMSR_CDEV_MAJOR, /* major */
+ nodump, /* dump */
+ nopsize, /* psize */
+ 0 /* flags */
};
#endif
/*
**************************************************************************
**************************************************************************
*/
-#if __FreeBSD_version < 500005
+#if __FreeBSD_version < 500005
static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc)
#else
- #if __FreeBSD_version < 503000
+ #if __FreeBSD_version < 503000
static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc)
#else
static int arcmsr_open(struct cdev *dev, int flags, int fmt, struct thread *proc)
#endif
#endif
{
- #if __FreeBSD_version < 503000
+ #if __FreeBSD_version < 503000
struct AdapterControlBlock *acb = dev->si_drv1;
#else
int unit = dev2unit(dev);
@@ -292,17 +294,17 @@ static struct cdevsw arcmsr_cdevsw = {
**************************************************************************
**************************************************************************
*/
-#if __FreeBSD_version < 500005
+#if __FreeBSD_version < 500005
static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc)
#else
- #if __FreeBSD_version < 503000
+ #if __FreeBSD_version < 503000
static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc)
#else
static int arcmsr_close(struct cdev *dev, int flags, int fmt, struct thread *proc)
#endif
#endif
{
- #if __FreeBSD_version < 503000
+ #if __FreeBSD_version < 503000
struct AdapterControlBlock *acb = dev->si_drv1;
#else
int unit = dev2unit(dev);
@@ -317,17 +319,17 @@ static struct cdevsw arcmsr_cdevsw = {
**************************************************************************
**************************************************************************
*/
-#if __FreeBSD_version < 500005
+#if __FreeBSD_version < 500005
static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc)
#else
- #if __FreeBSD_version < 503000
+ #if __FreeBSD_version < 503000
static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc)
#else
static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc)
#endif
#endif
{
- #if __FreeBSD_version < 503000
+ #if __FreeBSD_version < 503000
struct AdapterControlBlock *acb = dev->si_drv1;
#else
int unit = dev2unit(dev);
@@ -346,7 +348,7 @@ static struct cdevsw arcmsr_cdevsw = {
static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
{
u_int32_t intmask_org = 0;
-
+
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
/* disable all outbound interrupt */
@@ -355,10 +357,11 @@ static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_B: {
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
/* disable all outbound interrupt */
- intmask_org = CHIP_REG_READ32(HBB_DOORBELL,
- 0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
+ intmask_org = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask)
+ & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
+ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, 0); /* disable all interrupt */
}
break;
case ACB_ADAPTER_TYPE_C: {
@@ -383,7 +386,7 @@ static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
{
u_int32_t mask;
-
+
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
/* enable outbound Post Queue, outbound doorbell Interrupt */
@@ -393,9 +396,10 @@ static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t in
}
break;
case ACB_ADAPTER_TYPE_B: {
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
+ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
}
break;
@@ -424,7 +428,7 @@ static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
u_int32_t Index;
u_int8_t Retries = 0x00;
-
+
do {
for(Index=0; Index < 100; Index++) {
if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
@@ -444,12 +448,13 @@ static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
u_int32_t Index;
u_int8_t Retries = 0x00;
-
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
+
do {
for(Index=0; Index < 100; Index++) {
- if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
+ if(READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
+ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
return TRUE;
}
UDELAY(10000);
@@ -465,7 +470,7 @@ static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
{
u_int32_t Index;
u_int8_t Retries = 0x00;
-
+
do {
for(Index=0; Index < 100; Index++) {
if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
@@ -485,7 +490,7 @@ static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb)
{
u_int32_t Index;
u_int8_t Retries = 0x00;
-
+
do {
for(Index=0; Index < 100; Index++) {
if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
@@ -504,7 +509,7 @@ static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb)
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
-
+
CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
do {
if(arcmsr_hba_wait_msgint_ready(acb)) {
@@ -521,9 +526,9 @@ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
-
- CHIP_REG_WRITE32(HBB_DOORBELL,
- 0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
+
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
do {
if(arcmsr_hbb_wait_msgint_ready(acb)) {
break;
@@ -539,7 +544,7 @@ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
{
int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
-
+
CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
do {
@@ -557,7 +562,7 @@ static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb)
{
int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minute */
-
+
CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
do {
if(arcmsr_hbd_wait_msgint_ready(acb)) {
@@ -599,7 +604,7 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
static int arcmsr_suspend(device_t dev)
{
struct AdapterControlBlock *acb = device_get_softc(dev);
-
+
/* flush controller */
arcmsr_iop_parking(acb);
/* disable all outbound interrupt */
@@ -613,7 +618,7 @@ static int arcmsr_suspend(device_t dev)
static int arcmsr_resume(device_t dev)
{
struct AdapterControlBlock *acb = device_get_softc(dev);
-
+
arcmsr_iop_init(acb);
return(0);
}
@@ -626,7 +631,7 @@ static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, vo
struct AdapterControlBlock *acb;
u_int8_t target_id, target_lun;
struct cam_sim *sim;
-
+
sim = (struct cam_sim *) cb_arg;
acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
switch (code) {
@@ -649,7 +654,7 @@ static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, vo
static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
{
union ccb *pccb = srb->pccb;
-
+
pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
if(pccb->csio.sense_len) {
@@ -677,7 +682,8 @@ static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
*/
static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
if(!arcmsr_hbb_wait_msgint_ready(acb)) {
printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
}
@@ -738,12 +744,12 @@ static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
{
struct AdapterControlBlock *acb = srb->acb;
union ccb *pccb = srb->pccb;
-
+
if(srb->srb_flags & SRB_FLAG_TIMER_START)
callout_stop(&srb->ccb_callout);
if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmasync_op_t op;
-
+
if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_POSTREAD;
} else {
@@ -772,7 +778,7 @@ static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
{
int target, lun;
-
+
target = srb->pccb->ccb_h.target_id;
lun = srb->pccb->ccb_h.target_lun;
if(error == FALSE) {
@@ -823,7 +829,7 @@ static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct Comm
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
{
struct CommandControlBlock *srb;
-
+
/* check if command done with no error*/
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_C:
@@ -859,7 +865,7 @@ static void arcmsr_srb_timeout(void *arg)
struct AdapterControlBlock *acb;
int target, lun;
u_int8_t cmd;
-
+
target = srb->pccb->ccb_h.target_id;
lun = srb->pccb->ccb_h.target_lun;
acb = srb->acb;
@@ -875,7 +881,7 @@ static void arcmsr_srb_timeout(void *arg)
}
ARCMSR_LOCK_RELEASE(&acb->isr_lock);
#ifdef ARCMSR_DEBUG1
- arcmsr_dump_data(acb);
+ arcmsr_dump_data(acb);
#endif
}
@@ -888,29 +894,29 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
int i=0;
u_int32_t flag_srb;
u_int16_t error;
-
+
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
u_int32_t outbound_intstatus;
-
+
/*clear and abort all outbound posted Q*/
outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
arcmsr_drain_donequeue(acb, flag_srb, error);
}
}
break;
case ACB_ADAPTER_TYPE_B: {
struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
-
+
/*clear all outbound posted Q*/
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
+ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
if((flag_srb = phbbmu->done_qbuffer[i]) != 0) {
phbbmu->done_qbuffer[i] = 0;
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
arcmsr_drain_donequeue(acb, flag_srb, error);
}
phbbmu->post_qbuffer[i] = 0;
@@ -920,10 +926,10 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_C: {
-
+
while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
arcmsr_drain_donequeue(acb, flag_srb, error);
}
}
@@ -943,7 +949,7 @@ static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
struct CommandControlBlock *srb;
u_int32_t intmask_org;
u_int32_t i=0;
-
+
if(acb->srboutstandingcount>0) {
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_allintr(acb);
@@ -984,7 +990,7 @@ static void arcmsr_build_srb(struct CommandControlBlock *srb,
union ccb *pccb = srb->pccb;
struct ccb_scsiio *pcsio = &pccb->csio;
u_int32_t arccdbsize = 0x30;
-
+
memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
arcmsr_cdb->Bus = 0;
arcmsr_cdb->TargetID = pccb->ccb_h.target_id;
@@ -996,7 +1002,7 @@ static void arcmsr_build_srb(struct CommandControlBlock *srb,
struct AdapterControlBlock *acb = srb->acb;
bus_dmasync_op_t op;
u_int32_t length, i, cdb_sgcount = 0;
-
+
if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREREAD;
} else {
@@ -1018,11 +1024,11 @@ static void arcmsr_build_srb(struct CommandControlBlock *srb,
arccdbsize += sizeof(struct SG32ENTRY);
} else {
u_int32_t sg64s_size = 0, tmplength = length;
-
+
while(1) {
u_int64_t span4G, length0;
struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
-
+
span4G = (u_int64_t)address_lo + tmplength;
pdma_sg->addresshigh = address_hi;
pdma_sg->address = address_lo;
@@ -1055,8 +1061,8 @@ static void arcmsr_build_srb(struct CommandControlBlock *srb,
} else {
arcmsr_cdb->DataLength = 0;
}
- srb->arc_cdb_size = arccdbsize;
- arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0);
+ srb->arc_cdb_size = arccdbsize;
+ arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0);
}
/*
**************************************************************************
@@ -1066,7 +1072,7 @@ static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandContr
{
u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low;
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb;
-
+
bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
atomic_add_int(&acb->srboutstandingcount, 1);
srb->srb_state = ARCMSR_SRB_START;
@@ -1083,7 +1089,7 @@ static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandContr
case ACB_ADAPTER_TYPE_B: {
struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
int ending_index, index;
-
+
index = phbbmu->postq_index;
ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE);
phbbmu->post_qbuffer[ending_index] = 0;
@@ -1095,26 +1101,26 @@ static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandContr
index++;
index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */
phbbmu->postq_index = index;
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
}
break;
- case ACB_ADAPTER_TYPE_C: {
- u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
+ case ACB_ADAPTER_TYPE_C: {
+ u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
- arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
- ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1);
+ arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
+ ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1);
cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
- if(cdb_phyaddr_hi32)
- {
- CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
- CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
- }
- else
- {
- CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
- }
- }
- break;
+ if(cdb_phyaddr_hi32)
+ {
+ CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
+ CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
+ }
+ else
+ {
+ CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
+ }
+ }
+ break;
case ACB_ADAPTER_TYPE_D: {
struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
u_int16_t index_stripped;
@@ -1152,29 +1158,29 @@ static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandContr
static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
{
struct QBUFFER *qbuffer=NULL;
-
+
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
-
+
qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer;
}
break;
case ACB_ADAPTER_TYPE_B: {
struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
-
+
qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
}
break;
case ACB_ADAPTER_TYPE_C: {
struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
-
+
qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
}
break;
case ACB_ADAPTER_TYPE_D: {
struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
-
+
qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer;
}
break;
@@ -1188,29 +1194,29 @@ static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
static struct QBUFFER *arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
{
struct QBUFFER *qbuffer = NULL;
-
+
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
-
+
qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
}
break;
case ACB_ADAPTER_TYPE_B: {
struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
-
+
qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
}
break;
case ACB_ADAPTER_TYPE_C: {
struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
-
+
qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
}
break;
case ACB_ADAPTER_TYPE_D: {
struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
-
+
qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer;
}
break;
@@ -1230,8 +1236,9 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_B: {
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
/* let IOP know data has been read */
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
}
break;
case ACB_ADAPTER_TYPE_C: {
@@ -1262,11 +1269,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_B: {
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
/*
** push inbound doorbell tell iop, driver data write ok
** and wait reply on next hwinterrupt for next Qbuffer post
*/
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
}
break;
case ACB_ADAPTER_TYPE_C: {
@@ -1295,7 +1303,7 @@ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
CHIP_REG_WRITE32(HBA_MessageUnit,
- 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
+ 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
if(!arcmsr_hba_wait_msgint_ready(acb)) {
printf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
, acb->pci_unit);
@@ -1307,9 +1315,9 @@ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
*/
static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
- CHIP_REG_WRITE32(HBB_DOORBELL,
- 0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
if(!arcmsr_hbb_wait_msgint_ready(acb)) {
printf( "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
, acb->pci_unit);
@@ -1387,7 +1395,7 @@ static void arcmsr_poll(struct cam_sim *psim)
**************************************************************************
*/
static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb,
- struct QBUFFER *prbuffer) {
+ struct QBUFFER *prbuffer) {
u_int8_t *pQbuffer;
u_int8_t *buf1 = 0;
@@ -1432,13 +1440,13 @@ static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb
**************************************************************************
*/
static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
- struct QBUFFER *prbuffer) {
+ struct QBUFFER *prbuffer) {
u_int8_t *pQbuffer;
u_int8_t *iop_data;
u_int32_t iop_len;
- if(acb->adapter_type == ACB_ADAPTER_TYPE_D) {
+ if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer));
}
iop_data = (u_int8_t *)prbuffer->data;
@@ -1464,12 +1472,12 @@ static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
struct QBUFFER *prbuffer;
int my_empty_len;
-
+
/*check this iop data if overflow my rqbuffer*/
ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
prbuffer = arcmsr_get_iop_rqbuffer(acb);
my_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) &
- (ARCMSR_MAX_QBUFFER-1);
+ (ARCMSR_MAX_QBUFFER-1);
if(my_empty_len >= prbuffer->data_len) {
if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
@@ -1489,7 +1497,7 @@ static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb)
u_int8_t *buf1 = 0;
u_int32_t *iop_data, *buf2 = 0;
u_int32_t allxfer_len = 0, data_len;
-
+
if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
buf2 = (u_int32_t *)buf1;
@@ -1532,8 +1540,8 @@ static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb)
struct QBUFFER *pwbuffer;
u_int8_t *iop_data;
int32_t allxfer_len=0;
-
- if(acb->adapter_type == ACB_ADAPTER_TYPE_D) {
+
+ if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
arcmsr_Write_data_2iop_wqbuffer_D(acb);
return;
}
@@ -1585,8 +1593,8 @@ static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
/*
if (ccb->ccb_h.status != CAM_REQ_CMP)
printf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x,"
- "failure status=%x\n", ccb->ccb_h.target_id,
- ccb->ccb_h.target_lun, ccb->ccb_h.status);
+ "failure status=%x\n", ccb->ccb_h.target_id,
+ ccb->ccb_h.target_lun, ccb->ccb_h.status);
else
printf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
*/
@@ -1600,7 +1608,7 @@ static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int l
union ccb *ccb;
if ((ccb = (union ccb *)xpt_alloc_ccb_nowait()) == NULL)
- return;
+ return;
if (xpt_create_path(&path, NULL, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
{
xpt_free_ccb(ccb);
@@ -1618,9 +1626,9 @@ static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int l
static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
{
- struct CommandControlBlock *srb;
+ struct CommandControlBlock *srb;
u_int32_t intmask_org;
- int i;
+ int i;
/* disable all outbound interrupts */
intmask_org = arcmsr_disable_allintr(acb);
@@ -1629,13 +1637,13 @@ static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, in
srb = acb->psrb_pool[i];
if (srb->srb_state == ARCMSR_SRB_START)
{
- if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
- {
- srb->srb_state = ARCMSR_SRB_ABORTED;
+ if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
+ {
+ srb->srb_state = ARCMSR_SRB_ABORTED;
srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
- arcmsr_srb_complete(srb, 1);
+ arcmsr_srb_complete(srb, 1);
printf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
- }
+ }
}
}
/* enable outbound Post Queue, outbound doorbell Interrupt */
@@ -1648,87 +1656,87 @@ static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, in
static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
u_int32_t devicemap;
u_int32_t target, lun;
- u_int32_t deviceMapCurrent[4]={0};
- u_int8_t *pDevMap;
+ u_int32_t deviceMapCurrent[4]={0};
+ u_int8_t *pDevMap;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
- devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
- for (target = 0; target < 4; target++)
- {
- deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
- devicemap += 4;
- }
- break;
+ devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
+ for (target = 0; target < 4; target++)
+ {
+ deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
+ devicemap += 4;
+ }
+ break;
case ACB_ADAPTER_TYPE_B:
- devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
- for (target = 0; target < 4; target++)
- {
- deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
- devicemap += 4;
- }
- break;
+ devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
+ for (target = 0; target < 4; target++)
+ {
+ deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
+ devicemap += 4;
+ }
+ break;
case ACB_ADAPTER_TYPE_C:
- devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
- for (target = 0; target < 4; target++)
- {
- deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
- devicemap += 4;
- }
- break;
+ devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
+ for (target = 0; target < 4; target++)
+ {
+ deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
+ devicemap += 4;
+ }
+ break;
case ACB_ADAPTER_TYPE_D:
- devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
- for (target = 0; target < 4; target++)
- {
- deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
- devicemap += 4;
- }
- break;
- }
-
- if(acb->acb_flags & ACB_F_BUS_HANG_ON)
+ devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
+ for (target = 0; target < 4; target++)
{
- acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
+ deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
+ devicemap += 4;
}
- /*
- ** adapter posted CONFIG message
- ** copy the new map, note if there are differences with the current map
- */
- pDevMap = (u_int8_t *)&deviceMapCurrent[0];
- for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++)
+ break;
+ }
+
+ if(acb->acb_flags & ACB_F_BUS_HANG_ON)
+ {
+ acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
+ }
+ /*
+ ** adapter posted CONFIG message
+ ** copy the new map, note if there are differences with the current map
+ */
+ pDevMap = (u_int8_t *)&deviceMapCurrent[0];
+ for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++)
+ {
+ if (*pDevMap != acb->device_map[target])
{
- if (*pDevMap != acb->device_map[target])
+ u_int8_t difference, bit_check;
+
+ difference = *pDevMap ^ acb->device_map[target];
+ for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
{
- u_int8_t difference, bit_check;
-
- difference = *pDevMap ^ acb->device_map[target];
- for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
- {
- bit_check = (1 << lun); /*check bit from 0....31*/
- if(difference & bit_check)
- {
- if(acb->device_map[target] & bit_check)
- {/* unit departed */
- printf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
- arcmsr_abort_dr_ccbs(acb, target, lun);
- arcmsr_rescan_lun(acb, target, lun);
- acb->devstate[target][lun] = ARECA_RAID_GONE;
- }
- else
- {/* unit arrived */
- printf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
- arcmsr_rescan_lun(acb, target, lun);
- acb->devstate[target][lun] = ARECA_RAID_GOOD;
- }
- }
- }
-/* printf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
- acb->device_map[target] = *pDevMap;
+ bit_check = (1 << lun); /*check bit from 0....31*/
+ if(difference & bit_check)
+ {
+ if(acb->device_map[target] & bit_check)
+ {/* unit departed */
+ printf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
+ arcmsr_abort_dr_ccbs(acb, target, lun);
+ arcmsr_rescan_lun(acb, target, lun);
+ acb->devstate[target][lun] = ARECA_RAID_GONE;
+ }
+ else
+ {/* unit arrived */
+ printf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
+ arcmsr_rescan_lun(acb, target, lun);
+ acb->devstate[target][lun] = ARECA_RAID_GOOD;
+ }
+ }
}
- pDevMap++;
+/* printf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
+ acb->device_map[target] = *pDevMap;
}
+ pDevMap++;
+ }
}
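
[Editor's note: the re-indented arcmsr_dr_handle() hunk above detects hot-plug events by XORing each byte of the firmware's new device map against the cached copy: every set bit in the difference is a LUN that changed state, and the old map's bit tells which direction. The bit logic in isolation, with toy values rather than driver code:]

    #include <stdio.h>
    #include <stdint.h>

    static void
    devmap_diff(uint8_t old, uint8_t cur)
    {
    	uint8_t difference = old ^ cur;	/* changed LUNs for this target */
    	int lun;

    	for (lun = 0; lun < 8; lun++) {
    		uint8_t bit_check = 1 << lun;

    		if ((difference & bit_check) == 0)
    			continue;
    		if (old & bit_check)
    			printf("lun %d departed\n", lun);	/* was set, now clear */
    		else
    			printf("lun %d arrived\n", lun);	/* was clear, now set */
    	}
    }

    int
    main(void)
    {
    	devmap_diff(0x05, 0x03);	/* expect: lun 1 arrived, lun 2 departed */
    	return (0);
    }
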
/*
**************************************************************************
@@ -1748,9 +1756,10 @@ static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
*/
static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
u_int32_t outbound_message;
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
/* clear interrupts */
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
+ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
arcmsr_dr_handle( acb );
@@ -1785,8 +1794,8 @@ static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) {
*/
static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
- u_int32_t outbound_doorbell;
-
+ u_int32_t doorbell_status;
+
/*
*******************************************************************
** Maybe here we need to check wrqbuffer_lock is lock or not
@@ -1794,14 +1803,12 @@ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
** check if there are any mail need to pack from firmware
*******************************************************************
*/
- outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit,
- 0, outbound_doorbell);
- CHIP_REG_WRITE32(HBA_MessageUnit,
- 0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
- if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
+ doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
+ CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
+ if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(acb);
}
- if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
+ if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
arcmsr_iop2drv_data_read_handle(acb);
}
}
@@ -1811,8 +1818,8 @@ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
*/
static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
{
- u_int32_t outbound_doorbell;
-
+ u_int32_t doorbell_status;
+
/*
*******************************************************************
** Maybe here we need to check wrqbuffer_lock is lock or not
@@ -1820,15 +1827,15 @@ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
** check if there are any mail need to pack from firmware
*******************************************************************
*/
- outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
- CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
- if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
+ doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
+ CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */
+ if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(acb);
}
- if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
+ if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
arcmsr_iop2drv_data_read_handle(acb);
}
- if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */
}
}
@@ -1838,8 +1845,8 @@ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
*/
static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb)
{
- u_int32_t outbound_Doorbell;
-
+ u_int32_t doorbell_status;
+
/*
*******************************************************************
** Maybe here we need to check whether wrqbuffer_lock is held or not
@@ -1847,22 +1854,22 @@ static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb)
** check if there is any mail to be fetched from the firmware
*******************************************************************
*/
- outbound_Doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
- if(outbound_Doorbell)
- CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_Doorbell); /* clear doorbell interrupt */
- while( outbound_Doorbell & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) {
- if(outbound_Doorbell & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) {
+ doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
+ if(doorbell_status)
+ CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
+ while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) {
+ if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(acb);
}
- if(outbound_Doorbell & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) {
+ if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) {
arcmsr_iop2drv_data_read_handle(acb);
}
- if(outbound_Doorbell & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
arcmsr_hbd_message_isr(acb); /* messenger of "driver to iop commands" */
}
- outbound_Doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
- if(outbound_Doorbell)
- CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_Doorbell); /* clear doorbell interrupt */
+ doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
+ if(doorbell_status)
+ CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
}
}
/*
@@ -1873,7 +1880,7 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
u_int32_t flag_srb;
u_int16_t error;
-
+
/*
*****************************************************************************
** areca cdb command done
@@ -1884,7 +1891,7 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
0, outbound_queueport)) != 0xFFFFFFFF) {
/* check if command done with no error*/
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
arcmsr_drain_donequeue(acb, flag_srb, error);
} /*drain reply FIFO*/
}
@@ -1913,7 +1920,7 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
index %= ARCMSR_MAX_HBB_POSTQUEUE; /* wrap past the last index back to 0 */
phbbmu->doneq_index = index;
/* check if command done with no error*/
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
arcmsr_drain_donequeue(acb, flag_srb, error);
} /*drain reply FIFO*/
}
@@ -1925,26 +1932,26 @@ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
{
u_int32_t flag_srb,throttling = 0;
u_int16_t error;
-
+
/*
*****************************************************************************
** areca cdb command done
*****************************************************************************
*/
bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
-
- while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
-
+ do {
flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
+ if (flag_srb == 0xFFFFFFFF)
+ break;
/* check if command done with no error*/
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
arcmsr_drain_donequeue(acb, flag_srb, error);
- throttling++;
- if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
- CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
+ throttling++;
+ if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
+ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
throttling = 0;
- }
- } /*drain reply FIFO*/
+ }
+ } while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR);
}
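
The rewritten type-C drain adds throttling on top of the same pattern: every ARCMSR_HBC_ISR_THROTTLING_LEVEL completions, the driver rings the inbound doorbell so the IOP keeps posting. A minimal sketch of the counter; the level value and ring_doorbell are assumed stand-ins for the real constant and the CHIP_REG_WRITE32 call.

#include <stdio.h>

#define THROTTLING_LEVEL 12  /* stands in for ARCMSR_HBC_ISR_THROTTLING_LEVEL */

/* Stand-in for the inbound-doorbell CHIP_REG_WRITE32 call. */
static void ring_doorbell(void) { printf("doorbell: host is keeping up\n"); }

int main(void)
{
	int completed, throttling = 0;

	for (completed = 0; completed < 30; completed++) {
		/* ... one completion handled here ... */
		if (++throttling == THROTTLING_LEVEL) {
			ring_doorbell();  /* nudge the IOP every THROTTLING_LEVEL completions */
			throttling = 0;
		}
	}
	return 0;
}
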
/*
**********************************************************************
@@ -1988,8 +1995,8 @@ static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb)
*****************************************************************************
*/
if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) &
- ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0)
- return;
+ ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0)
+ return;
bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
@@ -2042,19 +2049,20 @@ static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
{
u_int32_t outbound_doorbell;
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
/*
*********************************************
** check outbound intstatus
*********************************************
*/
- outbound_doorbell = CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
+ outbound_doorbell = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & acb->outbound_int_enable;
if(!outbound_doorbell) {
/*it must be a shared irq*/
return;
}
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
- CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
+ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
+ READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
/* MU ioctl transfer doorbell interrupts*/
if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(acb);
@@ -2082,19 +2090,24 @@ static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
** check outbound intstatus
*********************************************
*/
- host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
+ host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) &
+ (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
if(!host_interrupt_status) {
/*it must be a shared irq*/
return;
}
- /* MU doorbell interrupts*/
- if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
- arcmsr_hbc_doorbell_isr(acb);
- }
- /* MU post queue interrupts*/
- if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
- arcmsr_hbc_postqueue_isr(acb);
- }
+ do {
+ /* MU doorbell interrupts*/
+ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
+ arcmsr_hbc_doorbell_isr(acb);
+ }
+ /* MU post queue interrupts*/
+ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
+ arcmsr_hbc_postqueue_isr(acb);
+ }
+ host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
}
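
The type-C top-level ISR now masks host_int_status down to the two causes it actually services and re-reads the register after dispatching, looping until both causes are clear, so an event raised while a handler runs is not lost. A compact sketch of that shape, with a simulated status register and hypothetical cause bits and handler names.

#include <stdio.h>
#include <stdint.h>

#define DOORBELL_ISR  0x00000004u  /* made-up cause bits */
#define POSTQUEUE_ISR 0x00000008u

/* Simulated status register: causes drain away as they are handled. */
static uint32_t pending = DOORBELL_ISR | POSTQUEUE_ISR;

static uint32_t read_status(void) { return pending; }
static void handle_doorbell(void)  { pending &= ~DOORBELL_ISR;  puts("doorbell handled"); }
static void handle_postqueue(void) { pending &= ~POSTQUEUE_ISR; puts("postqueue handled"); }

int main(void)
{
	uint32_t status = read_status() & (DOORBELL_ISR | POSTQUEUE_ISR);

	if (!status)
		return 0;  /* not ours; must be a shared irq */
	do {
		if (status & DOORBELL_ISR)
			handle_doorbell();
		if (status & POSTQUEUE_ISR)
			handle_postqueue();
		status = read_status();  /* re-check before leaving the ISR */
	} while (status & (DOORBELL_ISR | POSTQUEUE_ISR));
	return 0;
}
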
/*
**********************************************************************
@@ -2161,7 +2174,7 @@ static void arcmsr_interrupt(struct AdapterControlBlock *acb)
static void arcmsr_intr_handler(void *arg)
{
struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
-
+
ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
arcmsr_interrupt(acb);
ARCMSR_LOCK_RELEASE(&acb->isr_lock);
@@ -2174,22 +2187,24 @@ static void arcmsr_polling_devmap(void *arg)
{
struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A:
- CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
- break;
+ case ACB_ADAPTER_TYPE_A:
+ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
+ break;
- case ACB_ADAPTER_TYPE_B:
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
- break;
+ case ACB_ADAPTER_TYPE_B: {
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
+ }
+ break;
- case ACB_ADAPTER_TYPE_C:
- CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
- CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
- break;
+ case ACB_ADAPTER_TYPE_C:
+ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
+ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
+ break;
- case ACB_ADAPTER_TYPE_D:
- CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
- break;
+ case ACB_ADAPTER_TYPE_D:
+ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
+ break;
}
if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
@@ -2226,7 +2241,7 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
{
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
u_int32_t retvalue = EINVAL;
-
+
pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg;
if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
return retvalue;
@@ -2237,7 +2252,7 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
u_int8_t *pQbuffer;
u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
u_int32_t allxfer_len=0;
-
+
while((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
&& (allxfer_len < 1031)) {
/*copy READ QBUFFER to srb*/
@@ -2251,7 +2266,7 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
}
if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
struct QBUFFER *prbuffer;
-
+
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
@@ -2266,7 +2281,7 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
u_int8_t *pQbuffer;
u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
-
+
user_len = pcmdmessagefld->cmdmessage.Length;
/*check if data xfer length of this request will overflow my array qbuffer */
wqbuf_lastindex = acb->wqbuf_lastindex;
@@ -2276,7 +2291,7 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
} else {
my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) &
- (ARCMSR_MAX_QBUFFER - 1);
+ (ARCMSR_MAX_QBUFFER - 1);
if(my_empty_len >= user_len) {
while(user_len > 0) {
/*copy srb data to wqbuffer*/
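
The expression (wqbuf_firstindex - wqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1) in the hunk above is the usual free-space formula for a power-of-two ring buffer that sacrifices one slot so full and empty stay distinguishable. A small sketch, assuming the same power-of-two sizing:

#include <stdio.h>
#include <stdint.h>

#define QBUFFER_SIZE 4096u  /* a power of two, like ARCMSR_MAX_QBUFFER */

/* Free space with `first` as read index and `last` as write index;
 * one slot stays unused so that full and empty are distinguishable. */
static uint32_t ring_free(uint32_t first, uint32_t last)
{
	return (first - last - 1) & (QBUFFER_SIZE - 1);
}

int main(void)
{
	printf("%u\n", (unsigned)ring_free(0, 0));     /* empty: 4095 usable bytes */
	printf("%u\n", (unsigned)ring_free(10, 9));    /* writer one behind reader: full, 0 free */
	printf("%u\n", (unsigned)ring_free(5, 4090));  /* wrap-around case: 10 free */
	return 0;
}
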
@@ -2303,7 +2318,7 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
break;
case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
u_int8_t *pQbuffer = acb->rqbuffer;
-
+
if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
arcmsr_iop_message_read(acb);
@@ -2320,10 +2335,10 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
{
u_int8_t *pQbuffer = acb->wqbuffer;
-
+
if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
- arcmsr_iop_message_read(acb);
+ arcmsr_iop_message_read(acb);
/*signature, let IOP know data has been read */
}
acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
@@ -2336,10 +2351,10 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
break;
case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
u_int8_t *pQbuffer;
-
+
if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
- arcmsr_iop_message_read(acb);
+ arcmsr_iop_message_read(acb);
/*signature, let IOP know data has been read */
}
acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
@@ -2365,7 +2380,7 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
case ARCMSR_MESSAGE_SAY_HELLO: {
u_int8_t *hello_string = "Hello! I am ARCMSR";
u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer;
-
+
if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) {
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
@@ -2396,7 +2411,7 @@ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_c
static void arcmsr_free_srb(struct CommandControlBlock *srb)
{
struct AdapterControlBlock *acb;
-
+
acb = srb->acb;
ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
srb->srb_state = ARCMSR_SRB_DONE;
@@ -2460,7 +2475,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
u_int8_t *pQbuffer;
u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
int32_t allxfer_len = 0;
-
+
ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
&& (allxfer_len < 1031)) {
@@ -2473,7 +2488,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
}
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
struct QBUFFER *prbuffer;
-
+
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
@@ -2489,7 +2504,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
u_int8_t *pQbuffer;
u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
-
+
user_len = pcmdmessagefld->cmdmessage.Length;
ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
wqbuf_lastindex = acb->wqbuf_lastindex;
@@ -2497,7 +2512,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
if (wqbuf_lastindex != wqbuf_firstindex) {
arcmsr_Write_data_2iop_wqbuffer(acb);
/* has error report sensedata */
- if(pccb->csio.sense_len) {
+ if(pccb->csio.sense_len) {
((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
/* Valid,ErrorCode */
((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
@@ -2545,7 +2560,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
break;
case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
u_int8_t *pQbuffer = acb->rqbuffer;
-
+
ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -2562,7 +2577,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
break;
case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
u_int8_t *pQbuffer = acb->wqbuffer;
-
+
ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -2581,7 +2596,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
break;
case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
u_int8_t *pQbuffer;
-
+
ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -2609,7 +2624,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
break;
case ARCMSR_MESSAGE_SAY_HELLO: {
int8_t *hello_string = "Hello! I am ARCMSR";
-
+
memcpy(pcmdmessagefld->messagedatabuffer, hello_string
, (int16_t)strlen(hello_string));
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
@@ -2637,7 +2652,7 @@ static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg,
struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb;
union ccb *pccb;
int target, lun;
-
+
pccb = srb->pccb;
target = pccb->ccb_h.target_id;
lun = pccb->ccb_h.target_lun;
@@ -2719,7 +2734,7 @@ static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb)
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
u_int32_t intmask_org;
int i = 0;
-
+
acb->num_aborts++;
/*
***************************************************************************
@@ -2761,7 +2776,7 @@ static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb)
static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
{
int retry = 0;
-
+
acb->num_resets++;
acb->acb_flags |= ACB_F_BUS_RESET;
while(acb->srboutstandingcount != 0 && retry < 400) {
@@ -2791,10 +2806,10 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
char *buffer = pccb->csio.data_ptr;
inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */
- inqdata[1] = 0; /* rem media bit & Dev Type Modifier */
- inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */
+ inqdata[1] = 0; /* rem media bit & Dev Type Modifier */
+ inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */
inqdata[3] = 0;
- inqdata[4] = 31; /* length of additional data */
+ inqdata[4] = 31; /* length of additional data */
inqdata[5] = 0;
inqdata[6] = 0;
inqdata[7] = 0;
@@ -2825,7 +2840,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
{
struct AdapterControlBlock *acb;
-
+
acb = (struct AdapterControlBlock *) cam_sim_softc(psim);
if(acb == NULL) {
pccb->ccb_h.status |= CAM_REQ_INVALID;
@@ -2837,7 +2852,7 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
struct CommandControlBlock *srb;
int target = pccb->ccb_h.target_id;
int error;
-
+
if(target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, pccb);
@@ -2884,7 +2899,9 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(psim);
#ifdef CAM_NEW_TRAN_CODE
- if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
+ if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
+ cpi->base_transfer_speed = 1200000;
+ else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
cpi->base_transfer_speed = 600000;
else
cpi->base_transfer_speed = 300000;
@@ -2938,8 +2955,8 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
}
case XPT_RESET_BUS:
case XPT_RESET_DEV: {
- u_int32_t i;
-
+ u_int32_t i;
+
arcmsr_bus_reset(acb);
for (i=0; i < 500; i++) {
DELAY(1000);
@@ -2955,7 +2972,7 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
}
case XPT_GET_TRAN_SETTINGS: {
struct ccb_trans_settings *cts;
-
+
if(pccb->ccb_h.target_id == 16) {
pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
xpt_done(pccb);
@@ -2967,7 +2984,7 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
struct ccb_trans_settings_scsi *scsi;
struct ccb_trans_settings_spi *spi;
struct ccb_trans_settings_sas *sas;
-
+
scsi = &cts->proto_specific.scsi;
scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
scsi->valid = CTS_SCSI_VALID_TQ;
@@ -2982,10 +2999,11 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
cts->transport = XPORT_SAS;
sas = &cts->xport_specific.sas;
sas->valid = CTS_SAS_VALID_SPEED;
- if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
- (acb->vendor_device_id == PCIDevVenIDARC1214))
+ if (acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
+ sas->bitrate = 1200000;
+ else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
sas->bitrate = 600000;
- else if(acb->vendor_device_id == PCIDevVenIDARC1680)
+ else if(acb->adapter_bus_speed == ACB_BUS_SPEED_3G)
sas->bitrate = 300000;
}
else
@@ -2995,7 +3013,10 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
cts->transport = XPORT_SPI;
spi = &cts->xport_specific.spi;
spi->flags = CTS_SPI_FLAGS_DISC_ENB;
- spi->sync_period = 2;
+ if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
+ spi->sync_period = 1;
+ else
+ spi->sync_period = 2;
spi->sync_offset = 32;
spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
spi->valid = CTS_SPI_VALID_DISC
@@ -3007,7 +3028,10 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
#else
{
cts->flags = (CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
- cts->sync_period = 2;
+ if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
+ cts->sync_period = 1;
+ else
+ cts->sync_period = 2;
cts->sync_offset = 32;
cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
cts->valid = CCB_TRANS_SYNC_RATE_VALID |
@@ -3090,8 +3114,9 @@ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
*/
static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
{
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
acb->acb_flags |= ACB_F_MSG_START_BGRB;
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB);
if(!arcmsr_hbb_wait_msgint_ready(acb)) {
printf( "arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
}
@@ -3152,7 +3177,7 @@ static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct C
struct CommandControlBlock *srb;
u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
u_int16_t error;
-
+
polling_ccb_retry:
poll_count++;
outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
@@ -3174,7 +3199,7 @@ polling_ccb_retry:
/* check if command done with no error*/
srb = (struct CommandControlBlock *)
(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
poll_srb_done = (srb == poll_srb) ? 1:0;
if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
if(srb->srb_state == ARCMSR_SRB_ABORTED) {
@@ -3208,11 +3233,10 @@ static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct C
u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
int index;
u_int16_t error;
-
+
polling_ccb_retry:
poll_count++;
- CHIP_REG_WRITE32(HBB_DOORBELL,
- 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
+ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
while(1) {
index = phbbmu->doneq_index;
@@ -3221,7 +3245,7 @@ polling_ccb_retry:
break;/* chip FIFO has no completed ccb left */
} else {
UDELAY(25000);
- if ((poll_count > 100) && (poll_srb != NULL)) {
+ if ((poll_count > 100) && (poll_srb != NULL)) {
break;
}
goto polling_ccb_retry;
@@ -3234,7 +3258,7 @@ polling_ccb_retry:
/* check if command done with no error*/
srb = (struct CommandControlBlock *)
(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
poll_srb_done = (srb == poll_srb) ? 1:0;
if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
if(srb->srb_state == ARCMSR_SRB_ABORTED) {
@@ -3266,7 +3290,7 @@ static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct C
struct CommandControlBlock *srb;
u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
u_int16_t error;
-
+
polling_ccb_retry:
poll_count++;
bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
@@ -3276,19 +3300,19 @@ polling_ccb_retry:
break;/* chip FIFO has no completed ccb left */
} else {
UDELAY(25000);
- if ((poll_count > 100) && (poll_srb != NULL)) {
+ if ((poll_count > 100) && (poll_srb != NULL)) {
break;
}
- if (acb->srboutstandingcount == 0) {
+ if (acb->srboutstandingcount == 0) {
break;
- }
+ }
goto polling_ccb_retry;
}
}
flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
/* check if command done with no error*/
srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
if (poll_srb != NULL)
poll_srb_done = (srb == poll_srb) ? 1:0;
if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
@@ -3318,7 +3342,7 @@ static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct C
u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
u_int32_t outbound_write_pointer;
u_int16_t error, doneq_index;
-
+
polling_ccb_retry:
poll_count++;
bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
@@ -3330,12 +3354,12 @@ polling_ccb_retry:
break;/* chip FIFO has no completed ccb left */
} else {
UDELAY(25000);
- if ((poll_count > 100) && (poll_srb != NULL)) {
+ if ((poll_count > 100) && (poll_srb != NULL)) {
+ break;
+ }
+ if (acb->srboutstandingcount == 0) {
break;
}
- if (acb->srboutstandingcount == 0) {
- break;
- }
goto polling_ccb_retry;
}
}
@@ -3343,7 +3367,7 @@ polling_ccb_retry:
flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
/* check if command done with no error*/
srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
- error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
+ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
if (poll_srb != NULL)
poll_srb_done = (srb == poll_srb) ? 1:0;
@@ -3400,7 +3424,7 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
int i;
-
+
CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
if(!arcmsr_hba_wait_msgint_ready(acb)) {
printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
@@ -3442,6 +3466,7 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
*/
static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
{
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
char *acb_firm_model = acb->firm_model;
char *acb_firm_version = acb->firm_version;
char *acb_device_map = acb->device_map;
@@ -3449,8 +3474,8 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
int i;
-
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
+
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
if(!arcmsr_hbb_wait_msgint_ready(acb)) {
printf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
}
@@ -3498,7 +3523,7 @@ static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
size_t iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
int i;
-
+
CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
if(!arcmsr_hbc_wait_msgint_ready(acb)) {
@@ -3548,7 +3573,7 @@ static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb)
size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
int i;
-
+
if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE)
CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
@@ -3576,10 +3601,10 @@ static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb)
i++;
}
printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
- acb->firm_request_len = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_request_len, 1, 04-07*/
- acb->firm_numbers_queue = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_numbers_queue, 2, 08-11*/
- acb->firm_sdram_size = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_sdram_size, 3, 12-15*/
- acb->firm_ide_channels = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[5]); /*firm_ide_channels, 4, 16-19*/
+ acb->firm_request_len = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
+ acb->firm_numbers_queue = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
+ acb->firm_sdram_size = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
+ acb->firm_ide_channels = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
acb->firm_cfg_version = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE)
acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1;
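
The type-D hunk above corrects an off-by-one in the firmware-config readback: the comments already named the intended 32-bit words (request_len at word 1, bytes 04-07, and so on), but the reads were one word too far. A sketch of indexing a buffer laid out the way those comments describe; the enum names and sample values are invented, real values come from the IOP.

#include <stdio.h>
#include <stdint.h>

/* Word indices into msgcode_rwbuffer, as named by the driver's comments. */
enum {
	FW_REQUEST_LEN   = 1,  /* bytes 04-07 */
	FW_NUMBERS_QUEUE = 2,  /* bytes 08-11 */
	FW_SDRAM_SIZE    = 3,  /* bytes 12-15 */
	FW_IDE_CHANNELS  = 4   /* bytes 16-19 */
};

int main(void)
{
	/* Invented sample readback; real values come from the IOP. */
	uint32_t rwbuffer[8] = { 0, 512, 256, 1024, 8, 0, 0, 0 };

	printf("request_len   = %u\n", (unsigned)rwbuffer[FW_REQUEST_LEN]);
	printf("numbers_queue = %u\n", (unsigned)rwbuffer[FW_NUMBERS_QUEUE]);
	printf("sdram_size    = %u MB\n", (unsigned)rwbuffer[FW_SDRAM_SIZE]);
	printf("ide_channels  = %u\n", (unsigned)rwbuffer[FW_IDE_CHANNELS]);
	return 0;
}
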
@@ -3618,7 +3643,7 @@ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
{
int timeout=0;
-
+
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
@@ -3633,7 +3658,8 @@ static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_B: {
- while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
+ while ((READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
{
if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
{
@@ -3642,7 +3668,7 @@ static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
}
UDELAY(15000); /* wait 15 milli-seconds */
}
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
}
break;
case ACB_ADAPTER_TYPE_C: {
@@ -3689,8 +3715,9 @@ static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_B: {
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
+ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
/* let IOP know data has been read */
}
break;
@@ -3722,7 +3749,7 @@ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
unsigned long srb_phyaddr;
u_int32_t srb_phyaddr_hi32;
u_int32_t srb_phyaddr_lo32;
-
+
/*
********************************************************************
** here we need to tell iop 331 our freesrb.HighPart
@@ -3753,11 +3780,11 @@ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
u_int32_t post_queue_phyaddr;
struct HBB_MessageUnit *phbbmu;
-
+
phbbmu = (struct HBB_MessageUnit *)acb->pmu;
phbbmu->postq_index = 0;
phbbmu->doneq_index = 0;
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
if(!arcmsr_hbb_wait_msgint_ready(acb)) {
printf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
return FALSE;
@@ -3769,12 +3796,12 @@ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */
CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */
CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
if(!arcmsr_hbb_wait_msgint_ready(acb)) {
printf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
return FALSE;
}
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
if(!arcmsr_hbb_wait_msgint_ready(acb)) {
printf( "arcmsr%d: 'start diver mode' timeout \n", acb->pci_unit);
return FALSE;
@@ -3797,7 +3824,7 @@ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D: {
u_int32_t post_queue_phyaddr, done_queue_phyaddr;
struct HBD_MessageUnit0 *phbdmu;
-
+
phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
phbdmu->postq_index = 0;
phbdmu->doneq_index = 0x40FF;
@@ -3833,7 +3860,8 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
break;
case ACB_ADAPTER_TYPE_B: {
- CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
+ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
+ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
if(!arcmsr_hbb_wait_msgint_ready(acb)) {
printf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
return;
@@ -3849,7 +3877,7 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
u_int32_t intmask_org;
-
+
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_allintr(acb);
arcmsr_wait_firmware_ready(acb);
@@ -3874,7 +3902,7 @@ static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, in
struct CommandControlBlock *srb_tmp;
u_int32_t i;
unsigned long srb_phyaddr = (unsigned long)segs->ds_addr;
-
+
acb->srb_phyaddr.phyaddr = srb_phyaddr;
srb_tmp = (struct CommandControlBlock *)acb->uncacheptr;
for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
@@ -3950,13 +3978,17 @@ static u_int32_t arcmsr_initialize(device_t dev)
vendor_dev_id = pci_get_devid(dev);
acb->vendor_device_id = vendor_dev_id;
+ acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
switch (vendor_dev_id) {
case PCIDevVenIDARC1880:
case PCIDevVenIDARC1882:
case PCIDevVenIDARC1213:
case PCIDevVenIDARC1223: {
acb->adapter_type = ACB_ADAPTER_TYPE_C;
- acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
+ if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883)
+ acb->adapter_bus_speed = ACB_BUS_SPEED_12G;
+ else
+ acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
}
break;
@@ -3973,6 +4005,12 @@ static u_int32_t arcmsr_initialize(device_t dev)
max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
}
break;
+ case PCIDevVenIDARC1203: {
+ acb->adapter_type = ACB_ADAPTER_TYPE_B;
+ acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
+ max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
+ }
+ break;
case PCIDevVenIDARC1110:
case PCIDevVenIDARC1120:
case PCIDevVenIDARC1130:
@@ -4008,47 +4046,47 @@ static u_int32_t arcmsr_initialize(device_t dev)
#else
if(bus_dma_tag_create( /*PCI parent*/ NULL,
#endif
-                                /*alignment*/ 1,
- /*boundary*/ 0,
- /*lowaddr*/ BUS_SPACE_MAXADDR,
- /*highaddr*/ BUS_SPACE_MAXADDR,
- /*filter*/ NULL,
- /*filterarg*/ NULL,
- /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
- /*nsegments*/ BUS_SPACE_UNRESTRICTED,
- /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
- /*flags*/ 0,
+			/*alignment*/ 1,
+ /*boundary*/ 0,
+ /*lowaddr*/ BUS_SPACE_MAXADDR,
+ /*highaddr*/ BUS_SPACE_MAXADDR,
+ /*filter*/ NULL,
+ /*filterarg*/ NULL,
+ /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
+ /*nsegments*/ BUS_SPACE_UNRESTRICTED,
+ /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/ 0,
#if __FreeBSD_version >= 501102
- /*lockfunc*/ NULL,
- /*lockarg*/ NULL,
+ /*lockfunc*/ NULL,
+ /*lockarg*/ NULL,
#endif
- &acb->parent_dmat) != 0)
+ &acb->parent_dmat) != 0)
{
printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
return ENOMEM;
}
/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
- if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
- /*alignment*/ 1,
- /*boundary*/ 0,
+ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
+ /*alignment*/ 1,
+ /*boundary*/ 0,
#ifdef PAE
- /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
+ /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
#else
- /*lowaddr*/ BUS_SPACE_MAXADDR,
+ /*lowaddr*/ BUS_SPACE_MAXADDR,
#endif
- /*highaddr*/ BUS_SPACE_MAXADDR,
- /*filter*/ NULL,
- /*filterarg*/ NULL,
- /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
- /*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
- /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
- /*flags*/ 0,
+ /*highaddr*/ BUS_SPACE_MAXADDR,
+ /*filter*/ NULL,
+ /*filterarg*/ NULL,
+ /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
+ /*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
+ /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/ 0,
#if __FreeBSD_version >= 501102
- /*lockfunc*/ busdma_lock_mutex,
- /*lockarg*/ &acb->isr_lock,
+ /*lockfunc*/ busdma_lock_mutex,
+ /*lockarg*/ &acb->isr_lock,
#endif
- &acb->dm_segs_dmat) != 0)
+ &acb->dm_segs_dmat) != 0)
{
bus_dma_tag_destroy(acb->parent_dmat);
printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
@@ -4056,22 +4094,22 @@ static u_int32_t arcmsr_initialize(device_t dev)
}
/* DMA tag for our srb structures.... Allocate the freesrb memory */
- if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
- /*alignment*/ 0x20,
- /*boundary*/ 0,
- /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
- /*highaddr*/ BUS_SPACE_MAXADDR,
- /*filter*/ NULL,
- /*filterarg*/ NULL,
- /*maxsize*/ max_coherent_size,
- /*nsegments*/ 1,
- /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
- /*flags*/ 0,
+ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
+ /*alignment*/ 0x20,
+ /*boundary*/ 0,
+ /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/ BUS_SPACE_MAXADDR,
+ /*filter*/ NULL,
+ /*filterarg*/ NULL,
+ /*maxsize*/ max_coherent_size,
+ /*nsegments*/ 1,
+ /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/ 0,
#if __FreeBSD_version >= 501102
- /*lockfunc*/ NULL,
- /*lockarg*/ NULL,
+ /*lockfunc*/ NULL,
+ /*lockarg*/ NULL,
#endif
- &acb->srb_dmat) != 0)
+ &acb->srb_dmat) != 0)
{
bus_dma_tag_destroy(acb->dm_segs_dmat);
bus_dma_tag_destroy(acb->parent_dmat);
@@ -4132,10 +4170,15 @@ static u_int32_t arcmsr_initialize(device_t dev)
struct CommandControlBlock *freesrb;
u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
vm_offset_t mem_base[]={0,0};
+ u_long size;
+ if (vendor_dev_id == PCIDevVenIDARC1203)
+ size = sizeof(struct HBB_DOORBELL_1203);
+ else
+ size = sizeof(struct HBB_DOORBELL);
for(i=0; i < 2; i++) {
if(i == 0) {
acb->sys_res_arcmsr[i] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
- 0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
+ 0ul, ~0ul, size, RF_ACTIVE);
} else {
acb->sys_res_arcmsr[i] = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
@@ -4164,6 +4207,17 @@ static u_int32_t arcmsr_initialize(device_t dev)
phbbmu = (struct HBB_MessageUnit *)acb->pmu;
phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0];
phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1];
+ if (vendor_dev_id == PCIDevVenIDARC1203) {
+ phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell);
+ phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell_mask);
+ phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell);
+ phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell_mask);
+ } else {
+ phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL, drv2iop_doorbell);
+ phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL, drv2iop_doorbell_mask);
+ phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL, iop2drv_doorbell);
+ phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL, iop2drv_doorbell_mask);
+ }
}
break;
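
The case above is what lets one type-B code path drive both register layouts: the doorbell offsets are resolved once at attach time, from HBB_DOORBELL or HBB_DOORBELL_1203 depending on the device id, and stored in the message unit for the READ_CHIP_REG32/WRITE_CHIP_REG32 accessors. A userspace sketch of that indirection with simplified, made-up layouts; write_reg32 stands in for bus_space_write_4.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified, made-up register layouts for the same logical doorbells. */
struct layout_old  { uint32_t pad[2]; uint32_t drv2iop; uint32_t iop2drv; };
struct layout_1203 { uint32_t iop2drv; uint32_t pad[5]; uint32_t drv2iop; };

struct message_unit {
	uint8_t *base;              /* the mapped BAR in the real driver */
	size_t   drv2iop_doorbell;  /* byte offsets resolved at attach time */
	size_t   iop2drv_doorbell;
};

static void write_reg32(struct message_unit *mu, size_t off, uint32_t v)
{
	*(uint32_t *)(mu->base + off) = v;  /* bus_space_write_4() in the driver */
}

int main(void)
{
	static uint32_t bar_words[16];      /* aligned backing store for the fake BAR */
	struct message_unit mu = { (uint8_t *)bar_words, 0, 0 };
	int is_1203 = 1;                    /* would come from the PCI device id */

	if (is_1203) {
		mu.drv2iop_doorbell = offsetof(struct layout_1203, drv2iop);
		mu.iop2drv_doorbell = offsetof(struct layout_1203, iop2drv);
	} else {
		mu.drv2iop_doorbell = offsetof(struct layout_old, drv2iop);
		mu.iop2drv_doorbell = offsetof(struct layout_old, iop2drv);
	}
	write_reg32(&mu, mu.drv2iop_doorbell, 0x1);
	printf("drv2iop doorbell at byte offset %u\n", (unsigned)mu.drv2iop_doorbell);
	return 0;
}
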
case ACB_ADAPTER_TYPE_C: {
@@ -4254,7 +4308,7 @@ static int arcmsr_attach(device_t dev)
struct cam_devq *devq; /* Device Queue to use for this SIM */
struct resource *irqres;
int rid;
-
+
if(acb == NULL) {
printf("arcmsr%d: cannot allocate softc\n", unit);
return (ENOMEM);
@@ -4288,9 +4342,9 @@ static int arcmsr_attach(device_t dev)
* Create device queue of SIM(s) * (MAX_START_JOB - 1) :
* max_sim_transactions
*/
- devq = cam_simq_alloc(ARCMSR_MAX_START_JOB);
+ devq = cam_simq_alloc(acb->maxOutstanding);
if(devq == NULL) {
- arcmsr_free_resource(acb);
+ arcmsr_free_resource(acb);
bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
arcmsr_mutex_destroy(acb);
printf("arcmsr%d: cam_simq_alloc failure!\n", unit);
@@ -4362,14 +4416,16 @@ static int arcmsr_attach(device_t dev)
static int arcmsr_probe(device_t dev)
{
u_int32_t id;
+ u_int16_t sub_device_id;
static char buf[256];
char x_type[]={"unknown"};
char *type;
int raid6 = 1;
-
+
if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
return (ENXIO);
}
+ sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
switch(id = pci_get_devid(dev)) {
case PCIDevVenIDARC1110:
case PCIDevVenIDARC1200:
@@ -4402,9 +4458,13 @@ static int arcmsr_probe(device_t dev)
case PCIDevVenIDARC1882:
case PCIDevVenIDARC1213:
case PCIDevVenIDARC1223:
- type = "SAS 6G";
+ if (sub_device_id == ARECA_SUB_DEV_ID_1883)
+ type = "SAS 12G";
+ else
+ type = "SAS 6G";
break;
case PCIDevVenIDARC1214:
+ case PCIDevVenIDARC1203:
type = "SATA 6G";
break;
default:
@@ -4429,7 +4489,7 @@ static int arcmsr_shutdown(device_t dev)
u_int32_t intmask_org;
struct CommandControlBlock *srb;
struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
-
+
/* stop adapter background rebuild */
ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
/* disable all outbound interrupt */
@@ -4469,7 +4529,7 @@ static int arcmsr_detach(device_t dev)
{
struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
int i;
-
+
callout_stop(&acb->devmap_callout);
bus_teardown_intr(dev, acb->irqres, acb->ih);
arcmsr_shutdown(dev);
diff --git a/sys/dev/arcmsr/arcmsr.h b/sys/dev/arcmsr/arcmsr.h
index e4f2d6f..aa613ad 100644
--- a/sys/dev/arcmsr/arcmsr.h
+++ b/sys/dev/arcmsr/arcmsr.h
@@ -34,23 +34,23 @@
**************************************************************************
* $FreeBSD$
*/
-#define ARCMSR_SCSI_INITIATOR_ID 255
-#define ARCMSR_DEV_SECTOR_SIZE 512
-#define ARCMSR_MAX_XFER_SECTORS 4096
-#define ARCMSR_MAX_TARGETID 17 /*16 max target id + 1*/
-#define ARCMSR_MAX_TARGETLUN 8 /*8*/
-#define ARCMSR_MAX_CHIPTYPE_NUM 4
-#define ARCMSR_MAX_OUTSTANDING_CMD 256
-#define ARCMSR_MAX_START_JOB 256
-#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
-#define ARCMSR_MAX_FREESRB_NUM 384
-#define ARCMSR_MAX_QBUFFER 4096 /* ioctl QBUFFER */
-#define ARCMSR_MAX_SG_ENTRIES 38 /* max 38*/
-#define ARCMSR_MAX_ADAPTER 4
-#define ARCMSR_RELEASE_SIMQ_LEVEL 230
-#define ARCMSR_MAX_HBB_POSTQUEUE 264 /* (ARCMSR_MAX_OUTSTANDING_CMD+8) */
-#define ARCMSR_MAX_HBD_POSTQUEUE 256
-#define ARCMSR_TIMEOUT_DELAY 60 /* in sec */
+#define ARCMSR_SCSI_INITIATOR_ID 255
+#define ARCMSR_DEV_SECTOR_SIZE 512
+#define ARCMSR_MAX_XFER_SECTORS 4096
+#define ARCMSR_MAX_TARGETID 17 /*16 max target id + 1*/
+#define ARCMSR_MAX_TARGETLUN 8 /*8*/
+#define ARCMSR_MAX_CHIPTYPE_NUM 4
+#define ARCMSR_MAX_OUTSTANDING_CMD 256
+#define ARCMSR_MAX_START_JOB 256
+#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
+#define ARCMSR_MAX_FREESRB_NUM 384
+#define ARCMSR_MAX_QBUFFER 4096 /* ioctl QBUFFER */
+#define ARCMSR_MAX_SG_ENTRIES 38 /* max 38*/
+#define ARCMSR_MAX_ADAPTER 4
+#define ARCMSR_RELEASE_SIMQ_LEVEL 230
+#define ARCMSR_MAX_HBB_POSTQUEUE 264 /* (ARCMSR_MAX_OUTSTANDING_CMD+8) */
+#define ARCMSR_MAX_HBD_POSTQUEUE 256
+#define ARCMSR_TIMEOUT_DELAY 60 /* in sec */
/*
*********************************************************************
*/
@@ -75,7 +75,7 @@
#define ARCMSR_LOCK_RELEASE(l) mtx_unlock(l)
#define ARCMSR_LOCK_TRY(l) mtx_trylock(l)
#define arcmsr_htole32(x) htole32(x)
- typedef struct mtx arcmsr_lock_t;
+ typedef struct mtx arcmsr_lock_t;
#else
#define ARCMSR_LOCK_INIT(l, s) simple_lock_init(l)
#define ARCMSR_LOCK_DESTROY(l)
@@ -91,7 +91,7 @@
**
**********************************************************************************
*/
-#define PCI_VENDOR_ID_ARECA 0x17D3 /* Vendor ID */
+#define PCI_VENDOR_ID_ARECA 0x17D3 /* Vendor ID */
#define PCI_DEVICE_ID_ARECA_1110 0x1110 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1120 0x1120 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1130 0x1130 /* Device ID */
@@ -99,6 +99,7 @@
#define PCI_DEVICE_ID_ARECA_1170 0x1170 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1200 0x1200 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1201 0x1201 /* Device ID */
+#define PCI_DEVICE_ID_ARECA_1203 0x1203 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1210 0x1210 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1212 0x1212 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1214 0x1214 /* Device ID */
@@ -118,6 +119,7 @@
#define ARECA_SUB_DEV_ID_1880 0x1880 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1882 0x1882 /* Subsystem Device ID */
+#define ARECA_SUB_DEV_ID_1883 0x1883 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1212 0x1212 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1213 0x1213 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1222 0x1222 /* Subsystem Device ID */
@@ -130,13 +132,14 @@
#define PCIDevVenIDARC1170 0x117017D3 /* Vendor Device ID */
#define PCIDevVenIDARC1200 0x120017D3 /* Vendor Device ID */
#define PCIDevVenIDARC1201 0x120117D3 /* Vendor Device ID */
+#define PCIDevVenIDARC1203 0x120317D3 /* Vendor Device ID */
#define PCIDevVenIDARC1210 0x121017D3 /* Vendor Device ID */
#define PCIDevVenIDARC1212 0x121217D3 /* Vendor Device ID */
-#define PCIDevVenIDARC1213 0x121317D3 /* Vendor Device ID */
-#define PCIDevVenIDARC1214 0x121417D3 /* Vendor Device ID */
+#define PCIDevVenIDARC1213 0x121317D3 /* Vendor Device ID */
+#define PCIDevVenIDARC1214 0x121417D3 /* Vendor Device ID */
#define PCIDevVenIDARC1220 0x122017D3 /* Vendor Device ID */
#define PCIDevVenIDARC1222 0x122217D3 /* Vendor Device ID */
-#define PCIDevVenIDARC1223 0x122317D3 /* Vendor Device ID */
+#define PCIDevVenIDARC1223 0x122317D3 /* Vendor Device ID */
#define PCIDevVenIDARC1230 0x123017D3 /* Vendor Device ID */
#define PCIDevVenIDARC1231 0x123117D3 /* Vendor Device ID */
#define PCIDevVenIDARC1260 0x126017D3 /* Vendor Device ID */
@@ -148,7 +151,7 @@
#define PCIDevVenIDARC1680 0x168017D3 /* Vendor Device ID */
#define PCIDevVenIDARC1681 0x168117D3 /* Vendor Device ID */
#define PCIDevVenIDARC1880 0x188017D3 /* Vendor Device ID */
-#define PCIDevVenIDARC1882 0x188217D3 /* Vendor Device ID */
+#define PCIDevVenIDARC1882 0x188217D3 /* Vendor Device ID */
#ifndef PCIR_BARS
#define PCIR_BARS 0x10
@@ -175,18 +178,20 @@
**
**********************************************************************************
*/
-#define arcmsr_ccbsrb_ptr spriv_ptr0
-#define arcmsr_ccbacb_ptr spriv_ptr1
-#define dma_addr_hi32(addr) (u_int32_t) ((addr>>16)>>16)
-#define dma_addr_lo32(addr) (u_int32_t) (addr & 0xffffffff)
-#define get_min(x,y) ((x) < (y) ? (x) : (y))
-#define get_max(x,y) ((x) < (y) ? (y) : (x))
+#define arcmsr_ccbsrb_ptr spriv_ptr0
+#define arcmsr_ccbacb_ptr spriv_ptr1
+#define dma_addr_hi32(addr) (u_int32_t) ((addr>>16)>>16)
+#define dma_addr_lo32(addr) (u_int32_t) (addr & 0xffffffff)
+#define get_min(x,y) ((x) < (y) ? (x) : (y))
+#define get_max(x,y) ((x) < (y) ? (y) : (x))
/*
**************************************************************************
**************************************************************************
*/
-#define CHIP_REG_READ32(s, b, r) bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
+#define CHIP_REG_READ32(s, b, r) bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
#define CHIP_REG_WRITE32(s, b, r, d) bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
+#define READ_CHIP_REG32(b, r) bus_space_read_4(acb->btag[b], acb->bhandle[b], r)
+#define WRITE_CHIP_REG32(b, r, d) bus_space_write_4(acb->btag[b], acb->bhandle[b], r, d)
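
The new pair differs from CHIP_REG_READ32/CHIP_REG_WRITE32 only in where the offset comes from: the old macros compute it at compile time with offsetof(struct s, r), the new ones take a byte offset resolved at run time, which is what the ARC1203 doorbell remapping needs. A sketch of both shapes, with plain pointer dereferences and made-up register names standing in for the bus_space accessors:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct regs { uint32_t ctrl; uint32_t doorbell; };

static struct regs bar_mem;                 /* stands in for the mapped BAR */
static uint8_t *bar = (uint8_t *)&bar_mem;

/* Compile-time field offset, the CHIP_REG_* shape: */
#define REG32_FIELD(s, r)  (*(uint32_t *)(bar + offsetof(struct s, r)))
/* Pre-resolved byte offset, the READ/WRITE_CHIP_REG32 shape: */
#define REG32_OFF(off)     (*(uint32_t *)(bar + (off)))

int main(void)
{
	size_t doorbell_off = offsetof(struct regs, doorbell); /* resolved once */

	REG32_FIELD(regs, doorbell) = 0x5a;                    /* write via field name */
	printf("0x%x\n", (unsigned)REG32_OFF(doorbell_off));   /* read via stored offset */
	return 0;
}
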
/*
**********************************************************************************
** IOCTL CONTROL Mail Box
@@ -209,17 +214,17 @@ struct CMD_MESSAGE_FIELD {
/************************************************************************/
/************************************************************************/
-#define ARCMSR_IOP_ERROR_ILLEGALPCI 0x0001
-#define ARCMSR_IOP_ERROR_VENDORID 0x0002
-#define ARCMSR_IOP_ERROR_DEVICEID 0x0002
-#define ARCMSR_IOP_ERROR_ILLEGALCDB 0x0003
-#define ARCMSR_IOP_ERROR_UNKNOW_CDBERR 0x0004
-#define ARCMSR_SYS_ERROR_MEMORY_ALLOCATE 0x0005
-#define ARCMSR_SYS_ERROR_MEMORY_CROSS4G 0x0006
-#define ARCMSR_SYS_ERROR_MEMORY_LACK 0x0007
-#define ARCMSR_SYS_ERROR_MEMORY_RANGE 0x0008
-#define ARCMSR_SYS_ERROR_DEVICE_BASE 0x0009
-#define ARCMSR_SYS_ERROR_PORT_VALIDATE 0x000A
+#define ARCMSR_IOP_ERROR_ILLEGALPCI 0x0001
+#define ARCMSR_IOP_ERROR_VENDORID 0x0002
+#define ARCMSR_IOP_ERROR_DEVICEID 0x0002
+#define ARCMSR_IOP_ERROR_ILLEGALCDB 0x0003
+#define ARCMSR_IOP_ERROR_UNKNOW_CDBERR 0x0004
+#define ARCMSR_SYS_ERROR_MEMORY_ALLOCATE 0x0005
+#define ARCMSR_SYS_ERROR_MEMORY_CROSS4G 0x0006
+#define ARCMSR_SYS_ERROR_MEMORY_LACK 0x0007
+#define ARCMSR_SYS_ERROR_MEMORY_RANGE 0x0008
+#define ARCMSR_SYS_ERROR_DEVICE_BASE 0x0009
+#define ARCMSR_SYS_ERROR_PORT_VALIDATE 0x000A
/*DeviceType*/
#define ARECA_SATA_RAID 0x90000000
@@ -251,44 +256,44 @@ struct CMD_MESSAGE_FIELD {
#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE _IOWR('F', FUNCTION_FLUSH_ADAPTER_CACHE, struct CMD_MESSAGE_FIELD)
/* ARECA IOCTL ReturnCode */
-#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
-#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
-#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
-#define ARCMSR_IOCTL_RETURNCODE_BUS_HANG_ON 0x00000088
+#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
+#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
+#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
+#define ARCMSR_IOCTL_RETURNCODE_BUS_HANG_ON 0x00000088
/*
************************************************************************
** SPEC. for Areca HBA adapter
************************************************************************
*/
/* signature of set and get firmware config */
-#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
-#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
+#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
+#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
/* message code of inbound message register */
-#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
-#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
-#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
-#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
-#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
-#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
-#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
-#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
-#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
+#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
+#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
+#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
+#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
+#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
+#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
+#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
+#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
+#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
/* doorbell interrupt generator */
-#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
-#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
-#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
-#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
+#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
+#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
+#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
+#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
/* srb areca cdb flag */
-#define ARCMSR_SRBPOST_FLAG_SGL_BSIZE 0x80000000
-#define ARCMSR_SRBPOST_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_SRBREPLY_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_SRBREPLY_FLAG_ERROR 0x10000000
-#define ARCMSR_SRBREPLY_FLAG_ERROR_MODE0 0x10000000
-#define ARCMSR_SRBREPLY_FLAG_ERROR_MODE1 0x00000001
+#define ARCMSR_SRBPOST_FLAG_SGL_BSIZE 0x80000000
+#define ARCMSR_SRBPOST_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_SRBREPLY_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_SRBREPLY_FLAG_ERROR 0x10000000
+#define ARCMSR_SRBREPLY_FLAG_ERROR_MODE0 0x10000000
+#define ARCMSR_SRBREPLY_FLAG_ERROR_MODE1 0x00000001
/* outbound firmware ok */
-#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
+#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
-#define ARCMSR_ARC1680_BUS_RESET 0x00000003
+#define ARCMSR_ARC1680_BUS_RESET 0x00000003
/*
************************************************************************
** SPEC. for Areca HBB adapter
@@ -300,26 +305,31 @@ struct CMD_MESSAGE_FIELD {
#define ARCMSR_IOP2DRV_DOORBELL 0x00020408 /* window of "instruction flags" from iop to driver */
#define ARCMSR_IOP2DRV_DOORBELL_MASK 0x0002040C
+#define ARCMSR_IOP2DRV_DOORBELL_1203 0x00021870 /* window of "instruction flags" from iop to driver */
+#define ARCMSR_IOP2DRV_DOORBELL_MASK_1203 0x00021874
+#define ARCMSR_DRV2IOP_DOORBELL_1203 0x00021878 /* window of "instruction flags" from driver to iop */
+#define ARCMSR_DRV2IOP_DOORBELL_MASK_1203 0x0002187C
+
/* ARECA FLAG LANGUAGE */
#define ARCMSR_IOP2DRV_DATA_WRITE_OK 0x00000001 /* ioctl transfer */
#define ARCMSR_IOP2DRV_DATA_READ_OK 0x00000002 /* ioctl transfer */
#define ARCMSR_IOP2DRV_CDB_DONE 0x00000004
#define ARCMSR_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
-#define ARCMSR_DOORBELL_HANDLE_INT 0x0000000F
+#define ARCMSR_DOORBELL_HANDLE_INT 0x0000000F
#define ARCMSR_DOORBELL_INT_CLEAR_PATTERN 0xFF00FFF0
#define ARCMSR_MESSAGE_INT_CLEAR_PATTERN 0xFF00FFF7
-#define ARCMSR_MESSAGE_GET_CONFIG 0x00010008 /* (ARCMSR_INBOUND_MESG0_GET_CONFIG<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
-#define ARCMSR_MESSAGE_SET_CONFIG 0x00020008 /* (ARCMSR_INBOUND_MESG0_SET_CONFIG<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
-#define ARCMSR_MESSAGE_ABORT_CMD 0x00030008 /* (ARCMSR_INBOUND_MESG0_ABORT_CMD<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
-#define ARCMSR_MESSAGE_STOP_BGRB 0x00040008 /* (ARCMSR_INBOUND_MESG0_STOP_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_GET_CONFIG 0x00010008 /* (ARCMSR_INBOUND_MESG0_GET_CONFIG<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_SET_CONFIG 0x00020008 /* (ARCMSR_INBOUND_MESG0_SET_CONFIG<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_ABORT_CMD 0x00030008 /* (ARCMSR_INBOUND_MESG0_ABORT_CMD<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_STOP_BGRB 0x00040008 /* (ARCMSR_INBOUND_MESG0_STOP_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008 /* (ARCMSR_INBOUND_MESG0_FLUSH_CACHE<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
-#define ARCMSR_MESSAGE_START_BGRB 0x00060008 /* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
-#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
-#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
-#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
-#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000 /* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
+#define ARCMSR_MESSAGE_START_BGRB 0x00060008 /* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
+#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
+#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
+#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000 /* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_DRV2IOP_DATA_WRITE_OK 0x00000001 /* ioctl transfer */
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002 /* ioctl transfer */
@@ -328,13 +338,13 @@ struct CMD_MESSAGE_FIELD {
#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010 /* */
/* data tunnel buffer between user space program and its firmware */
-#define ARCMSR_MSGCODE_RWBUFFER 0x0000fa00 /* iop msgcode_rwbuffer for message command */
-#define ARCMSR_IOCTL_WBUFFER 0x0000fe00 /* user space data to iop 128bytes */
-#define ARCMSR_IOCTL_RBUFFER 0x0000ff00 /* iop data to user space 128bytes */
-#define ARCMSR_HBB_BASE0_OFFSET 0x00000010
-#define ARCMSR_HBB_BASE1_OFFSET 0x00000018
-#define ARCMSR_HBB_BASE0_LEN 0x00021000
-#define ARCMSR_HBB_BASE1_LEN 0x00010000
+#define ARCMSR_MSGCODE_RWBUFFER 0x0000fa00 /* iop msgcode_rwbuffer for message command */
+#define ARCMSR_IOCTL_WBUFFER 0x0000fe00 /* user space data to iop 128bytes */
+#define ARCMSR_IOCTL_RBUFFER 0x0000ff00 /* iop data to user space 128bytes */
+#define ARCMSR_HBB_BASE0_OFFSET 0x00000010
+#define ARCMSR_HBB_BASE1_OFFSET 0x00000018
+#define ARCMSR_HBB_BASE0_LEN 0x00021000
+#define ARCMSR_HBB_BASE1_LEN 0x00010000
/*
************************************************************************
** SPEC. for Areca HBC adapter
@@ -382,64 +392,64 @@ struct CMD_MESSAGE_FIELD {
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004/*outbound DATA READ isr door bell clear*/
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008/*outbound message 0 ready*/
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008/*outbound message cmd isr door bell clear*/
-#define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK 0x80000000/*ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK*/
+#define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK 0x80000000/*ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK*/
#define ARCMSR_HBCMU_RESET_ADAPTER 0x00000024
-#define ARCMSR_HBCMU_DiagWrite_ENABLE 0x00000080
+#define ARCMSR_HBCMU_DiagWrite_ENABLE 0x00000080
/*
************************************************************************
** SPEC. for Areca HBD adapter
************************************************************************
*/
-#define ARCMSR_HBDMU_CHIP_ID 0x00004
+#define ARCMSR_HBDMU_CHIP_ID 0x00004
#define ARCMSR_HBDMU_CPU_MEMORY_CONFIGURATION 0x00008
-#define ARCMSR_HBDMU_I2_HOST_INTERRUPT_MASK 0x00034
-#define ARCMSR_HBDMU_MAIN_INTERRUPT_STATUS 0x00200
+#define ARCMSR_HBDMU_I2_HOST_INTERRUPT_MASK 0x00034
+#define ARCMSR_HBDMU_MAIN_INTERRUPT_STATUS 0x00200
#define ARCMSR_HBDMU_PCIE_F0_INTERRUPT_ENABLE 0x0020C
-#define ARCMSR_HBDMU_INBOUND_MESSAGE0 0x00400
-#define ARCMSR_HBDMU_INBOUND_MESSAGE1 0x00404
-#define ARCMSR_HBDMU_OUTBOUND_MESSAGE0 0x00420
-#define ARCMSR_HBDMU_OUTBOUND_MESSAGE1 0x00424
-#define ARCMSR_HBDMU_INBOUND_DOORBELL 0x00460
-#define ARCMSR_HBDMU_OUTBOUND_DOORBELL 0x00480
+#define ARCMSR_HBDMU_INBOUND_MESSAGE0 0x00400
+#define ARCMSR_HBDMU_INBOUND_MESSAGE1 0x00404
+#define ARCMSR_HBDMU_OUTBOUND_MESSAGE0 0x00420
+#define ARCMSR_HBDMU_OUTBOUND_MESSAGE1 0x00424
+#define ARCMSR_HBDMU_INBOUND_DOORBELL 0x00460
+#define ARCMSR_HBDMU_OUTBOUND_DOORBELL 0x00480
#define ARCMSR_HBDMU_OUTBOUND_DOORBELL_ENABLE 0x00484
-#define ARCMSR_HBDMU_INBOUND_LIST_BASE_LOW 0x01000
-#define ARCMSR_HBDMU_INBOUND_LIST_BASE_HIGH 0x01004
+#define ARCMSR_HBDMU_INBOUND_LIST_BASE_LOW 0x01000
+#define ARCMSR_HBDMU_INBOUND_LIST_BASE_HIGH 0x01004
#define ARCMSR_HBDMU_INBOUND_LIST_WRITE_POINTER 0x01018
-#define ARCMSR_HBDMU_OUTBOUND_LIST_BASE_LOW 0x01060
+#define ARCMSR_HBDMU_OUTBOUND_LIST_BASE_LOW 0x01060
#define ARCMSR_HBDMU_OUTBOUND_LIST_BASE_HIGH 0x01064
#define ARCMSR_HBDMU_OUTBOUND_LIST_COPY_POINTER 0x0106C
#define ARCMSR_HBDMU_OUTBOUND_LIST_READ_POINTER 0x01070
#define ARCMSR_HBDMU_OUTBOUND_INTERRUPT_CAUSE 0x01088
#define ARCMSR_HBDMU_OUTBOUND_INTERRUPT_ENABLE 0x0108C
-#define ARCMSR_HBDMU_MESSAGE_WBUFFER 0x02000
-#define ARCMSR_HBDMU_MESSAGE_RBUFFER 0x02100
-#define ARCMSR_HBDMU_MESSAGE_RWBUFFER 0x02200
+#define ARCMSR_HBDMU_MESSAGE_WBUFFER 0x02000
+#define ARCMSR_HBDMU_MESSAGE_RBUFFER 0x02100
+#define ARCMSR_HBDMU_MESSAGE_RWBUFFER 0x02200
-#define ARCMSR_HBDMU_ISR_THROTTLING_LEVEL 16
-#define ARCMSR_HBDMU_ISR_MAX_DONE_QUEUE 20
+#define ARCMSR_HBDMU_ISR_THROTTLING_LEVEL 16
+#define ARCMSR_HBDMU_ISR_MAX_DONE_QUEUE 20
/* Host Interrupt Mask */
-#define ARCMSR_HBDMU_ALL_INT_ENABLE 0x00001010 /* enable all ISR */
-#define ARCMSR_HBDMU_ALL_INT_DISABLE 0x00000000 /* disable all ISR */
+#define ARCMSR_HBDMU_ALL_INT_ENABLE 0x00001010 /* enable all ISR */
+#define ARCMSR_HBDMU_ALL_INT_DISABLE 0x00000000 /* disable all ISR */
/* Host Interrupt Status */
-#define ARCMSR_HBDMU_OUTBOUND_INT 0x00001010
-#define ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT 0x00001000
-#define ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT 0x00000010
+#define ARCMSR_HBDMU_OUTBOUND_INT 0x00001010
+#define ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT 0x00001000
+#define ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT 0x00000010
/* DoorBell*/
-#define ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY 0x00000001
-#define ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ 0x00000002
+#define ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY 0x00000001
+#define ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ 0x00000002
-#define ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK 0x00000001
-#define ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK 0x00000002
+#define ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK 0x00000001
+#define ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK 0x00000002
/*outbound message 0 ready*/
#define ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE 0x02000000
-#define ARCMSR_HBDMU_F0_DOORBELL_CAUSE 0x02000003
+#define ARCMSR_HBDMU_F0_DOORBELL_CAUSE 0x02000003
/*outbound message cmd isr door bell clear*/
#define ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR 0x02000000
@@ -449,7 +459,7 @@ struct CMD_MESSAGE_FIELD {
#define ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
-#define ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK 0x80000000
+#define ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK 0x80000000
/*
*********************************************************************
** Message Unit structure
@@ -457,41 +467,49 @@ struct CMD_MESSAGE_FIELD {
*/
struct HBA_MessageUnit
{
- u_int32_t resrved0[4]; /*0000 000F*/
- u_int32_t inbound_msgaddr0; /*0010 0013*/
- u_int32_t inbound_msgaddr1; /*0014 0017*/
- u_int32_t outbound_msgaddr0; /*0018 001B*/
- u_int32_t outbound_msgaddr1; /*001C 001F*/
- u_int32_t inbound_doorbell; /*0020 0023*/
- u_int32_t inbound_intstatus; /*0024 0027*/
- u_int32_t inbound_intmask; /*0028 002B*/
- u_int32_t outbound_doorbell; /*002C 002F*/
- u_int32_t outbound_intstatus; /*0030 0033*/
- u_int32_t outbound_intmask; /*0034 0037*/
- u_int32_t reserved1[2]; /*0038 003F*/
- u_int32_t inbound_queueport; /*0040 0043*/
- u_int32_t outbound_queueport; /*0044 0047*/
- u_int32_t reserved2[2]; /*0048 004F*/
- u_int32_t reserved3[492]; /*0050 07FF ......local_buffer 492*/
- u_int32_t reserved4[128]; /*0800 09FF 128*/
- u_int32_t msgcode_rwbuffer[256]; /*0a00 0DFF 256*/
- u_int32_t message_wbuffer[32]; /*0E00 0E7F 32*/
- u_int32_t reserved5[32]; /*0E80 0EFF 32*/
- u_int32_t message_rbuffer[32]; /*0F00 0F7F 32*/
- u_int32_t reserved6[32]; /*0F80 0FFF 32*/
+ u_int32_t resrved0[4]; /*0000 000F*/
+ u_int32_t inbound_msgaddr0; /*0010 0013*/
+ u_int32_t inbound_msgaddr1; /*0014 0017*/
+ u_int32_t outbound_msgaddr0; /*0018 001B*/
+ u_int32_t outbound_msgaddr1; /*001C 001F*/
+ u_int32_t inbound_doorbell; /*0020 0023*/
+ u_int32_t inbound_intstatus; /*0024 0027*/
+ u_int32_t inbound_intmask; /*0028 002B*/
+ u_int32_t outbound_doorbell; /*002C 002F*/
+ u_int32_t outbound_intstatus; /*0030 0033*/
+ u_int32_t outbound_intmask; /*0034 0037*/
+ u_int32_t reserved1[2]; /*0038 003F*/
+ u_int32_t inbound_queueport; /*0040 0043*/
+ u_int32_t outbound_queueport; /*0044 0047*/
+ u_int32_t reserved2[2]; /*0048 004F*/
+ u_int32_t reserved3[492]; /*0050 07FF ......local_buffer 492*/
+ u_int32_t reserved4[128]; /*0800 09FF 128*/
+ u_int32_t msgcode_rwbuffer[256]; /*0a00 0DFF 256*/
+ u_int32_t message_wbuffer[32]; /*0E00 0E7F 32*/
+ u_int32_t reserved5[32]; /*0E80 0EFF 32*/
+ u_int32_t message_rbuffer[32]; /*0F00 0F7F 32*/
+ u_int32_t reserved6[32]; /*0F80 0FFF 32*/
};
/*
*********************************************************************
**
*********************************************************************
*/
+struct HBB_DOORBELL_1203
+{
+ u_int8_t doorbell_reserved[ARCMSR_IOP2DRV_DOORBELL_1203]; /*reserved */
+ u_int32_t iop2drv_doorbell; /*offset 0x00021870:00,01,02,03: window of "instruction flags" from iop to driver */
+ u_int32_t iop2drv_doorbell_mask; /* 04,05,06,07: doorbell mask */
+ u_int32_t drv2iop_doorbell; /* 08,09,10,11: window of "instruction flags" from driver to iop */
+ u_int32_t drv2iop_doorbell_mask; /* 12,13,14,15: doorbell mask */
+};
struct HBB_DOORBELL
{
- u_int8_t doorbell_reserved[ARCMSR_DRV2IOP_DOORBELL]; /*reserved */
- u_int32_t drv2iop_doorbell; /*offset 0x00020400:00,01,02,03: window of "instruction flags" from driver to iop */
- u_int32_t drv2iop_doorbell_mask; /* 04,05,06,07: doorbell mask */
- u_int32_t iop2drv_doorbell; /* 08,09,10,11: window of "instruction flags" from iop to driver */
- u_int32_t iop2drv_doorbell_mask; /* 12,13,14,15: doorbell mask */
+ u_int8_t doorbell_reserved[ARCMSR_DRV2IOP_DOORBELL]; /*reserved */
+ u_int32_t drv2iop_doorbell; /*offset 0x00020400:00,01,02,03: window of "instruction flags" from driver to iop */
+ u_int32_t drv2iop_doorbell_mask; /* 04,05,06,07: doorbell mask */
+ u_int32_t iop2drv_doorbell; /* 08,09,10,11: window of "instruction flags" from iop to driver */
+ u_int32_t iop2drv_doorbell_mask; /* 12,13,14,15: doorbell mask */
};
/*
*********************************************************************
@@ -500,11 +518,11 @@ struct HBB_DOORBELL
*/
struct HBB_RWBUFFER
{
- u_int8_t message_reserved0[ARCMSR_MSGCODE_RWBUFFER]; /*reserved */
- u_int32_t msgcode_rwbuffer[256]; /*offset 0x0000fa00: 0, 1, 2, 3,...,1023: message code read write 1024bytes */
- u_int32_t message_wbuffer[32]; /*offset 0x0000fe00:1024,1025,1026,1027,...,1151: user space data to iop 128bytes */
- u_int32_t message_reserved1[32]; /* 1152,1153,1154,1155,...,1279: message reserved*/
- u_int32_t message_rbuffer[32]; /*offset 0x0000ff00:1280,1281,1282,1283,...,1407: iop data to user space 128bytes */
+ u_int8_t message_reserved0[ARCMSR_MSGCODE_RWBUFFER]; /*reserved */
+ u_int32_t msgcode_rwbuffer[256]; /*offset 0x0000fa00: 0, 1, 2, 3,...,1023: message code read write 1024bytes */
+ u_int32_t message_wbuffer[32]; /*offset 0x0000fe00:1024,1025,1026,1027,...,1151: user space data to iop 128bytes */
+ u_int32_t message_reserved1[32]; /* 1152,1153,1154,1155,...,1279: message reserved*/
+ u_int32_t message_rbuffer[32]; /*offset 0x0000ff00:1280,1281,1282,1283,...,1407: iop data to user space 128bytes */
};
/*
*********************************************************************
@@ -513,12 +531,16 @@ struct HBB_RWBUFFER
*/
struct HBB_MessageUnit
{
- u_int32_t post_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; /* post queue buffer for iop */
- u_int32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; /* done queue buffer for iop */
- int32_t postq_index; /* post queue index */
- int32_t doneq_index; /* done queue index */
+ u_int32_t post_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; /* post queue buffer for iop */
+ u_int32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; /* done queue buffer for iop */
+ int32_t postq_index; /* post queue index */
+ int32_t doneq_index; /* done queue index */
struct HBB_DOORBELL *hbb_doorbell;
struct HBB_RWBUFFER *hbb_rwbuffer;
+ bus_size_t drv2iop_doorbell; /* window of "instruction flags" from driver to iop */
+ bus_size_t drv2iop_doorbell_mask; /* doorbell mask */
+ bus_size_t iop2drv_doorbell; /* window of "instruction flags" from iop to driver */
+ bus_size_t iop2drv_doorbell_mask; /* doorbell mask */
};
/*
@@ -530,71 +552,71 @@ struct HBC_MessageUnit {
u_int32_t message_unit_status; /*0000 0003*/
u_int32_t slave_error_attribute; /*0004 0007*/
u_int32_t slave_error_address; /*0008 000B*/
- u_int32_t posted_outbound_doorbell; /*000C 000F*/
+ u_int32_t posted_outbound_doorbell; /*000C 000F*/
u_int32_t master_error_attribute; /*0010 0013*/
- u_int32_t master_error_address_low; /*0014 0017*/
- u_int32_t master_error_address_high; /*0018 001B*/
+ u_int32_t master_error_address_low; /*0014 0017*/
+ u_int32_t master_error_address_high; /*0018 001B*/
u_int32_t hcb_size; /*001C 001F size of the PCIe window used for HCB_Mode accesses*/
- u_int32_t inbound_doorbell; /*0020 0023*/
- u_int32_t diagnostic_rw_data; /*0024 0027*/
- u_int32_t diagnostic_rw_address_low; /*0028 002B*/
- u_int32_t diagnostic_rw_address_high; /*002C 002F*/
- u_int32_t host_int_status; /*0030 0033 host interrupt status*/
- u_int32_t host_int_mask; /*0034 0037 host interrupt mask*/
- u_int32_t dcr_data; /*0038 003B*/
- u_int32_t dcr_address; /*003C 003F*/
- u_int32_t inbound_queueport; /*0040 0043 port32 host inbound queue port*/
- u_int32_t outbound_queueport; /*0044 0047 port32 host outbound queue port*/
- u_int32_t hcb_pci_address_low; /*0048 004B*/
- u_int32_t hcb_pci_address_high; /*004C 004F*/
- u_int32_t iop_int_status; /*0050 0053*/
- u_int32_t iop_int_mask; /*0054 0057*/
- u_int32_t iop_inbound_queue_port; /*0058 005B*/
- u_int32_t iop_outbound_queue_port; /*005C 005F*/
- u_int32_t inbound_free_list_index; /*0060 0063 inbound free list producer consumer index*/
- u_int32_t inbound_post_list_index; /*0064 0067 inbound post list producer consumer index*/
- u_int32_t outbound_free_list_index; /*0068 006B outbound free list producer consumer index*/
- u_int32_t outbound_post_list_index; /*006C 006F outbound post list producer consumer index*/
- u_int32_t inbound_doorbell_clear; /*0070 0073*/
- u_int32_t i2o_message_unit_control; /*0074 0077*/
- u_int32_t last_used_message_source_address_low; /*0078 007B*/
- u_int32_t last_used_message_source_address_high; /*007C 007F*/
- u_int32_t pull_mode_data_byte_count[4]; /*0080 008F pull mode data byte count0..count7*/
- u_int32_t message_dest_address_index; /*0090 0093*/
- u_int32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
- u_int32_t utility_A_int_counter_timer; /*0098 009B*/
- u_int32_t outbound_doorbell; /*009C 009F*/
- u_int32_t outbound_doorbell_clear; /*00A0 00A3*/
- u_int32_t message_source_address_index; /*00A4 00A7 message accelerator source address consumer producer index*/
- u_int32_t message_done_queue_index; /*00A8 00AB message accelerator completion queue consumer producer index*/
- u_int32_t reserved0; /*00AC 00AF*/
- u_int32_t inbound_msgaddr0; /*00B0 00B3 scratchpad0*/
- u_int32_t inbound_msgaddr1; /*00B4 00B7 scratchpad1*/
- u_int32_t outbound_msgaddr0; /*00B8 00BB scratchpad2*/
- u_int32_t outbound_msgaddr1; /*00BC 00BF scratchpad3*/
- u_int32_t inbound_queueport_low; /*00C0 00C3 port64 host inbound queue port low*/
- u_int32_t inbound_queueport_high; /*00C4 00C7 port64 host inbound queue port high*/
- u_int32_t outbound_queueport_low; /*00C8 00CB port64 host outbound queue port low*/
- u_int32_t outbound_queueport_high; /*00CC 00CF port64 host outbound queue port high*/
- u_int32_t iop_inbound_queue_port_low; /*00D0 00D3*/
- u_int32_t iop_inbound_queue_port_high; /*00D4 00D7*/
- u_int32_t iop_outbound_queue_port_low; /*00D8 00DB*/
- u_int32_t iop_outbound_queue_port_high; /*00DC 00DF*/
- u_int32_t message_dest_queue_port_low; /*00E0 00E3 message accelerator destination queue port low*/
- u_int32_t message_dest_queue_port_high; /*00E4 00E7 message accelerator destination queue port high*/
- u_int32_t last_used_message_dest_address_low; /*00E8 00EB last used message accelerator destination address low*/
- u_int32_t last_used_message_dest_address_high; /*00EC 00EF last used message accelerator destination address high*/
- u_int32_t message_done_queue_base_address_low; /*00F0 00F3 message accelerator completion queue base address low*/
- u_int32_t message_done_queue_base_address_high; /*00F4 00F7 message accelerator completion queue base address high*/
- u_int32_t host_diagnostic; /*00F8 00FB*/
- u_int32_t write_sequence; /*00FC 00FF*/
- u_int32_t reserved1[34]; /*0100 0187*/
- u_int32_t reserved2[1950]; /*0188 1FFF*/
- u_int32_t message_wbuffer[32]; /*2000 207F*/
- u_int32_t reserved3[32]; /*2080 20FF*/
- u_int32_t message_rbuffer[32]; /*2100 217F*/
- u_int32_t reserved4[32]; /*2180 21FF*/
- u_int32_t msgcode_rwbuffer[256]; /*2200 23FF*/
+ u_int32_t inbound_doorbell; /*0020 0023*/
+ u_int32_t diagnostic_rw_data; /*0024 0027*/
+ u_int32_t diagnostic_rw_address_low; /*0028 002B*/
+ u_int32_t diagnostic_rw_address_high; /*002C 002F*/
+ u_int32_t host_int_status; /*0030 0033 host interrupt status*/
+ u_int32_t host_int_mask; /*0034 0037 host interrupt mask*/
+ u_int32_t dcr_data; /*0038 003B*/
+ u_int32_t dcr_address; /*003C 003F*/
+ u_int32_t inbound_queueport; /*0040 0043 port32 host inbound queue port*/
+ u_int32_t outbound_queueport; /*0044 0047 port32 host outbound queue port*/
+ u_int32_t hcb_pci_address_low; /*0048 004B*/
+ u_int32_t hcb_pci_address_high; /*004C 004F*/
+ u_int32_t iop_int_status; /*0050 0053*/
+ u_int32_t iop_int_mask; /*0054 0057*/
+ u_int32_t iop_inbound_queue_port; /*0058 005B*/
+ u_int32_t iop_outbound_queue_port; /*005C 005F*/
+ u_int32_t inbound_free_list_index; /*0060 0063 inbound free list producer consumer index*/
+ u_int32_t inbound_post_list_index; /*0064 0067 inbound post list producer consumer index*/
+ u_int32_t outbound_free_list_index; /*0068 006B outbound free list producer consumer index*/
+ u_int32_t outbound_post_list_index; /*006C 006F outbound post list producer consumer index*/
+ u_int32_t inbound_doorbell_clear; /*0070 0073*/
+ u_int32_t i2o_message_unit_control; /*0074 0077*/
+ u_int32_t last_used_message_source_address_low; /*0078 007B*/
+ u_int32_t last_used_message_source_address_high; /*007C 007F*/
+ u_int32_t pull_mode_data_byte_count[4]; /*0080 008F pull mode data byte count0..count7*/
+ u_int32_t message_dest_address_index; /*0090 0093*/
+ u_int32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
+ u_int32_t utility_A_int_counter_timer; /*0098 009B*/
+ u_int32_t outbound_doorbell; /*009C 009F*/
+ u_int32_t outbound_doorbell_clear; /*00A0 00A3*/
+ u_int32_t message_source_address_index; /*00A4 00A7 message accelerator source address consumer producer index*/
+ u_int32_t message_done_queue_index; /*00A8 00AB message accelerator completion queue consumer producer index*/
+ u_int32_t reserved0; /*00AC 00AF*/
+ u_int32_t inbound_msgaddr0; /*00B0 00B3 scratchpad0*/
+ u_int32_t inbound_msgaddr1; /*00B4 00B7 scratchpad1*/
+ u_int32_t outbound_msgaddr0; /*00B8 00BB scratchpad2*/
+ u_int32_t outbound_msgaddr1; /*00BC 00BF scratchpad3*/
+ u_int32_t inbound_queueport_low; /*00C0 00C3 port64 host inbound queue port low*/
+ u_int32_t inbound_queueport_high; /*00C4 00C7 port64 host inbound queue port high*/
+ u_int32_t outbound_queueport_low; /*00C8 00CB port64 host outbound queue port low*/
+ u_int32_t outbound_queueport_high; /*00CC 00CF port64 host outbound queue port high*/
+ u_int32_t iop_inbound_queue_port_low; /*00D0 00D3*/
+ u_int32_t iop_inbound_queue_port_high; /*00D4 00D7*/
+ u_int32_t iop_outbound_queue_port_low; /*00D8 00DB*/
+ u_int32_t iop_outbound_queue_port_high; /*00DC 00DF*/
+ u_int32_t message_dest_queue_port_low; /*00E0 00E3 message accelerator destination queue port low*/
+ u_int32_t message_dest_queue_port_high; /*00E4 00E7 message accelerator destination queue port high*/
+ u_int32_t last_used_message_dest_address_low; /*00E8 00EB last used message accelerator destination address low*/
+ u_int32_t last_used_message_dest_address_high; /*00EC 00EF last used message accelerator destination address high*/
+ u_int32_t message_done_queue_base_address_low; /*00F0 00F3 message accelerator completion queue base address low*/
+ u_int32_t message_done_queue_base_address_high; /*00F4 00F7 message accelerator completion queue base address high*/
+ u_int32_t host_diagnostic; /*00F8 00FB*/
+ u_int32_t write_sequence; /*00FC 00FF*/
+ u_int32_t reserved1[34]; /*0100 0187*/
+ u_int32_t reserved2[1950]; /*0188 1FFF*/
+ u_int32_t message_wbuffer[32]; /*2000 207F*/
+ u_int32_t reserved3[32]; /*2080 20FF*/
+ u_int32_t message_rbuffer[32]; /*2100 217F*/
+ u_int32_t reserved4[32]; /*2180 21FF*/
+ u_int32_t msgcode_rwbuffer[256]; /*2200 23FF*/
};
/*
*********************************************************************
@@ -616,46 +638,46 @@ struct OutBound_SRB {
struct HBD_MessageUnit {
uint32_t reserved0;
uint32_t chip_id; //0x0004
- uint32_t cpu_mem_config; //0x0008
- uint32_t reserved1[10]; //0x000C
+ uint32_t cpu_mem_config; //0x0008
+ uint32_t reserved1[10]; //0x000C
uint32_t i2o_host_interrupt_mask; //0x0034
- uint32_t reserved2[114]; //0x0038
- uint32_t host_int_status; //0x0200
- uint32_t host_int_enable; //0x0204
- uint32_t reserved3[1]; //0x0208
- uint32_t pcief0_int_enable; //0x020C
- uint32_t reserved4[124]; //0x0210
- uint32_t inbound_msgaddr0; //0x0400
- uint32_t inbound_msgaddr1; //0x0404
- uint32_t reserved5[6]; //0x0408
- uint32_t outbound_msgaddr0; //0x0420
- uint32_t outbound_msgaddr1; //0x0424
- uint32_t reserved6[14]; //0x0428
- uint32_t inbound_doorbell; //0x0460
- uint32_t reserved7[7]; //0x0464
- uint32_t outbound_doorbell; //0x0480
+ uint32_t reserved2[114]; //0x0038
+ uint32_t host_int_status; //0x0200
+ uint32_t host_int_enable; //0x0204
+ uint32_t reserved3[1]; //0x0208
+ uint32_t pcief0_int_enable; //0x020C
+ uint32_t reserved4[124]; //0x0210
+ uint32_t inbound_msgaddr0; //0x0400
+ uint32_t inbound_msgaddr1; //0x0404
+ uint32_t reserved5[6]; //0x0408
+ uint32_t outbound_msgaddr0; //0x0420
+ uint32_t outbound_msgaddr1; //0x0424
+ uint32_t reserved6[14]; //0x0428
+ uint32_t inbound_doorbell; //0x0460
+ uint32_t reserved7[7]; //0x0464
+ uint32_t outbound_doorbell; //0x0480
uint32_t outbound_doorbell_enable; //0x0484
uint32_t reserved8[734]; //0x0488
- uint32_t inboundlist_base_low; //0x1000
- uint32_t inboundlist_base_high; //0x1004
- uint32_t reserved9[4]; //0x1008
+ uint32_t inboundlist_base_low; //0x1000
+ uint32_t inboundlist_base_high; //0x1004
+ uint32_t reserved9[4]; //0x1008
uint32_t inboundlist_write_pointer; //0x1018
uint32_t inboundlist_read_pointer; //0x101C
uint32_t reserved10[16]; //0x1020
- uint32_t outboundlist_base_low; //0x1060
+ uint32_t outboundlist_base_low; //0x1060
uint32_t outboundlist_base_high; //0x1064
- uint32_t reserved11; //0x1068
+ uint32_t reserved11; //0x1068
uint32_t outboundlist_copy_pointer; //0x106C
uint32_t outboundlist_read_pointer; //0x1070 0x1072
- uint32_t reserved12[5]; //0x1074
+ uint32_t reserved12[5]; //0x1074
uint32_t outboundlist_interrupt_cause; //0x1088
uint32_t outboundlist_interrupt_enable; //0x108C
uint32_t reserved13[988]; //0x1090
- uint32_t message_wbuffer[32]; //0x2000
+ uint32_t message_wbuffer[32]; //0x2000
uint32_t reserved14[32]; //0x2080
- uint32_t message_rbuffer[32]; //0x2100
+ uint32_t message_rbuffer[32]; //0x2100
uint32_t reserved15[32]; //0x2180
- uint32_t msgcode_rwbuffer[256]; //0x2200
+ uint32_t msgcode_rwbuffer[256]; //0x2200
};
struct HBD_MessageUnit0 {
@@ -674,10 +696,10 @@ struct HBD_MessageUnit0 {
struct MessageUnit_UNION
{
union {
- struct HBA_MessageUnit hbamu;
- struct HBB_MessageUnit hbbmu;
- struct HBC_MessageUnit hbcmu;
- struct HBD_MessageUnit0 hbdmu;
+ struct HBA_MessageUnit hbamu;
+ struct HBB_MessageUnit hbbmu;
+ struct HBC_MessageUnit hbcmu;
+ struct HBD_MessageUnit0 hbdmu;
} muu;
};
/*
@@ -685,7 +707,7 @@ struct MessageUnit_UNION
** structure for holding DMA address data
*************************************************************
*/
-#define IS_SG64_ADDR 0x01000000 /* bit24 */
+#define IS_SG64_ADDR 0x01000000 /* bit24 */
/*
************************************************************************************************
** ARECA FIRMWARE SPEC
@@ -694,10 +716,10 @@ struct MessageUnit_UNION
** (All In/Out is in IOP331's view)
** 1. Message 0 --> InitThread message and return code
** 2. Doorbell is used for RS-232 emulation
-** inDoorBell : bit0 -- data in ready (DRIVER DATA WRITE OK)
-** bit1 -- data out has been read (DRIVER DATA READ OK)
-** outDooeBell: bit0 -- data out ready (IOP331 DATA WRITE OK)
-** bit1 -- data in has been read (IOP331 DATA READ OK)
+** inDoorBell : bit0 -- data in ready (DRIVER DATA WRITE OK)
+** bit1 -- data out has been read (DRIVER DATA READ OK)
+**        outDoorBell: bit0 -- data out ready            (IOP331 DATA WRITE OK)
+** bit1 -- data in has been read (IOP331 DATA READ OK)
** 3. Index Memory Usage
** offset 0xf00 : for RS232 out (request buffer)
** offset 0xe00 : for RS232 in (scratch buffer)
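
Illustrative sketch, not from this patch: raising the RS-232 emulation
doorbell bits described above. It assumes only the btag[0]/bhandle[0]
pair and the struct HBA_MessageUnit layout defined in this header; the
function name is invented for the example.

	static void
	arcmsr_hba_signal_rs232(struct AdapterControlBlock *acb, int wrote)
	{
		/* bit0: driver put data in; bit1: driver read the IOP's data */
		u_int32_t bit = wrote ? ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK :
		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK;

		bus_space_write_4(acb->btag[0], acb->bhandle[0],
		    offsetof(struct HBA_MessageUnit, inbound_doorbell), bit);
	}
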
@@ -710,66 +732,66 @@ struct MessageUnit_UNION
** 5. PostQ
** All SCSI commands must be sent through the postQ:
** (inbound queue port) Request frame must be 32 bytes aligned
-** # bit27--bit31 => flag for post ccb
-** # bit0--bit26 => real address (bit27--bit31) of post arcmsr_cdb
-** bit31 : 0 : 256 bytes frame
-** 1 : 512 bytes frame
-** bit30 : 0 : normal request
-** 1 : BIOS request
-** bit29 : reserved
-** bit28 : reserved
-** bit27 : reserved
+** # bit27--bit31 => flag for post ccb
+** # bit0--bit26 => real address (bit27--bit31) of post arcmsr_cdb
+** bit31 : 0 : 256 bytes frame
+** 1 : 512 bytes frame
+** bit30 : 0 : normal request
+** 1 : BIOS request
+** bit29 : reserved
+** bit28 : reserved
+** bit27 : reserved
** -------------------------------------------------------------------------------
** (outbound queue port) Request reply
-** # bit27--bit31 => flag for reply
-** # bit0--bit26 => real address (bit27--bit31) of reply arcmsr_cdb
-** bit31 : must be 0 (for this type of reply)
-** bit30 : reserved for BIOS handshake
-** bit29 : reserved
-** bit28 : 0 : no error, ignore AdapStatus/DevStatus/SenseData
-** 1 : Error, error code in AdapStatus/DevStatus/SenseData
-** bit27 : reserved
+** # bit27--bit31 => flag for reply
+** # bit0--bit26 => real address (bit27--bit31) of reply arcmsr_cdb
+** bit31 : must be 0 (for this type of reply)
+** bit30 : reserved for BIOS handshake
+** bit29 : reserved
+** bit28 : 0 : no error, ignore AdapStatus/DevStatus/SenseData
+** 1 : Error, error code in AdapStatus/DevStatus/SenseData
+** bit27 : reserved
** 6. BIOS request
** All BIOS requests are the same as requests from the PostQ,
** except:
** Request frame is sent from configuration space
-** offset: 0x78 : Request Frame (bit30 == 1)
-** offset: 0x18 : writeonly to generate IRQ to IOP331
+** offset: 0x78 : Request Frame (bit30 == 1)
+**       offset: 0x18 : write-only to generate IRQ to IOP331
** Completion of request:
-** (bit30 == 0, bit28==err flag)
+** (bit30 == 0, bit28==err flag)
** 7. Definition of SGL entry (structure)
** 8. Message1 Out - Diag Status Code (????)
** 9. Message0 message code :
** 0x00 : NOP
** 0x01 : Get Config ->offset 0xa00 :for outbound message code msgcode_rwbuffer (IOP331 send to driver)
-** Signature 0x87974060(4)
-** Request len 0x00000200(4)
-** numbers of queue 0x00000100(4)
-** SDRAM Size 0x00000100(4)-->256 MB
-** IDE Channels 0x00000008(4)
-** vendor 40 bytes char
-** model 8 bytes char
-** FirmVer 16 bytes char
-** Device Map 16 bytes char
+** Signature 0x87974060(4)
+** Request len 0x00000200(4)
+** numbers of queue 0x00000100(4)
+** SDRAM Size 0x00000100(4)-->256 MB
+** IDE Channels 0x00000008(4)
+** vendor 40 bytes char
+** model 8 bytes char
+** FirmVer 16 bytes char
+** Device Map 16 bytes char
**
** FirmwareVersion DWORD <== Added for checking of new firmware capability
** 0x02 : Set Config ->offset 0xa00 : for inbound message code msgcode_rwbuffer (driver send to IOP331)
-** Signature 0x87974063(4)
-** UPPER32 of Request Frame (4)-->Driver Only
+** Signature 0x87974063(4)
+** UPPER32 of Request Frame (4)-->Driver Only
** 0x03 : Reset (Abort all queued Command)
** 0x04 : Stop Background Activity
** 0x05 : Flush Cache
** 0x06 : Start Background Activity (re-start if background is halted)
** 0x07 : Check If Host Command Pending (Novell May Need This Function)
** 0x08 : Set controller time ->offset 0xa00 : for inbound message code msgcode_rwbuffer (driver to IOP331)
-** byte 0 : 0xaa <-- signature
-** byte 1 : 0x55 <-- signature
-** byte 2 : year (04)
-** byte 3 : month (1..12)
-** byte 4 : date (1..31)
-** byte 5 : hour (0..23)
-** byte 6 : minute (0..59)
-** byte 7 : second (0..59)
+** byte 0 : 0xaa <-- signature
+** byte 1 : 0x55 <-- signature
+** byte 2 : year (04)
+** byte 3 : month (1..12)
+** byte 4 : date (1..31)
+** byte 5 : hour (0..23)
+** byte 6 : minute (0..59)
+** byte 7 : second (0..59)
** *********************************************************************************
** Porting Of LSI2108/2116 Based PCIE SAS/6G host raid adapter
** ==> Difference from IOP348
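
Illustrative sketch, not from this patch: packing the "set controller
time" payload laid out above (0xaa/0x55 signature, then year through
second, one byte each) into the two little-endian 32-bit words a driver
would place in msgcode_rwbuffer. The function name is invented.

	static void
	arcmsr_pack_settime(u_int32_t datetime[2], u_int8_t year,
	    u_int8_t month, u_int8_t day, u_int8_t hour, u_int8_t min,
	    u_int8_t sec)
	{
		datetime[0] = 0xaa | (0x55 << 8) |	/* signature */
		    ((u_int32_t)year << 16) | ((u_int32_t)month << 24);
		datetime[1] = day | ((u_int32_t)hour << 8) |
		    ((u_int32_t)min << 16) | ((u_int32_t)sec << 24);
	}
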
@@ -788,33 +810,33 @@ struct MessageUnit_UNION
** b. Message0: message code
** 0x00 : NOP
** 0x01 : Get Config ->offset 0xB8 :for outbound message code msgcode_rwbuffer (IOP send to driver)
-** Signature 0x87974060(4)
-** Request len 0x00000200(4)
-** numbers of queue 0x00000100(4)
-** SDRAM Size 0x00000100(4)-->256 MB
-** IDE Channels 0x00000008(4)
-** vendor 40 bytes char
-** model 8 bytes char
-** FirmVer 16 bytes char
-** Device Map 16 bytes char
-** cfgVersion ULONG <== Added for checking of new firmware capability
+** Signature 0x87974060(4)
+** Request len 0x00000200(4)
+** numbers of queue 0x00000100(4)
+** SDRAM Size 0x00000100(4)-->256 MB
+** IDE Channels 0x00000008(4)
+** vendor 40 bytes char
+** model 8 bytes char
+** FirmVer 16 bytes char
+** Device Map 16 bytes char
+** cfgVersion ULONG <== Added for checking of new firmware capability
** 0x02 : Set Config ->offset 0xB0 :for inbound message code msgcode_rwbuffer (driver send to IOP)
-** Signature 0x87974063(4)
-** UPPER32 of Request Frame (4)-->Driver Only
+** Signature 0x87974063(4)
+** UPPER32 of Request Frame (4)-->Driver Only
** 0x03 : Reset (Abort all queued Command)
** 0x04 : Stop Background Activity
** 0x05 : Flush Cache
** 0x06 : Start Background Activity (re-start if background is halted)
** 0x07 : Check If Host Command Pending (Novell May Need This Function)
** 0x08 : Set controller time ->offset 0xB0 : for inbound message code msgcode_rwbuffer (driver to IOP)
-** byte 0 : 0xaa <-- signature
-** byte 1 : 0x55 <-- signature
-** byte 2 : year (04)
-** byte 3 : month (1..12)
-** byte 4 : date (1..31)
-** byte 5 : hour (0..23)
-** byte 6 : minute (0..59)
-** byte 7 : second (0..59)
+** byte 0 : 0xaa <-- signature
+** byte 1 : 0x55 <-- signature
+** byte 2 : year (04)
+** byte 3 : month (1..12)
+** byte 4 : date (1..31)
+** byte 5 : hour (0..23)
+** byte 6 : minute (0..59)
+** byte 7 : second (0..59)
**
** <2> Doorbell Register is used for RS-232 emulation
** <A> different clear register
@@ -907,21 +929,21 @@ struct MessageUnit_UNION
*/
/* size 8 bytes */
/* 32bit Scatter-Gather list */
-struct SG32ENTRY { /* length bit 24 == 0 */
- u_int32_t length; /* high 8 bit == flag,low 24 bit == length */
- u_int32_t address;
+struct SG32ENTRY { /* length bit 24 == 0 */
+ u_int32_t length; /* high 8 bit == flag,low 24 bit == length */
+ u_int32_t address;
};
/* size 12 bytes */
/* 64bit Scatter-Gather list */
-struct SG64ENTRY { /* length bit 24 == 1 */
- u_int32_t length; /* high 8 bit == flag,low 24 bit == length */
- u_int32_t address;
- u_int32_t addresshigh;
+struct SG64ENTRY { /* length bit 24 == 1 */
+ u_int32_t length; /* high 8 bit == flag,low 24 bit == length */
+ u_int32_t address;
+ u_int32_t addresshigh;
};
struct SGENTRY_UNION {
union {
- struct SG32ENTRY sg32entry; /* 30h Scatter gather address */
- struct SG64ENTRY sg64entry; /* 30h */
+ struct SG32ENTRY sg32entry; /* 30h Scatter gather address */
+ struct SG64ENTRY sg64entry; /* 30h */
}u;
};
/*
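
Illustrative sketch, not from this patch: filling one SG64ENTRY per the
layout above; the low 24 bits of length carry the byte count and bit24
(IS_SG64_ADDR, defined earlier in this header) marks a 64-bit entry.

	static void
	arcmsr_fill_sg64(struct SG64ENTRY *sg, u_int64_t paddr, u_int32_t len)
	{
		sg->length = (len & 0x00FFFFFF) | IS_SG64_ADDR;	/* bit24 set */
		sg->address = (u_int32_t)paddr;
		sg->addresshigh = (u_int32_t)(paddr >> 32);
	}
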
@@ -931,14 +953,14 @@ struct SGENTRY_UNION {
*/
struct QBUFFER {
u_int32_t data_len;
- u_int8_t data[124];
+ u_int8_t data[124];
};
/*
**********************************
*/
typedef struct PHYS_ADDR64 {
- u_int32_t phyadd_low;
- u_int32_t phyadd_high;
+ u_int32_t phyadd_low;
+ u_int32_t phyadd_high;
}PHYSADDR64;
/*
************************************************************************************************
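
Illustrative sketch, not from this patch: staging ioctl bytes into the
fixed 124-byte QBUFFER above. Clamping to sizeof(qb->data) is the only
invariant the structure imposes; the helper name is invented.

	static u_int32_t
	arcmsr_fill_qbuffer(struct QBUFFER *qb, const u_int8_t *src,
	    u_int32_t len)
	{
		u_int32_t n = (len > sizeof(qb->data)) ? sizeof(qb->data) : len;

		bcopy(src, qb->data, n);
		qb->data_len = n;
		return (n);		/* bytes actually queued */
	}
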
@@ -958,11 +980,11 @@ struct FIRMWARE_INFO {
u_int32_t ide_channels; /*4,16-19*/
char vendor[40]; /*5,20-59*/
char model[8]; /*15,60-67*/
- char firmware_ver[16]; /*17,68-83*/
+ char firmware_ver[16]; /*17,68-83*/
char device_map[16]; /*21,84-99*/
- u_int32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
- char cfgSerial[16]; /*26,104-119*/
- u_int32_t cfgPicStatus; /*30,120-123*/
+ u_int32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
+ char cfgSerial[16]; /*26,104-119*/
+ u_int32_t cfgPicStatus; /*30,120-123*/
};
/* (A) For cfgVersion in FIRMWARE_INFO
** if low BYTE (byte#0) >= 3 (version 3)
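
Illustrative sketch, not from this patch: validating a Get Config reply
against struct FIRMWARE_INFO above. The leading signature member is not
shown in this hunk but follows from the 0x87974060 layout documented in
the spec comments earlier in this header; treat it as an assumption.

	static int
	arcmsr_check_fwinfo(const struct FIRMWARE_INFO *info)
	{
		if (info->signature != 0x87974060)	/* assumed member */
			return (EINVAL);		/* not a valid reply */
		return (0);
	}
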
@@ -1022,8 +1044,8 @@ struct ARCMSR_CDB {
u_int8_t SenseData[15]; /* 21h output */
union {
- struct SG32ENTRY sg32entry[ARCMSR_MAX_SG_ENTRIES]; /* 30h Scatter gather address */
- struct SG64ENTRY sg64entry[ARCMSR_MAX_SG_ENTRIES]; /* 30h */
+ struct SG32ENTRY sg32entry[ARCMSR_MAX_SG_ENTRIES]; /* 30h Scatter gather address */
+ struct SG64ENTRY sg64entry[ARCMSR_MAX_SG_ENTRIES]; /* 30h */
} u;
};
/* CDB flag */
@@ -1044,9 +1066,9 @@ struct ARCMSR_CDB {
#define SCSISTAT_COMMAND_TERMINATED 0x22
#define SCSISTAT_QUEUE_FULL 0x28
/* DeviceStatus */
-#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
-#define ARCMSR_DEV_ABORTED 0xF1
-#define ARCMSR_DEV_INIT_FAIL 0xF2
+#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
+#define ARCMSR_DEV_ABORTED 0xF1
+#define ARCMSR_DEV_INIT_FAIL 0xF2
/*
*********************************************************************
** Command Control Block (SrbExtension)
@@ -1056,40 +1078,40 @@ struct ARCMSR_CDB {
*********************************************************************
*/
struct CommandControlBlock {
- struct ARCMSR_CDB arcmsr_cdb; /* 0 -503 (size of CDB=504): arcmsr messenger scsi command descriptor size 504 bytes */
- u_int32_t cdb_phyaddr_low; /* 504-507 */
- u_int32_t arc_cdb_size; /* 508-511 */
+ struct ARCMSR_CDB arcmsr_cdb; /* 0 -503 (size of CDB=504): arcmsr messenger scsi command descriptor size 504 bytes */
+ u_int32_t cdb_phyaddr_low; /* 504-507 */
+ u_int32_t arc_cdb_size; /* 508-511 */
/* ======================512+32 bytes============================ */
- union ccb *pccb; /* 512-515 516-519 pointer of freebsd scsi command */
- struct AdapterControlBlock *acb; /* 520-523 524-527 */
- bus_dmamap_t dm_segs_dmamap; /* 528-531 532-535 */
- u_int16_t srb_flags; /* 536-537 */
- u_int16_t srb_state; /* 538-539 */
- u_int32_t cdb_phyaddr_high; /* 540-543 */
- struct callout ccb_callout;
+ union ccb *pccb; /* 512-515 516-519 pointer of freebsd scsi command */
+ struct AdapterControlBlock *acb; /* 520-523 524-527 */
+ bus_dmamap_t dm_segs_dmamap; /* 528-531 532-535 */
+ u_int16_t srb_flags; /* 536-537 */
+ u_int16_t srb_state; /* 538-539 */
+ u_int32_t cdb_phyaddr_high; /* 540-543 */
+ struct callout ccb_callout;
/* ========================================================== */
};
/* srb_flags */
-#define SRB_FLAG_READ 0x0000
-#define SRB_FLAG_WRITE 0x0001
-#define SRB_FLAG_ERROR 0x0002
-#define SRB_FLAG_FLUSHCACHE 0x0004
+#define SRB_FLAG_READ 0x0000
+#define SRB_FLAG_WRITE 0x0001
+#define SRB_FLAG_ERROR 0x0002
+#define SRB_FLAG_FLUSHCACHE 0x0004
#define SRB_FLAG_MASTER_ABORTED 0x0008
-#define SRB_FLAG_DMAVALID 0x0010
+#define SRB_FLAG_DMAVALID 0x0010
#define SRB_FLAG_DMACONSISTENT 0x0020
-#define SRB_FLAG_DMAWRITE 0x0040
-#define SRB_FLAG_PKTBIND 0x0080
+#define SRB_FLAG_DMAWRITE 0x0040
+#define SRB_FLAG_PKTBIND 0x0080
#define SRB_FLAG_TIMER_START 0x0080
/* srb_state */
-#define ARCMSR_SRB_DONE 0x0000
-#define ARCMSR_SRB_UNBUILD 0x0000
-#define ARCMSR_SRB_TIMEOUT 0x1111
-#define ARCMSR_SRB_RETRY 0x2222
-#define ARCMSR_SRB_START 0x55AA
-#define ARCMSR_SRB_PENDING 0xAA55
-#define ARCMSR_SRB_RESET 0xA5A5
-#define ARCMSR_SRB_ABORTED 0x5A5A
-#define ARCMSR_SRB_ILLEGAL 0xFFFF
+#define ARCMSR_SRB_DONE 0x0000
+#define ARCMSR_SRB_UNBUILD 0x0000
+#define ARCMSR_SRB_TIMEOUT 0x1111
+#define ARCMSR_SRB_RETRY 0x2222
+#define ARCMSR_SRB_START 0x55AA
+#define ARCMSR_SRB_PENDING 0xAA55
+#define ARCMSR_SRB_RESET 0xA5A5
+#define ARCMSR_SRB_ABORTED 0x5A5A
+#define ARCMSR_SRB_ILLEGAL 0xFFFF
#define SRB_SIZE ((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0)
#define ARCMSR_SRBS_POOL_SIZE (SRB_SIZE * ARCMSR_MAX_FREESRB_NUM)
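
A quick sanity check, not from this patch: SRB_SIZE above rounds each
control block up to a 32-byte boundary, which the post queue requires
("request frame must be 32 bytes aligned"). Assuming a C11 toolchain:

	_Static_assert((SRB_SIZE & 0x1F) == 0,
	    "SRB_SIZE must be a 32-byte multiple for the post queue");
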
@@ -1099,108 +1121,109 @@ struct CommandControlBlock {
** Adapter Control Block
*********************************************************************
*/
-#define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
-#define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
-#define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc L IOP */
-#define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd M IOP */
+#define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
+#define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
+#define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc L IOP */
+#define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd M IOP */
struct AdapterControlBlock {
- u_int32_t adapter_type; /* adapter A,B..... */
+ u_int32_t adapter_type; /* adapter A,B..... */
- bus_space_tag_t btag[2];
- bus_space_handle_t bhandle[2];
- bus_dma_tag_t parent_dmat;
- bus_dma_tag_t dm_segs_dmat; /* dmat for buffer I/O */
- bus_dma_tag_t srb_dmat; /* dmat for freesrb */
- bus_dmamap_t srb_dmamap;
- device_t pci_dev;
+ bus_space_tag_t btag[2];
+ bus_space_handle_t bhandle[2];
+ bus_dma_tag_t parent_dmat;
+ bus_dma_tag_t dm_segs_dmat; /* dmat for buffer I/O */
+ bus_dma_tag_t srb_dmat; /* dmat for freesrb */
+ bus_dmamap_t srb_dmamap;
+ device_t pci_dev;
#if __FreeBSD_version < 503000
- dev_t ioctl_dev;
+ dev_t ioctl_dev;
#else
- struct cdev *ioctl_dev;
+ struct cdev *ioctl_dev;
#endif
- int pci_unit;
+ int pci_unit;
- struct resource *sys_res_arcmsr[2];
- struct resource *irqres;
- void *ih; /* interrupt handle */
+ struct resource *sys_res_arcmsr[2];
+ struct resource *irqres;
+ void *ih; /* interrupt handle */
/* Hooks into the CAM XPT */
- struct cam_sim *psim;
- struct cam_path *ppath;
- u_int8_t *uncacheptr;
- unsigned long vir2phy_offset;
+ struct cam_sim *psim;
+ struct cam_path *ppath;
+ u_int8_t *uncacheptr;
+ unsigned long vir2phy_offset;
union {
- unsigned long phyaddr;
+ unsigned long phyaddr;
struct {
- u_int32_t phyadd_low;
- u_int32_t phyadd_high;
+ u_int32_t phyadd_low;
+ u_int32_t phyadd_high;
}B;
- } srb_phyaddr;
+	} srb_phyaddr;
// unsigned long srb_phyaddr;
/* Offset is used in making arc cdb physical to virtual calculations */
- u_int32_t outbound_int_enable;
+ u_int32_t outbound_int_enable;
- struct MessageUnit_UNION *pmu; /* message unit ATU inbound base address0 */
+ struct MessageUnit_UNION *pmu; /* message unit ATU inbound base address0 */
- u_int8_t adapter_index; /* */
- u_int8_t irq;
- u_int16_t acb_flags; /* */
+ u_int8_t adapter_index;
+ u_int8_t irq;
+ u_int16_t acb_flags;
struct CommandControlBlock *psrb_pool[ARCMSR_MAX_FREESRB_NUM]; /* serial srb pointer array */
struct CommandControlBlock *srbworkingQ[ARCMSR_MAX_FREESRB_NUM]; /* working srb pointer array */
- int32_t workingsrb_doneindex; /* done srb array index */
- int32_t workingsrb_startindex; /* start srb array index */
- int32_t srboutstandingcount;
+ int32_t workingsrb_doneindex; /* done srb array index */
+ int32_t workingsrb_startindex; /* start srb array index */
+ int32_t srboutstandingcount;
- u_int8_t rqbuffer[ARCMSR_MAX_QBUFFER]; /* data collection buffer for read from 80331 */
- u_int32_t rqbuf_firstindex; /* first of read buffer */
- u_int32_t rqbuf_lastindex; /* last of read buffer */
+ u_int8_t rqbuffer[ARCMSR_MAX_QBUFFER]; /* data collection buffer for read from 80331 */
+ u_int32_t rqbuf_firstindex; /* first of read buffer */
+ u_int32_t rqbuf_lastindex; /* last of read buffer */
- u_int8_t wqbuffer[ARCMSR_MAX_QBUFFER]; /* data collection buffer for write to 80331 */
- u_int32_t wqbuf_firstindex; /* first of write buffer */
- u_int32_t wqbuf_lastindex; /* last of write buffer */
+ u_int8_t wqbuffer[ARCMSR_MAX_QBUFFER]; /* data collection buffer for write to 80331 */
+ u_int32_t wqbuf_firstindex; /* first of write buffer */
+ u_int32_t wqbuf_lastindex; /* last of write buffer */
- arcmsr_lock_t isr_lock;
- arcmsr_lock_t srb_lock;
- arcmsr_lock_t postDone_lock;
- arcmsr_lock_t qbuffer_lock;
+ arcmsr_lock_t isr_lock;
+ arcmsr_lock_t srb_lock;
+ arcmsr_lock_t postDone_lock;
+ arcmsr_lock_t qbuffer_lock;
- u_int8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN]; /* id0 ..... id15,lun0...lun7 */
- u_int32_t num_resets;
- u_int32_t num_aborts;
- u_int32_t firm_request_len; /*1,04-07*/
- u_int32_t firm_numbers_queue; /*2,08-11*/
- u_int32_t firm_sdram_size; /*3,12-15*/
- u_int32_t firm_ide_channels; /*4,16-19*/
- u_int32_t firm_cfg_version;
- char firm_model[12]; /*15,60-67*/
- char firm_version[20]; /*17,68-83*/
- char device_map[20]; /*21,84-99 */
- struct callout devmap_callout;
- u_int32_t pktRequestCount;
- u_int32_t pktReturnCount;
- u_int32_t vendor_device_id;
- u_int32_t adapter_bus_speed;
- u_int32_t maxOutstanding;
+ u_int8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN]; /* id0 ..... id15,lun0...lun7 */
+ u_int32_t num_resets;
+ u_int32_t num_aborts;
+ u_int32_t firm_request_len; /*1,04-07*/
+ u_int32_t firm_numbers_queue; /*2,08-11*/
+ u_int32_t firm_sdram_size; /*3,12-15*/
+ u_int32_t firm_ide_channels; /*4,16-19*/
+ u_int32_t firm_cfg_version;
+ char firm_model[12]; /*15,60-67*/
+ char firm_version[20]; /*17,68-83*/
+ char device_map[20]; /*21,84-99 */
+ struct callout devmap_callout;
+ u_int32_t pktRequestCount;
+ u_int32_t pktReturnCount;
+ u_int32_t vendor_device_id;
+ u_int32_t adapter_bus_speed;
+ u_int32_t maxOutstanding;
+ u_int16_t sub_device_id;
};/* HW_DEVICE_EXTENSION */
/* acb_flags */
#define ACB_F_SCSISTOPADAPTER 0x0001
-#define ACB_F_MSG_STOP_BGRB 0x0002 /* stop RAID background rebuild */
-#define ACB_F_MSG_START_BGRB 0x0004 /* stop RAID background rebuild */
-#define ACB_F_IOPDATA_OVERFLOW 0x0008 /* iop ioctl data rqbuffer overflow */
-#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010 /* ioctl clear wqbuffer */
-#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020 /* ioctl clear rqbuffer */
+#define ACB_F_MSG_STOP_BGRB 0x0002 /* stop RAID background rebuild */
+#define ACB_F_MSG_START_BGRB		0x0004	/* start RAID background rebuild */
+#define ACB_F_IOPDATA_OVERFLOW 0x0008 /* iop ioctl data rqbuffer overflow */
+#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010 /* ioctl clear wqbuffer */
+#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020 /* ioctl clear rqbuffer */
#define ACB_F_MESSAGE_WQBUFFER_READ 0x0040
#define ACB_F_BUS_RESET 0x0080
-#define ACB_F_IOP_INITED 0x0100 /* iop init */
-#define ACB_F_MAPFREESRB_FAILD 0x0200 /* arcmsr_map_freesrb faild */
+#define ACB_F_IOP_INITED 0x0100 /* iop init */
+#define ACB_F_MAPFREESRB_FAILD		0x0200	/* arcmsr_map_freesrb failed */
#define ACB_F_CAM_DEV_QFRZN 0x0400
-#define ACB_F_BUS_HANG_ON 0x0800 /* need hardware reset bus */
+#define ACB_F_BUS_HANG_ON 0x0800 /* need hardware reset bus */
#define ACB_F_SRB_FUNCTION_POWER 0x1000
/* devstate */
-#define ARECA_RAID_GONE 0x55
-#define ARECA_RAID_GOOD 0xaa
+#define ARECA_RAID_GONE 0x55
+#define ARECA_RAID_GOOD 0xaa
/* adapter_bus_speed */
#define ACB_BUS_SPEED_3G 0
#define ACB_BUS_SPEED_6G 1
@@ -1231,17 +1254,17 @@ struct SENSE_DATA {
** Peripheral Device Type definitions
**********************************
*/
-#define SCSI_DASD 0x00 /* Direct-access Device */
+#define SCSI_DASD 0x00 /* Direct-access Device */
#define SCSI_SEQACESS 0x01 /* Sequential-access device */
#define SCSI_PRINTER 0x02 /* Printer device */
#define SCSI_PROCESSOR 0x03 /* Processor device */
#define SCSI_WRITEONCE 0x04 /* Write-once device */
-#define SCSI_CDROM 0x05 /* CD-ROM device */
+#define SCSI_CDROM 0x05 /* CD-ROM device */
#define SCSI_SCANNER 0x06 /* Scanner device */
#define SCSI_OPTICAL 0x07 /* Optical memory device */
#define SCSI_MEDCHGR 0x08 /* Medium changer device */
-#define SCSI_COMM 0x09 /* Communications device */
-#define SCSI_NODEV 0x1F /* Unknown or no device type */
+#define SCSI_COMM 0x09 /* Communications device */
+#define SCSI_NODEV 0x1F /* Unknown or no device type */
/*
************************************************************************************************************
** @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
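
Illustrative sketch, not from this patch: decoding a type-A outbound
queue reply word per the PostQ layout documented in this header (bit28
is the error flag, bit0-bit26 hold the 32-byte-aligned frame address
shifted right by 5). vir2phy_offset comes from struct
AdapterControlBlock above; the function name is invented.

	static void
	arcmsr_reply_example(struct AdapterControlBlock *acb,
	    u_int32_t flag_srb)
	{
		struct CommandControlBlock *srb;
		int error;

		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) != 0;
		srb = (struct CommandControlBlock *)(acb->vir2phy_offset +
		    ((flag_srb & 0x07FFFFFF) << 5));
		/* ... hand srb and error to the completion path ... */
	}
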
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index dccd5b3..27ef8b3 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -99,6 +99,8 @@
#include <vm/swap_pager.h>
#include <vm/uma.h>
+#include <machine/bus.h>
+
#define MD_MODVER 1
#define MD_SHUTDOWN 0x10000 /* Tell worker thread to terminate. */
@@ -435,7 +437,7 @@ g_md_start(struct bio *bp)
#define MD_MALLOC_MOVE_CMP 5
static int
-md_malloc_move(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
+md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
void *ptr, u_char fill, int op)
{
struct sf_buf *sf;
@@ -497,7 +499,7 @@ md_malloc_move(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
}
break;
default:
- KASSERT(0, ("md_malloc_move unknown op %d\n", op));
+ KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
break;
}
if (error != 0)
@@ -520,10 +522,68 @@ md_malloc_move(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
}
static int
+md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
+ unsigned len, void *ptr, u_char fill, int op)
+{
+ bus_dma_segment_t *vlist;
+ uint8_t *p, *end, first;
+ off_t *uc;
+ int ma_offs, seg_len;
+
+ vlist = *pvlist;
+ ma_offs = *pma_offs;
+ uc = ptr;
+
+ for (; len != 0; len -= seg_len) {
+ seg_len = imin(vlist->ds_len - ma_offs, len);
+ p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
+ switch (op) {
+ case MD_MALLOC_MOVE_ZERO:
+ bzero(p, seg_len);
+ break;
+ case MD_MALLOC_MOVE_FILL:
+ memset(p, fill, seg_len);
+ break;
+ case MD_MALLOC_MOVE_READ:
+ bcopy(ptr, p, seg_len);
+ cpu_flush_dcache(p, seg_len);
+ break;
+ case MD_MALLOC_MOVE_WRITE:
+ bcopy(p, ptr, seg_len);
+ break;
+ case MD_MALLOC_MOVE_CMP:
+ end = p + seg_len;
+ first = *uc = *p;
+ /* Confirm all following bytes match the first */
+ while (++p < end) {
+ if (*p != first)
+ return (EDOOFUS);
+ }
+ break;
+ default:
+ KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
+ break;
+ }
+
+ ma_offs += seg_len;
+ if (ma_offs == vlist->ds_len) {
+ ma_offs = 0;
+ vlist++;
+ }
+ ptr = (uint8_t *)ptr + seg_len;
+ }
+ *pvlist = vlist;
+ *pma_offs = ma_offs;
+
+ return (0);
+}
+
+static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
u_char *dst;
vm_page_t *m;
+ bus_dma_segment_t *vlist;
int i, error, error1, ma_offs, notmapped;
off_t secno, nsec, uc;
uintptr_t sp, osp;
@@ -538,10 +598,16 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
}
notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
+ vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
+ (bus_dma_segment_t *)bp->bio_data : NULL;
if (notmapped) {
m = bp->bio_ma;
ma_offs = bp->bio_ma_offset;
dst = NULL;
+ KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
+ } else if (vlist != NULL) {
+ ma_offs = bp->bio_ma_offset;
+ dst = NULL;
} else {
dst = bp->bio_data;
}
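
Sketch, not from this patch: for BIO_VLIST requests, bio_data carries an
array of bus_dma_segment_t rather than the data itself, bio_ma_offset is
a byte offset into the first segment, and bio_ma_n counts the segments
(the vnode path below sizes its iovec array with it). The bytes
reachable through the list are:

	off_t avail;
	int i;

	avail = -(off_t)bp->bio_ma_offset;	/* skip into first segment */
	for (i = 0; i < bp->bio_ma_n; i++)
		avail += vlist[i].ds_len;	/* bp->bio_length must fit */
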
@@ -557,23 +623,36 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
} else if (bp->bio_cmd == BIO_READ) {
if (osp == 0) {
if (notmapped) {
- error = md_malloc_move(&m, &ma_offs,
+ error = md_malloc_move_ma(&m, &ma_offs,
sc->sectorsize, NULL, 0,
MD_MALLOC_MOVE_ZERO);
+ } else if (vlist != NULL) {
+ error = md_malloc_move_vlist(&vlist,
+ &ma_offs, sc->sectorsize, NULL, 0,
+ MD_MALLOC_MOVE_ZERO);
} else
bzero(dst, sc->sectorsize);
} else if (osp <= 255) {
if (notmapped) {
- error = md_malloc_move(&m, &ma_offs,
+ error = md_malloc_move_ma(&m, &ma_offs,
sc->sectorsize, NULL, osp,
MD_MALLOC_MOVE_FILL);
+ } else if (vlist != NULL) {
+ error = md_malloc_move_vlist(&vlist,
+ &ma_offs, sc->sectorsize, NULL, osp,
+ MD_MALLOC_MOVE_FILL);
} else
memset(dst, osp, sc->sectorsize);
} else {
if (notmapped) {
- error = md_malloc_move(&m, &ma_offs,
+ error = md_malloc_move_ma(&m, &ma_offs,
sc->sectorsize, (void *)osp, 0,
MD_MALLOC_MOVE_READ);
+ } else if (vlist != NULL) {
+ error = md_malloc_move_vlist(&vlist,
+ &ma_offs, sc->sectorsize,
+ (void *)osp, 0,
+ MD_MALLOC_MOVE_READ);
} else {
bcopy((void *)osp, dst, sc->sectorsize);
cpu_flush_dcache(dst, sc->sectorsize);
@@ -583,10 +662,15 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
} else if (bp->bio_cmd == BIO_WRITE) {
if (sc->flags & MD_COMPRESS) {
if (notmapped) {
- error1 = md_malloc_move(&m, &ma_offs,
+ error1 = md_malloc_move_ma(&m, &ma_offs,
sc->sectorsize, &uc, 0,
MD_MALLOC_MOVE_CMP);
i = error1 == 0 ? sc->sectorsize : 0;
+ } else if (vlist != NULL) {
+ error1 = md_malloc_move_vlist(&vlist,
+ &ma_offs, sc->sectorsize, &uc, 0,
+ MD_MALLOC_MOVE_CMP);
+ i = error1 == 0 ? sc->sectorsize : 0;
} else {
uc = dst[0];
for (i = 1; i < sc->sectorsize; i++) {
@@ -611,10 +695,15 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
break;
}
if (notmapped) {
- error = md_malloc_move(&m,
+ error = md_malloc_move_ma(&m,
&ma_offs, sc->sectorsize,
(void *)sp, 0,
MD_MALLOC_MOVE_WRITE);
+ } else if (vlist != NULL) {
+ error = md_malloc_move_vlist(
+ &vlist, &ma_offs,
+ sc->sectorsize, (void *)sp,
+ 0, MD_MALLOC_MOVE_WRITE);
} else {
bcopy(dst, (void *)sp,
sc->sectorsize);
@@ -622,10 +711,15 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
error = s_write(sc->indir, secno, sp);
} else {
if (notmapped) {
- error = md_malloc_move(&m,
+ error = md_malloc_move_ma(&m,
&ma_offs, sc->sectorsize,
(void *)osp, 0,
MD_MALLOC_MOVE_WRITE);
+ } else if (vlist != NULL) {
+ error = md_malloc_move_vlist(
+ &vlist, &ma_offs,
+ sc->sectorsize, (void *)osp,
+ 0, MD_MALLOC_MOVE_WRITE);
} else {
bcopy(dst, (void *)osp,
sc->sectorsize);
@@ -641,26 +735,78 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
if (error != 0)
break;
secno++;
- if (!notmapped)
+ if (!notmapped && vlist == NULL)
dst += sc->sectorsize;
}
bp->bio_resid = 0;
return (error);
}
+static void
+mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
+{
+ off_t seg_len;
+
+ while (offset >= vlist->ds_len) {
+ offset -= vlist->ds_len;
+ vlist++;
+ }
+
+ while (len != 0) {
+ seg_len = omin(len, vlist->ds_len - offset);
+ bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
+ seg_len);
+ offset = 0;
+ src = (uint8_t *)src + seg_len;
+ len -= seg_len;
+ vlist++;
+ }
+}
+
+static void
+mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
+{
+ off_t seg_len;
+
+ while (offset >= vlist->ds_len) {
+ offset -= vlist->ds_len;
+ vlist++;
+ }
+
+ while (len != 0) {
+ seg_len = omin(len, vlist->ds_len - offset);
+ bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
+ seg_len);
+ offset = 0;
+ dst = (uint8_t *)dst + seg_len;
+ len -= seg_len;
+ vlist++;
+ }
+}
+
static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
+ uint8_t *p;
+ p = sc->pl_ptr + bp->bio_offset;
switch (bp->bio_cmd) {
case BIO_READ:
- bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
- bp->bio_length);
+ if ((bp->bio_flags & BIO_VLIST) != 0) {
+ mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
+ bp->bio_ma_offset, bp->bio_length);
+ } else {
+ bcopy(p, bp->bio_data, bp->bio_length);
+ }
cpu_flush_dcache(bp->bio_data, bp->bio_length);
break;
case BIO_WRITE:
- bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
- bp->bio_length);
+ if ((bp->bio_flags & BIO_VLIST) != 0) {
+ mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
+ bp->bio_ma_offset, p, bp->bio_length);
+ } else {
+ bcopy(bp->bio_data, p, bp->bio_length);
+ }
break;
}
bp->bio_resid = 0;
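
Usage sketch, not from this patch: both helpers above first skip whole
segments covered by offset, then copy segment by segment, so the offset
only applies within the first touched segment. Reading one 512-byte
sector into a caller's list at byte offset 3072 (values invented) would
be:

	mdcopyto_vlist(sc->pl_ptr + bp->bio_offset,
	    (bus_dma_segment_t *)bp->bio_data, 3072, 512);
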
@@ -673,16 +819,23 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
int error;
struct uio auio;
struct iovec aiov;
+ struct iovec *piov;
struct mount *mp;
struct vnode *vp;
struct buf *pb;
+ bus_dma_segment_t *vlist;
struct thread *td;
- off_t end, zerosize;
+ off_t len, zerosize;
+ int ma_offs;
switch (bp->bio_cmd) {
case BIO_READ:
+ auio.uio_rw = UIO_READ;
+ break;
case BIO_WRITE:
case BIO_DELETE:
+ auio.uio_rw = UIO_WRITE;
+ break;
case BIO_FLUSH:
break;
default:
@@ -691,6 +844,9 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
td = curthread;
vp = sc->vnode;
+ pb = NULL;
+ piov = NULL;
+ ma_offs = bp->bio_ma_offset;
/*
* VNODE I/O
@@ -709,73 +865,66 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
return (error);
}
- bzero(&auio, sizeof(auio));
+ auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
+ auio.uio_resid = bp->bio_length;
+ auio.uio_segflg = UIO_SYSSPACE;
+ auio.uio_td = td;
- /*
- * Special case for BIO_DELETE. On the surface, this is very
- * similar to BIO_WRITE, except that we write from our own
- * fixed-length buffer, so we have to loop. The net result is
- * that the two cases end up having very little in common.
- */
if (bp->bio_cmd == BIO_DELETE) {
+ /*
+ * Emulate BIO_DELETE by writing zeros.
+ */
zerosize = ZERO_REGION_SIZE -
(ZERO_REGION_SIZE % sc->sectorsize);
- auio.uio_iov = &aiov;
- auio.uio_iovcnt = 1;
- auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
- auio.uio_segflg = UIO_SYSSPACE;
- auio.uio_rw = UIO_WRITE;
- auio.uio_td = td;
- end = bp->bio_offset + bp->bio_length;
- (void) vn_start_write(vp, &mp, V_WAIT);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
- error = 0;
- while (auio.uio_offset < end) {
- aiov.iov_base = __DECONST(void *, zero_region);
- aiov.iov_len = end - auio.uio_offset;
- if (aiov.iov_len > zerosize)
- aiov.iov_len = zerosize;
- auio.uio_resid = aiov.iov_len;
- error = VOP_WRITE(vp, &auio,
- sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
- if (error != 0)
- break;
+ auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
+ piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
+ auio.uio_iov = piov;
+ len = bp->bio_length;
+ while (len > 0) {
+ piov->iov_base = __DECONST(void *, zero_region);
+ piov->iov_len = len;
+ if (len > zerosize)
+ piov->iov_len = zerosize;
+ len -= piov->iov_len;
+ piov++;
}
- VOP_UNLOCK(vp, 0);
- vn_finished_write(mp);
- bp->bio_resid = end - auio.uio_offset;
- return (error);
- }
-
- KASSERT(bp->bio_length <= MAXPHYS, ("bio_length %jd",
- (uintmax_t)bp->bio_length));
- if ((bp->bio_flags & BIO_UNMAPPED) == 0) {
- pb = NULL;
- aiov.iov_base = bp->bio_data;
- } else {
+ piov = auio.uio_iov;
+ } else if ((bp->bio_flags & BIO_VLIST) != 0) {
+ piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
+ auio.uio_iov = piov;
+ vlist = (bus_dma_segment_t *)bp->bio_data;
+ len = bp->bio_length;
+ while (len > 0) {
+ piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
+ ma_offs);
+ piov->iov_len = vlist->ds_len - ma_offs;
+ if (piov->iov_len > len)
+ piov->iov_len = len;
+ len -= piov->iov_len;
+ ma_offs = 0;
+ vlist++;
+ piov++;
+ }
+ auio.uio_iovcnt = piov - auio.uio_iov;
+ piov = auio.uio_iov;
+ } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
pb = getpbuf(&md_vnode_pbuf_freecnt);
pmap_qenter((vm_offset_t)pb->b_data, bp->bio_ma, bp->bio_ma_n);
- aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
- bp->bio_ma_offset);
+ aiov.iov_base = (void *)((vm_offset_t)pb->b_data + ma_offs);
+ aiov.iov_len = bp->bio_length;
+ auio.uio_iov = &aiov;
+ auio.uio_iovcnt = 1;
+ } else {
+ aiov.iov_base = bp->bio_data;
+ aiov.iov_len = bp->bio_length;
+ auio.uio_iov = &aiov;
+ auio.uio_iovcnt = 1;
}
- aiov.iov_len = bp->bio_length;
- auio.uio_iov = &aiov;
- auio.uio_iovcnt = 1;
- auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
- auio.uio_segflg = UIO_SYSSPACE;
- if (bp->bio_cmd == BIO_READ)
- auio.uio_rw = UIO_READ;
- else if (bp->bio_cmd == BIO_WRITE)
- auio.uio_rw = UIO_WRITE;
- else
- panic("wrong BIO_OP in mdstart_vnode");
- auio.uio_resid = bp->bio_length;
- auio.uio_td = td;
/*
* When reading set IO_DIRECT to try to avoid double-caching
* the data. When writing IO_DIRECT is not optimal.
*/
- if (bp->bio_cmd == BIO_READ) {
+ if (auio.uio_rw == UIO_READ) {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
VOP_UNLOCK(vp, 0);
@@ -787,10 +936,15 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
}
- if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
+
+ if (pb) {
pmap_qremove((vm_offset_t)pb->b_data, bp->bio_ma_n);
relpbuf(pb, &md_vnode_pbuf_freecnt);
}
+
+ if (piov != NULL)
+ free(piov, M_MD);
+
bp->bio_resid = auio.uio_resid;
return (error);
}
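
Worked example, not from this patch: the BIO_DELETE path above now
builds one iovec per zero_region-sized chunk instead of looping over
VOP_WRITE. Assuming for illustration ZERO_REGION_SIZE of 64 KiB and a
512-byte sector:

	zerosize = 65536 - (65536 % 512);		/* = 65536 */
	auio.uio_iovcnt = howmany(1048576, 65536);	/* 1 MiB -> 16 iovecs */

every iov_base points at the shared zero_region and a single VOP_WRITE
consumes the whole array.
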
@@ -801,6 +955,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
vm_page_t m;
u_char *p;
vm_pindex_t i, lastp;
+ bus_dma_segment_t *vlist;
int rv, ma_offs, offs, len, lastend;
switch (bp->bio_cmd) {
@@ -813,7 +968,10 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
}
p = bp->bio_data;
- ma_offs = (bp->bio_flags & BIO_UNMAPPED) == 0 ? 0 : bp->bio_ma_offset;
+ ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
+ bp->bio_ma_offset : 0;
+ vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
+ (bus_dma_segment_t *)bp->bio_data : NULL;
/*
* offs is the offset at which to start operating on the
@@ -853,6 +1011,10 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
pmap_copy_pages(&m, offs, bp->bio_ma,
ma_offs, len);
+ } else if ((bp->bio_flags & BIO_VLIST) != 0) {
+ physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
+ vlist, ma_offs, len);
+ cpu_flush_dcache(p, len);
} else {
physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
cpu_flush_dcache(p, len);
@@ -869,6 +1031,9 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
pmap_copy_pages(bp->bio_ma, ma_offs, &m,
offs, len);
+ } else if ((bp->bio_flags & BIO_VLIST) != 0) {
+ physcopyin_vlist(vlist, ma_offs,
+ VM_PAGE_TO_PHYS(m) + offs, len);
} else {
physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
}
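
For readers following the BIO_VLIST plumbing: the new branch in mdstart_vnode() above turns the bus_dma_segment_t list carried in bio_data into an iovec array for VOP_READ()/VOP_WRITE(). A minimal userland-style sketch of that walk, with illustrative names (struct seg and vlist_to_iov are not kernel API):

#include <sys/types.h>
#include <sys/uio.h>

struct seg {			/* stand-in for bus_dma_segment_t */
	uintptr_t addr;
	size_t len;
};

static int
vlist_to_iov(const struct seg *vlist, int nseg, size_t off, size_t len,
    struct iovec *iov)
{
	int n;

	for (n = 0; len > 0 && n < nseg; n++, vlist++) {
		iov[n].iov_base = (char *)vlist->addr + off;
		iov[n].iov_len = vlist->len - off;
		if (iov[n].iov_len > len)
			iov[n].iov_len = len;
		len -= iov[n].iov_len;
		off = 0;	/* only the first segment carries the offset */
	}
	return (n);		/* becomes uio_iovcnt */
}
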
diff --git a/sys/dev/netmap/ixgbe_netmap.h b/sys/dev/netmap/ixgbe_netmap.h
index 3dc628a..f1f03cb 100644
--- a/sys/dev/netmap/ixgbe_netmap.h
+++ b/sys/dev/netmap/ixgbe_netmap.h
@@ -61,7 +61,8 @@
* count packets that might be missed due to lost interrupts.
*/
SYSCTL_DECL(_dev_netmap);
-static int ix_rx_miss, ix_rx_miss_bufs, ix_crcstrip;
+static int ix_rx_miss, ix_rx_miss_bufs;
+int ix_crcstrip;
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip,
CTLFLAG_RW, &ix_crcstrip, 0, "strip CRC on rx frames");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss,
diff --git a/sys/fs/nfs/nfs.h b/sys/fs/nfs/nfs.h
index be60c1c..5781459 100644
--- a/sys/fs/nfs/nfs.h
+++ b/sys/fs/nfs/nfs.h
@@ -96,12 +96,6 @@
#define NFSSESSIONHASHSIZE 20 /* Size of server session hash table */
#endif
#define NFSSTATEHASHSIZE 10 /* Size of server stateid hash table */
-#ifndef NFSUSERHASHSIZE
-#define NFSUSERHASHSIZE 30 /* Size of user id hash table */
-#endif
-#ifndef NFSGROUPHASHSIZE
-#define NFSGROUPHASHSIZE 5 /* Size of group id hash table */
-#endif
#ifndef NFSCLDELEGHIGHWATER
#define NFSCLDELEGHIGHWATER 10000 /* limit for client delegations */
#endif
@@ -204,6 +198,18 @@ struct nfsd_idargs {
int nid_usertimeout;/* User name timeout (minutes) */
u_char *nid_name; /* Name */
int nid_namelen; /* and its length */
+ gid_t *nid_grps; /* and the list */
+ int nid_ngroup; /* Size of groups list */
+};
+
+struct nfsd_oidargs {
+ int nid_flag; /* Flags (see below) */
+ uid_t nid_uid; /* user/group id */
+ gid_t nid_gid;
+ int nid_usermax; /* Upper bound on user name cache */
+ int nid_usertimeout;/* User name timeout (minutes) */
+ u_char *nid_name; /* Name */
+ int nid_namelen; /* and its length */
};
struct nfsd_clid {
diff --git a/sys/fs/nfs/nfs_commonport.c b/sys/fs/nfs/nfs_commonport.c
index 2406d34..85738af 100644
--- a/sys/fs/nfs/nfs_commonport.c
+++ b/sys/fs/nfs/nfs_commonport.c
@@ -63,6 +63,7 @@ int nfs_numnfscbd = 0;
int nfscl_debuglevel = 0;
char nfsv4_callbackaddr[INET6_ADDRSTRLEN];
struct callout newnfsd_callout;
+int nfsrv_lughashsize = 100;
void (*nfsd_call_servertimer)(void) = NULL;
void (*ncl_call_invalcaches)(struct vnode *) = NULL;
@@ -79,6 +80,9 @@ SYSCTL_STRING(_vfs_nfs, OID_AUTO, callback_addr, CTLFLAG_RW,
"NFSv4 callback addr for server to use");
SYSCTL_INT(_vfs_nfs, OID_AUTO, debuglevel, CTLFLAG_RW, &nfscl_debuglevel,
0, "Debug level for new nfs client");
+TUNABLE_INT("vfs.nfs.userhashsize", &nfsrv_lughashsize);
+SYSCTL_INT(_vfs_nfs, OID_AUTO, userhashsize, CTLFLAG_RDTUN, &nfsrv_lughashsize,
+ 0, "Size of hash tables for uid/name mapping");
/*
* Defines for malloc
@@ -445,9 +449,25 @@ nfssvc_call(struct thread *p, struct nfssvc_args *uap, struct ucred *cred)
{
int error = EINVAL;
struct nfsd_idargs nid;
+ struct nfsd_oidargs onid;
if (uap->flag & NFSSVC_IDNAME) {
- error = copyin(uap->argp, (caddr_t)&nid, sizeof (nid));
+ if ((uap->flag & NFSSVC_NEWSTRUCT) != 0)
+ error = copyin(uap->argp, &nid, sizeof(nid));
+ else {
+ error = copyin(uap->argp, &onid, sizeof(onid));
+ if (error == 0) {
+ nid.nid_flag = onid.nid_flag;
+ nid.nid_uid = onid.nid_uid;
+ nid.nid_gid = onid.nid_gid;
+ nid.nid_usermax = onid.nid_usermax;
+ nid.nid_usertimeout = onid.nid_usertimeout;
+ nid.nid_name = onid.nid_name;
+ nid.nid_namelen = onid.nid_namelen;
+ nid.nid_ngroup = 0;
+ nid.nid_grps = NULL;
+ }
+ }
if (error)
goto out;
error = nfssvc_idname(&nid);
@@ -604,6 +624,8 @@ nfscommon_modevent(module_t mod, int type, void *data)
nfsd_call_nfscommon = NULL;
callout_drain(&newnfsd_callout);
+ /* Clean out the name<-->id cache. */
+ nfsrv_cleanusergroup();
/* and get rid of the mutexes */
mtx_destroy(&nfs_nameid_mutex);
mtx_destroy(&newnfsd_mtx);
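
The NFSSVC_NEWSTRUCT handling added to nfssvc_call() above is a conventional versioned-ABI shim: old binaries pass the shorter structure, which the kernel widens with safe defaults. A minimal sketch of the pattern, assuming hypothetical oldargs/newargs types standing in for nfsd_oidargs/nfsd_idargs:

#include <sys/types.h>
#include <sys/systm.h>

#define NEWSTRUCT	0x1	/* stands in for NFSSVC_NEWSTRUCT */

struct oldargs {		/* the original ABI */
	int	flag;
	uid_t	uid;
};

struct newargs {		/* the extended ABI */
	int	flag;
	uid_t	uid;
	gid_t	*grps;		/* new field */
	int	ngroup;		/* new field */
};

static int
compat_copyin(void *uaddr, int flags, struct newargs *na)
{
	struct oldargs oa;
	int error;

	if ((flags & NEWSTRUCT) != 0)
		return (copyin(uaddr, na, sizeof(*na)));
	error = copyin(uaddr, &oa, sizeof(oa));
	if (error == 0) {
		na->flag = oa.flag;	/* widen field by field */
		na->uid = oa.uid;
		na->grps = NULL;	/* safe defaults for new fields */
		na->ngroup = 0;
	}
	return (error);
}
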
diff --git a/sys/fs/nfs/nfs_commonsubs.c b/sys/fs/nfs/nfs_commonsubs.c
index 6e0204b8..ba38e0e 100644
--- a/sys/fs/nfs/nfs_commonsubs.c
+++ b/sys/fs/nfs/nfs_commonsubs.c
@@ -44,6 +44,8 @@ __FBSDID("$FreeBSD$");
#include <fs/nfs/nfsport.h>
+#include <security/mac/mac_framework.h>
+
/*
* Data items converted to xdr at startup, since they are constant
* This is kinda hokey, but may save a little time doing byte swaps
@@ -68,6 +70,7 @@ int ncl_mbuf_mlen = MLEN;
int nfsd_enable_stringtouid = 0;
NFSNAMEIDMUTEX;
NFSSOCKMUTEX;
+extern int nfsrv_lughashsize;
/*
* This array of structures indicates, for V4:
@@ -154,11 +157,14 @@ static int nfsrv_usercnt = 0;
static int nfsrv_dnsnamelen;
static u_char *nfsrv_dnsname = NULL;
static int nfsrv_usermax = 999999999;
-static struct nfsuserhashhead nfsuserhash[NFSUSERHASHSIZE];
-static struct nfsuserhashhead nfsusernamehash[NFSUSERHASHSIZE];
-static struct nfsuserhashhead nfsgrouphash[NFSGROUPHASHSIZE];
-static struct nfsuserhashhead nfsgroupnamehash[NFSGROUPHASHSIZE];
-static struct nfsuserlruhead nfsuserlruhead;
+struct nfsrv_lughash {
+ struct mtx mtx;
+ struct nfsuserhashhead lughead;
+};
+static struct nfsrv_lughash *nfsuserhash;
+static struct nfsrv_lughash *nfsusernamehash;
+static struct nfsrv_lughash *nfsgrouphash;
+static struct nfsrv_lughash *nfsgroupnamehash;
/*
* This static array indicates whether or not the RPC generates a large
@@ -177,7 +183,7 @@ static void nfsv4_wanted(struct nfsv4lock *lp);
static int nfsrv_cmpmixedcase(u_char *cp, u_char *cp2, int len);
static int nfsrv_getuser(int procnum, uid_t uid, gid_t gid, char *name,
NFSPROC_T *p);
-static void nfsrv_removeuser(struct nfsusrgrp *usrp);
+static void nfsrv_removeuser(struct nfsusrgrp *usrp, int isuser);
static int nfsrv_getrefstr(struct nfsrv_descript *, u_char **, u_char **,
int *, int *);
static void nfsrv_refstrbigenough(int, u_char **, u_char **, int *);
@@ -2541,18 +2547,17 @@ nfsv4_uidtostr(uid_t uid, u_char **cpp, int *retlenp, NFSPROC_T *p)
u_char *cp = *cpp;
uid_t tmp;
int cnt, hasampersand, len = NFSV4_SMALLSTR, ret;
+ struct nfsrv_lughash *hp;
cnt = 0;
tryagain:
- NFSLOCKNAMEID();
- if (nfsrv_dnsname) {
+ if (nfsrv_dnsnamelen > 0) {
/*
* Always map nfsrv_defaultuid to "nobody".
*/
if (uid == nfsrv_defaultuid) {
i = nfsrv_dnsnamelen + 7;
if (i > len) {
- NFSUNLOCKNAMEID();
if (len > NFSV4_SMALLSTR)
free(cp, M_NFSSTRING);
cp = malloc(i, M_NFSSTRING, M_WAITOK);
@@ -2564,11 +2569,12 @@ tryagain:
NFSBCOPY("nobody@", cp, 7);
cp += 7;
NFSBCOPY(nfsrv_dnsname, cp, nfsrv_dnsnamelen);
- NFSUNLOCKNAMEID();
return;
}
hasampersand = 0;
- LIST_FOREACH(usrp, NFSUSERHASH(uid), lug_numhash) {
+ hp = NFSUSERHASH(uid);
+ mtx_lock(&hp->mtx);
+ TAILQ_FOREACH(usrp, &hp->lughead, lug_numhash) {
if (usrp->lug_uid == uid) {
if (usrp->lug_expiry < NFSD_MONOSEC)
break;
@@ -2588,7 +2594,7 @@ tryagain:
i = usrp->lug_namelen +
nfsrv_dnsnamelen + 1;
if (i > len) {
- NFSUNLOCKNAMEID();
+ mtx_unlock(&hp->mtx);
if (len > NFSV4_SMALLSTR)
free(cp, M_NFSSTRING);
cp = malloc(i, M_NFSSTRING, M_WAITOK);
@@ -2603,20 +2609,19 @@ tryagain:
*cp++ = '@';
NFSBCOPY(nfsrv_dnsname, cp, nfsrv_dnsnamelen);
}
- TAILQ_REMOVE(&nfsuserlruhead, usrp, lug_lru);
- TAILQ_INSERT_TAIL(&nfsuserlruhead, usrp, lug_lru);
- NFSUNLOCKNAMEID();
+ TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+ TAILQ_INSERT_TAIL(&hp->lughead, usrp,
+ lug_numhash);
+ mtx_unlock(&hp->mtx);
return;
}
}
- NFSUNLOCKNAMEID();
+ mtx_unlock(&hp->mtx);
cnt++;
ret = nfsrv_getuser(RPCNFSUSERD_GETUID, uid, (gid_t)0,
NULL, p);
if (ret == 0 && cnt < 2)
goto tryagain;
- } else {
- NFSUNLOCKNAMEID();
}
/*
@@ -2640,6 +2645,52 @@ tryagain:
}
/*
+ * Get a credential for the uid with the server's group list.
+ * If none is found, just return the credential passed in after
+ * logging a warning message.
+ */
+struct ucred *
+nfsrv_getgrpscred(struct ucred *oldcred)
+{
+ struct nfsusrgrp *usrp;
+ struct ucred *newcred;
+ int cnt, ret;
+ uid_t uid;
+ struct nfsrv_lughash *hp;
+
+ cnt = 0;
+ uid = oldcred->cr_uid;
+tryagain:
+ if (nfsrv_dnsnamelen > 0) {
+ hp = NFSUSERHASH(uid);
+ mtx_lock(&hp->mtx);
+ TAILQ_FOREACH(usrp, &hp->lughead, lug_numhash) {
+ if (usrp->lug_uid == uid) {
+ if (usrp->lug_expiry < NFSD_MONOSEC)
+ break;
+ if (usrp->lug_cred != NULL) {
+ newcred = crhold(usrp->lug_cred);
+ crfree(oldcred);
+ } else
+ newcred = oldcred;
+ TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+ TAILQ_INSERT_TAIL(&hp->lughead, usrp,
+ lug_numhash);
+ mtx_unlock(&hp->mtx);
+ return (newcred);
+ }
+ }
+ mtx_unlock(&hp->mtx);
+ cnt++;
+ ret = nfsrv_getuser(RPCNFSUSERD_GETUID, uid, (gid_t)0,
+ NULL, curthread);
+ if (ret == 0 && cnt < 2)
+ goto tryagain;
+ }
+ return (oldcred);
+}
+
+/*
* Convert a string to a uid.
* If no conversion is possible return NFSERR_BADOWNER, otherwise
* return 0.
@@ -2657,6 +2708,7 @@ nfsv4_strtouid(struct nfsrv_descript *nd, u_char *str, int len, uid_t *uidp,
int cnt, ret;
int error = 0;
uid_t tuid;
+ struct nfsrv_lughash *hp, *hp2;
if (len == 0) {
error = NFSERR_BADOWNER;
@@ -2686,49 +2738,55 @@ nfsv4_strtouid(struct nfsrv_descript *nd, u_char *str, int len, uid_t *uidp,
cnt = 0;
tryagain:
- NFSLOCKNAMEID();
- /*
- * If an '@' is found and the domain name matches, search for the name
- * with dns stripped off.
- * Mixed case alphabetics will match for the domain name, but all
- * upper case will not.
- */
- if (cnt == 0 && i < len && i > 0 && nfsrv_dnsname &&
- (len - 1 - i) == nfsrv_dnsnamelen &&
- !nfsrv_cmpmixedcase(cp, nfsrv_dnsname, nfsrv_dnsnamelen)) {
- len -= (nfsrv_dnsnamelen + 1);
- *(cp - 1) = '\0';
- }
-
- /*
- * Check for the special case of "nobody".
- */
- if (len == 6 && !NFSBCMP(str, "nobody", 6)) {
- *uidp = nfsrv_defaultuid;
- NFSUNLOCKNAMEID();
- error = 0;
- goto out;
- }
-
- LIST_FOREACH(usrp, NFSUSERNAMEHASH(str, len), lug_namehash) {
- if (usrp->lug_namelen == len &&
- !NFSBCMP(usrp->lug_name, str, len)) {
- if (usrp->lug_expiry < NFSD_MONOSEC)
- break;
- *uidp = usrp->lug_uid;
- TAILQ_REMOVE(&nfsuserlruhead, usrp, lug_lru);
- TAILQ_INSERT_TAIL(&nfsuserlruhead, usrp, lug_lru);
- NFSUNLOCKNAMEID();
+ if (nfsrv_dnsnamelen > 0) {
+ /*
+ * If an '@' is found and the domain name matches, search for
+ * the name with dns stripped off.
+ * Mixed case alphabetics will match for the domain name, but
+ * all upper case will not.
+ */
+ if (cnt == 0 && i < len && i > 0 &&
+ (len - 1 - i) == nfsrv_dnsnamelen &&
+ !nfsrv_cmpmixedcase(cp, nfsrv_dnsname, nfsrv_dnsnamelen)) {
+ len -= (nfsrv_dnsnamelen + 1);
+ *(cp - 1) = '\0';
+ }
+
+ /*
+ * Check for the special case of "nobody".
+ */
+ if (len == 6 && !NFSBCMP(str, "nobody", 6)) {
+ *uidp = nfsrv_defaultuid;
error = 0;
goto out;
}
+
+ hp = NFSUSERNAMEHASH(str, len);
+ mtx_lock(&hp->mtx);
+ TAILQ_FOREACH(usrp, &hp->lughead, lug_namehash) {
+ if (usrp->lug_namelen == len &&
+ !NFSBCMP(usrp->lug_name, str, len)) {
+ if (usrp->lug_expiry < NFSD_MONOSEC)
+ break;
+ hp2 = NFSUSERHASH(usrp->lug_uid);
+ mtx_lock(&hp2->mtx);
+ TAILQ_REMOVE(&hp2->lughead, usrp, lug_numhash);
+ TAILQ_INSERT_TAIL(&hp2->lughead, usrp,
+ lug_numhash);
+ *uidp = usrp->lug_uid;
+ mtx_unlock(&hp2->mtx);
+ mtx_unlock(&hp->mtx);
+ error = 0;
+ goto out;
+ }
+ }
+ mtx_unlock(&hp->mtx);
+ cnt++;
+ ret = nfsrv_getuser(RPCNFSUSERD_GETUSER, (uid_t)0, (gid_t)0,
+ str, p);
+ if (ret == 0 && cnt < 2)
+ goto tryagain;
}
- NFSUNLOCKNAMEID();
- cnt++;
- ret = nfsrv_getuser(RPCNFSUSERD_GETUSER, (uid_t)0, (gid_t)0,
- str, p);
- if (ret == 0 && cnt < 2)
- goto tryagain;
error = NFSERR_BADOWNER;
out:
@@ -2751,18 +2809,17 @@ nfsv4_gidtostr(gid_t gid, u_char **cpp, int *retlenp, NFSPROC_T *p)
u_char *cp = *cpp;
gid_t tmp;
int cnt, hasampersand, len = NFSV4_SMALLSTR, ret;
+ struct nfsrv_lughash *hp;
cnt = 0;
tryagain:
- NFSLOCKNAMEID();
- if (nfsrv_dnsname) {
+ if (nfsrv_dnsnamelen > 0) {
/*
* Always map nfsrv_defaultgid to "nogroup".
*/
if (gid == nfsrv_defaultgid) {
i = nfsrv_dnsnamelen + 8;
if (i > len) {
- NFSUNLOCKNAMEID();
if (len > NFSV4_SMALLSTR)
free(cp, M_NFSSTRING);
cp = malloc(i, M_NFSSTRING, M_WAITOK);
@@ -2774,11 +2831,12 @@ tryagain:
NFSBCOPY("nogroup@", cp, 8);
cp += 8;
NFSBCOPY(nfsrv_dnsname, cp, nfsrv_dnsnamelen);
- NFSUNLOCKNAMEID();
return;
}
hasampersand = 0;
- LIST_FOREACH(usrp, NFSGROUPHASH(gid), lug_numhash) {
+ hp = NFSGROUPHASH(gid);
+ mtx_lock(&hp->mtx);
+ TAILQ_FOREACH(usrp, &hp->lughead, lug_numhash) {
if (usrp->lug_gid == gid) {
if (usrp->lug_expiry < NFSD_MONOSEC)
break;
@@ -2798,7 +2856,7 @@ tryagain:
i = usrp->lug_namelen +
nfsrv_dnsnamelen + 1;
if (i > len) {
- NFSUNLOCKNAMEID();
+ mtx_unlock(&hp->mtx);
if (len > NFSV4_SMALLSTR)
free(cp, M_NFSSTRING);
cp = malloc(i, M_NFSSTRING, M_WAITOK);
@@ -2813,20 +2871,19 @@ tryagain:
*cp++ = '@';
NFSBCOPY(nfsrv_dnsname, cp, nfsrv_dnsnamelen);
}
- TAILQ_REMOVE(&nfsuserlruhead, usrp, lug_lru);
- TAILQ_INSERT_TAIL(&nfsuserlruhead, usrp, lug_lru);
- NFSUNLOCKNAMEID();
+ TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+ TAILQ_INSERT_TAIL(&hp->lughead, usrp,
+ lug_numhash);
+ mtx_unlock(&hp->mtx);
return;
}
}
- NFSUNLOCKNAMEID();
+ mtx_unlock(&hp->mtx);
cnt++;
ret = nfsrv_getuser(RPCNFSUSERD_GETGID, (uid_t)0, gid,
NULL, p);
if (ret == 0 && cnt < 2)
goto tryagain;
- } else {
- NFSUNLOCKNAMEID();
}
/*
@@ -2867,6 +2924,7 @@ nfsv4_strtogid(struct nfsrv_descript *nd, u_char *str, int len, gid_t *gidp,
int cnt, ret;
int error = 0;
gid_t tgid;
+ struct nfsrv_lughash *hp, *hp2;
if (len == 0) {
error = NFSERR_BADOWNER;
@@ -2896,47 +2954,53 @@ nfsv4_strtogid(struct nfsrv_descript *nd, u_char *str, int len, gid_t *gidp,
cnt = 0;
tryagain:
- NFSLOCKNAMEID();
- /*
- * If an '@' is found and the dns name matches, search for the name
- * with the dns stripped off.
- */
- if (cnt == 0 && i < len && i > 0 && nfsrv_dnsname &&
- (len - 1 - i) == nfsrv_dnsnamelen &&
- !nfsrv_cmpmixedcase(cp, nfsrv_dnsname, nfsrv_dnsnamelen)) {
- len -= (nfsrv_dnsnamelen + 1);
- *(cp - 1) = '\0';
- }
-
- /*
- * Check for the special case of "nogroup".
- */
- if (len == 7 && !NFSBCMP(str, "nogroup", 7)) {
- *gidp = nfsrv_defaultgid;
- NFSUNLOCKNAMEID();
- error = 0;
- goto out;
- }
-
- LIST_FOREACH(usrp, NFSGROUPNAMEHASH(str, len), lug_namehash) {
- if (usrp->lug_namelen == len &&
- !NFSBCMP(usrp->lug_name, str, len)) {
- if (usrp->lug_expiry < NFSD_MONOSEC)
- break;
- *gidp = usrp->lug_gid;
- TAILQ_REMOVE(&nfsuserlruhead, usrp, lug_lru);
- TAILQ_INSERT_TAIL(&nfsuserlruhead, usrp, lug_lru);
- NFSUNLOCKNAMEID();
+ if (nfsrv_dnsnamelen > 0) {
+ /*
+ * If an '@' is found and the dns name matches, search for the
+ * name with the dns stripped off.
+ */
+ if (cnt == 0 && i < len && i > 0 &&
+ (len - 1 - i) == nfsrv_dnsnamelen &&
+ !nfsrv_cmpmixedcase(cp, nfsrv_dnsname, nfsrv_dnsnamelen)) {
+ len -= (nfsrv_dnsnamelen + 1);
+ *(cp - 1) = '\0';
+ }
+
+ /*
+ * Check for the special case of "nogroup".
+ */
+ if (len == 7 && !NFSBCMP(str, "nogroup", 7)) {
+ *gidp = nfsrv_defaultgid;
error = 0;
goto out;
}
+
+ hp = NFSGROUPNAMEHASH(str, len);
+ mtx_lock(&hp->mtx);
+ TAILQ_FOREACH(usrp, &hp->lughead, lug_namehash) {
+ if (usrp->lug_namelen == len &&
+ !NFSBCMP(usrp->lug_name, str, len)) {
+ if (usrp->lug_expiry < NFSD_MONOSEC)
+ break;
+ hp2 = NFSGROUPHASH(usrp->lug_gid);
+ mtx_lock(&hp2->mtx);
+ TAILQ_REMOVE(&hp2->lughead, usrp, lug_numhash);
+ TAILQ_INSERT_TAIL(&hp2->lughead, usrp,
+ lug_numhash);
+ *gidp = usrp->lug_gid;
+ mtx_unlock(&hp2->mtx);
+ mtx_unlock(&hp->mtx);
+ error = 0;
+ goto out;
+ }
+ }
+ mtx_unlock(&hp->mtx);
+ cnt++;
+ ret = nfsrv_getuser(RPCNFSUSERD_GETGROUP, (uid_t)0, (gid_t)0,
+ str, p);
+ if (ret == 0 && cnt < 2)
+ goto tryagain;
}
- NFSUNLOCKNAMEID();
- cnt++;
- ret = nfsrv_getuser(RPCNFSUSERD_GETGROUP, (uid_t)0, (gid_t)0,
- str, p);
- if (ret == 0 && cnt < 2)
- goto tryagain;
error = NFSERR_BADOWNER;
out:
@@ -3094,111 +3158,218 @@ APPLESTATIC int
nfssvc_idname(struct nfsd_idargs *nidp)
{
struct nfsusrgrp *nusrp, *usrp, *newusrp;
- struct nfsuserhashhead *hp;
- int i;
+ struct nfsrv_lughash *hp_name, *hp_idnum, *thp;
+ int i, group_locked, groupname_locked, user_locked, username_locked;
int error = 0;
u_char *cp;
+ gid_t *grps;
+ struct ucred *cr;
+ static int onethread = 0;
+ static time_t lasttime = 0;
if (nidp->nid_flag & NFSID_INITIALIZE) {
- cp = (u_char *)malloc(nidp->nid_namelen + 1,
- M_NFSSTRING, M_WAITOK);
- error = copyin(CAST_USER_ADDR_T(nidp->nid_name), cp,
- nidp->nid_namelen);
- NFSLOCKNAMEID();
- if (nfsrv_dnsname) {
+ cp = malloc(nidp->nid_namelen + 1, M_NFSSTRING, M_WAITOK);
+ error = copyin(CAST_USER_ADDR_T(nidp->nid_name), cp,
+ nidp->nid_namelen);
+ if (error != 0) {
+ free(cp, M_NFSSTRING);
+ goto out;
+ }
+ if (atomic_cmpset_acq_int(&nfsrv_dnsnamelen, 0, 0) == 0) {
+ /*
+ * Free up all the old stuff and reinitialize hash
+ * lists. All mutexes for both lists must be locked,
+ * with the user/group name ones before the uid/gid
+ * ones, to avoid a LOR.
+ */
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsusernamehash[i].mtx);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsuserhash[i].mtx);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ TAILQ_FOREACH_SAFE(usrp,
+ &nfsuserhash[i].lughead, lug_numhash, nusrp)
+ nfsrv_removeuser(usrp, 1);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_unlock(&nfsuserhash[i].mtx);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_unlock(&nfsusernamehash[i].mtx);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsgroupnamehash[i].mtx);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsgrouphash[i].mtx);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ TAILQ_FOREACH_SAFE(usrp,
+ &nfsgrouphash[i].lughead, lug_numhash,
+ nusrp)
+ nfsrv_removeuser(usrp, 0);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_unlock(&nfsgrouphash[i].mtx);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_unlock(&nfsgroupnamehash[i].mtx);
+ free(nfsrv_dnsname, M_NFSSTRING);
+ nfsrv_dnsname = NULL;
+ }
+ if (nfsuserhash == NULL) {
+ /* Allocate the hash tables. */
+ nfsuserhash = malloc(sizeof(struct nfsrv_lughash) *
+ nfsrv_lughashsize, M_NFSUSERGROUP, M_WAITOK |
+ M_ZERO);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_init(&nfsuserhash[i].mtx, "nfsuidhash",
+ NULL, MTX_DEF | MTX_DUPOK);
+ nfsusernamehash = malloc(sizeof(struct nfsrv_lughash) *
+ nfsrv_lughashsize, M_NFSUSERGROUP, M_WAITOK |
+ M_ZERO);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_init(&nfsusernamehash[i].mtx,
+ "nfsusrhash", NULL, MTX_DEF |
+ MTX_DUPOK);
+ nfsgrouphash = malloc(sizeof(struct nfsrv_lughash) *
+ nfsrv_lughashsize, M_NFSUSERGROUP, M_WAITOK |
+ M_ZERO);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_init(&nfsgrouphash[i].mtx, "nfsgidhash",
+ NULL, MTX_DEF | MTX_DUPOK);
+ nfsgroupnamehash = malloc(sizeof(struct nfsrv_lughash) *
+ nfsrv_lughashsize, M_NFSUSERGROUP, M_WAITOK |
+ M_ZERO);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_init(&nfsgroupnamehash[i].mtx,
+ "nfsgrphash", NULL, MTX_DEF | MTX_DUPOK);
+ }
+ /* (Re)initialize the list heads. */
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ TAILQ_INIT(&nfsuserhash[i].lughead);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ TAILQ_INIT(&nfsusernamehash[i].lughead);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ TAILQ_INIT(&nfsgrouphash[i].lughead);
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ TAILQ_INIT(&nfsgroupnamehash[i].lughead);
+
/*
- * Free up all the old stuff and reinitialize hash lists.
+ * Put name in "DNS" string.
*/
- TAILQ_FOREACH_SAFE(usrp, &nfsuserlruhead, lug_lru, nusrp) {
- nfsrv_removeuser(usrp);
- }
- free(nfsrv_dnsname, M_NFSSTRING);
- nfsrv_dnsname = NULL;
- }
- TAILQ_INIT(&nfsuserlruhead);
- for (i = 0; i < NFSUSERHASHSIZE; i++)
- LIST_INIT(&nfsuserhash[i]);
- for (i = 0; i < NFSGROUPHASHSIZE; i++)
- LIST_INIT(&nfsgrouphash[i]);
- for (i = 0; i < NFSUSERHASHSIZE; i++)
- LIST_INIT(&nfsusernamehash[i]);
- for (i = 0; i < NFSGROUPHASHSIZE; i++)
- LIST_INIT(&nfsgroupnamehash[i]);
-
- /*
- * Put name in "DNS" string.
- */
- if (!error) {
nfsrv_dnsname = cp;
- nfsrv_dnsnamelen = nidp->nid_namelen;
nfsrv_defaultuid = nidp->nid_uid;
nfsrv_defaultgid = nidp->nid_gid;
nfsrv_usercnt = 0;
nfsrv_usermax = nidp->nid_usermax;
- }
- NFSUNLOCKNAMEID();
- if (error)
- free(cp, M_NFSSTRING);
- goto out;
+ atomic_store_rel_int(&nfsrv_dnsnamelen, nidp->nid_namelen);
+ goto out;
}
/*
* malloc the new one now, so any potential sleep occurs before
* manipulation of the lists.
*/
- MALLOC(newusrp, struct nfsusrgrp *, sizeof (struct nfsusrgrp) +
- nidp->nid_namelen, M_NFSUSERGROUP, M_WAITOK);
+ newusrp = malloc(sizeof(struct nfsusrgrp) + nidp->nid_namelen,
+ M_NFSUSERGROUP, M_WAITOK | M_ZERO);
error = copyin(CAST_USER_ADDR_T(nidp->nid_name), newusrp->lug_name,
nidp->nid_namelen);
+ if (error == 0 && nidp->nid_ngroup > 0 &&
+ (nidp->nid_flag & NFSID_ADDUID) != 0) {
+ grps = malloc(sizeof(gid_t) * nidp->nid_ngroup, M_TEMP,
+ M_WAITOK);
+ error = copyin(CAST_USER_ADDR_T(nidp->nid_grps), grps,
+ sizeof(gid_t) * nidp->nid_ngroup);
+ if (error == 0) {
+ /*
+ * Create a credential just like svc_getcred(),
+ * but using the group list provided.
+ */
+ cr = crget();
+ cr->cr_uid = cr->cr_ruid = cr->cr_svuid = nidp->nid_uid;
+ crsetgroups(cr, nidp->nid_ngroup, grps);
+ cr->cr_rgid = cr->cr_svgid = cr->cr_groups[0];
+ cr->cr_prison = &prison0;
+ prison_hold(cr->cr_prison);
+#ifdef MAC
+ mac_cred_associate_nfsd(cr);
+#endif
+ newusrp->lug_cred = cr;
+ }
+ free(grps, M_TEMP);
+ }
if (error) {
- free((caddr_t)newusrp, M_NFSUSERGROUP);
+ free(newusrp, M_NFSUSERGROUP);
goto out;
}
newusrp->lug_namelen = nidp->nid_namelen;
- NFSLOCKNAMEID();
+ /*
+ * The lock order is username[0]->[nfsrv_lughashsize - 1] followed
+ * by uid[0]->[nfsrv_lughashsize - 1], with the same for group.
+ * The flags user_locked, username_locked, group_locked and
+ * groupname_locked are set to indicate all of those hash lists are
+ * locked. hp_name != NULL and hp_idnum != NULL indicates that
+ * the respective one mutex is locked.
+ */
+ user_locked = username_locked = group_locked = groupname_locked = 0;
+ hp_name = hp_idnum = NULL;
+
/*
* Delete old entries, as required.
*/
if (nidp->nid_flag & (NFSID_DELUID | NFSID_ADDUID)) {
- hp = NFSUSERHASH(nidp->nid_uid);
- LIST_FOREACH_SAFE(usrp, hp, lug_numhash, nusrp) {
+ /* Must lock all username hash lists first, to avoid a LOR. */
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsusernamehash[i].mtx);
+ username_locked = 1;
+ hp_idnum = NFSUSERHASH(nidp->nid_uid);
+ mtx_lock(&hp_idnum->mtx);
+ TAILQ_FOREACH_SAFE(usrp, &hp_idnum->lughead, lug_numhash,
+ nusrp) {
if (usrp->lug_uid == nidp->nid_uid)
- nfsrv_removeuser(usrp);
+ nfsrv_removeuser(usrp, 1);
}
- }
- if (nidp->nid_flag & (NFSID_DELUSERNAME | NFSID_ADDUSERNAME)) {
- hp = NFSUSERNAMEHASH(newusrp->lug_name, newusrp->lug_namelen);
- LIST_FOREACH_SAFE(usrp, hp, lug_namehash, nusrp) {
+ } else if (nidp->nid_flag & (NFSID_DELUSERNAME | NFSID_ADDUSERNAME)) {
+ hp_name = NFSUSERNAMEHASH(newusrp->lug_name,
+ newusrp->lug_namelen);
+ mtx_lock(&hp_name->mtx);
+ TAILQ_FOREACH_SAFE(usrp, &hp_name->lughead, lug_namehash,
+ nusrp) {
if (usrp->lug_namelen == newusrp->lug_namelen &&
!NFSBCMP(usrp->lug_name, newusrp->lug_name,
- usrp->lug_namelen))
- nfsrv_removeuser(usrp);
+ usrp->lug_namelen)) {
+ thp = NFSUSERHASH(usrp->lug_uid);
+ mtx_lock(&thp->mtx);
+ nfsrv_removeuser(usrp, 1);
+ mtx_unlock(&thp->mtx);
+ }
}
- }
- if (nidp->nid_flag & (NFSID_DELGID | NFSID_ADDGID)) {
- hp = NFSGROUPHASH(nidp->nid_gid);
- LIST_FOREACH_SAFE(usrp, hp, lug_numhash, nusrp) {
+ hp_idnum = NFSUSERHASH(nidp->nid_uid);
+ mtx_lock(&hp_idnum->mtx);
+ } else if (nidp->nid_flag & (NFSID_DELGID | NFSID_ADDGID)) {
+ /* Must lock all groupname hash lists first, to avoid a LOR. */
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsgroupnamehash[i].mtx);
+ groupname_locked = 1;
+ hp_idnum = NFSGROUPHASH(nidp->nid_gid);
+ mtx_lock(&hp_idnum->mtx);
+ TAILQ_FOREACH_SAFE(usrp, &hp_idnum->lughead, lug_numhash,
+ nusrp) {
if (usrp->lug_gid == nidp->nid_gid)
- nfsrv_removeuser(usrp);
+ nfsrv_removeuser(usrp, 0);
}
- }
- if (nidp->nid_flag & (NFSID_DELGROUPNAME | NFSID_ADDGROUPNAME)) {
- hp = NFSGROUPNAMEHASH(newusrp->lug_name, newusrp->lug_namelen);
- LIST_FOREACH_SAFE(usrp, hp, lug_namehash, nusrp) {
+ } else if (nidp->nid_flag & (NFSID_DELGROUPNAME | NFSID_ADDGROUPNAME)) {
+ hp_name = NFSGROUPNAMEHASH(newusrp->lug_name,
+ newusrp->lug_namelen);
+ mtx_lock(&hp_name->mtx);
+ TAILQ_FOREACH_SAFE(usrp, &hp_name->lughead, lug_namehash,
+ nusrp) {
if (usrp->lug_namelen == newusrp->lug_namelen &&
!NFSBCMP(usrp->lug_name, newusrp->lug_name,
- usrp->lug_namelen))
- nfsrv_removeuser(usrp);
+ usrp->lug_namelen)) {
+ thp = NFSGROUPHASH(usrp->lug_gid);
+ mtx_lock(&thp->mtx);
+ nfsrv_removeuser(usrp, 0);
+ mtx_unlock(&thp->mtx);
+ }
}
- }
- TAILQ_FOREACH_SAFE(usrp, &nfsuserlruhead, lug_lru, nusrp) {
- if (usrp->lug_expiry < NFSD_MONOSEC)
- nfsrv_removeuser(usrp);
- }
- while (nfsrv_usercnt >= nfsrv_usermax) {
- usrp = TAILQ_FIRST(&nfsuserlruhead);
- nfsrv_removeuser(usrp);
+ hp_idnum = NFSGROUPHASH(nidp->nid_gid);
+ mtx_lock(&hp_idnum->mtx);
}
/*
@@ -3210,23 +3381,129 @@ nfssvc_idname(struct nfsd_idargs *nidp)
newusrp->lug_expiry = NFSD_MONOSEC + 5;
if (nidp->nid_flag & (NFSID_ADDUID | NFSID_ADDUSERNAME)) {
newusrp->lug_uid = nidp->nid_uid;
- LIST_INSERT_HEAD(NFSUSERHASH(newusrp->lug_uid), newusrp,
- lug_numhash);
- LIST_INSERT_HEAD(NFSUSERNAMEHASH(newusrp->lug_name,
- newusrp->lug_namelen), newusrp, lug_namehash);
- TAILQ_INSERT_TAIL(&nfsuserlruhead, newusrp, lug_lru);
- nfsrv_usercnt++;
+ thp = NFSUSERHASH(newusrp->lug_uid);
+ mtx_assert(&thp->mtx, MA_OWNED);
+ TAILQ_INSERT_TAIL(&thp->lughead, newusrp, lug_numhash);
+ thp = NFSUSERNAMEHASH(newusrp->lug_name, newusrp->lug_namelen);
+ mtx_assert(&thp->mtx, MA_OWNED);
+ TAILQ_INSERT_TAIL(&thp->lughead, newusrp, lug_namehash);
+ atomic_add_int(&nfsrv_usercnt, 1);
} else if (nidp->nid_flag & (NFSID_ADDGID | NFSID_ADDGROUPNAME)) {
newusrp->lug_gid = nidp->nid_gid;
- LIST_INSERT_HEAD(NFSGROUPHASH(newusrp->lug_gid), newusrp,
- lug_numhash);
- LIST_INSERT_HEAD(NFSGROUPNAMEHASH(newusrp->lug_name,
- newusrp->lug_namelen), newusrp, lug_namehash);
- TAILQ_INSERT_TAIL(&nfsuserlruhead, newusrp, lug_lru);
- nfsrv_usercnt++;
- } else
- FREE((caddr_t)newusrp, M_NFSUSERGROUP);
- NFSUNLOCKNAMEID();
+ thp = NFSGROUPHASH(newusrp->lug_gid);
+ mtx_assert(&thp->mtx, MA_OWNED);
+ TAILQ_INSERT_TAIL(&thp->lughead, newusrp, lug_numhash);
+ thp = NFSGROUPNAMEHASH(newusrp->lug_name, newusrp->lug_namelen);
+ mtx_assert(&thp->mtx, MA_OWNED);
+ TAILQ_INSERT_TAIL(&thp->lughead, newusrp, lug_namehash);
+ atomic_add_int(&nfsrv_usercnt, 1);
+ } else {
+ if (newusrp->lug_cred != NULL)
+ crfree(newusrp->lug_cred);
+ free(newusrp, M_NFSUSERGROUP);
+ }
+
+ /*
+ * Once per second, allow one thread to trim the cache.
+ */
+ if (lasttime < NFSD_MONOSEC &&
+ atomic_cmpset_acq_int(&onethread, 0, 1) != 0) {
+ /*
+ * First, unlock the single mutexes, so that all entries
+ * can be locked and any LOR is avoided.
+ */
+ if (hp_name != NULL) {
+ mtx_unlock(&hp_name->mtx);
+ hp_name = NULL;
+ }
+ if (hp_idnum != NULL) {
+ mtx_unlock(&hp_idnum->mtx);
+ hp_idnum = NULL;
+ }
+
+ if ((nidp->nid_flag & (NFSID_DELUID | NFSID_ADDUID |
+ NFSID_DELUSERNAME | NFSID_ADDUSERNAME)) != 0) {
+ if (username_locked == 0) {
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsusernamehash[i].mtx);
+ username_locked = 1;
+ }
+ KASSERT(user_locked == 0,
+ ("nfssvc_idname: user_locked"));
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsuserhash[i].mtx);
+ user_locked = 1;
+ for (i = 0; i < nfsrv_lughashsize; i++) {
+ TAILQ_FOREACH_SAFE(usrp,
+ &nfsuserhash[i].lughead, lug_numhash,
+ nusrp)
+ if (usrp->lug_expiry < NFSD_MONOSEC)
+ nfsrv_removeuser(usrp, 1);
+ }
+ for (i = 0; i < nfsrv_lughashsize; i++) {
+ /*
+ * Trim the cache using an approximate LRU
+ * algorithm. This code deletes the least
+ * recently used entry on each hash list.
+ */
+ if (nfsrv_usercnt <= nfsrv_usermax)
+ break;
+ usrp = TAILQ_FIRST(&nfsuserhash[i].lughead);
+ if (usrp != NULL)
+ nfsrv_removeuser(usrp, 1);
+ }
+ } else {
+ if (groupname_locked == 0) {
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsgroupnamehash[i].mtx);
+ groupname_locked = 1;
+ }
+ KASSERT(group_locked == 0,
+ ("nfssvc_idname: group_locked"));
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_lock(&nfsgrouphash[i].mtx);
+ group_locked = 1;
+ for (i = 0; i < nfsrv_lughashsize; i++) {
+ TAILQ_FOREACH_SAFE(usrp,
+ &nfsgrouphash[i].lughead, lug_numhash,
+ nusrp)
+ if (usrp->lug_expiry < NFSD_MONOSEC)
+ nfsrv_removeuser(usrp, 0);
+ }
+ for (i = 0; i < nfsrv_lughashsize; i++) {
+ /*
+ * Trim the cache using an approximate LRU
+ * algorithm. This code deletes the least
+ * recently used entry on each hash list.
+ */
+ if (nfsrv_usercnt <= nfsrv_usermax)
+ break;
+ usrp = TAILQ_FIRST(&nfsgrouphash[i].lughead);
+ if (usrp != NULL)
+ nfsrv_removeuser(usrp, 0);
+ }
+ }
+ lasttime = NFSD_MONOSEC;
+ atomic_store_rel_int(&onethread, 0);
+ }
+
+ /* Now, unlock all locked mutexes. */
+ if (hp_idnum != NULL)
+ mtx_unlock(&hp_idnum->mtx);
+ if (hp_name != NULL)
+ mtx_unlock(&hp_name->mtx);
+ if (user_locked != 0)
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_unlock(&nfsuserhash[i].mtx);
+ if (username_locked != 0)
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_unlock(&nfsusernamehash[i].mtx);
+ if (group_locked != 0)
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_unlock(&nfsgrouphash[i].mtx);
+ if (groupname_locked != 0)
+ for (i = 0; i < nfsrv_lughashsize; i++)
+ mtx_unlock(&nfsgroupnamehash[i].mtx);
out:
NFSEXITCODE(error);
return (error);
@@ -3236,15 +3513,78 @@ out:
* Remove a user/group name element.
*/
static void
-nfsrv_removeuser(struct nfsusrgrp *usrp)
+nfsrv_removeuser(struct nfsusrgrp *usrp, int isuser)
{
+ struct nfsrv_lughash *hp;
+
+ if (isuser != 0) {
+ hp = NFSUSERHASH(usrp->lug_uid);
+ mtx_assert(&hp->mtx, MA_OWNED);
+ TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+ hp = NFSUSERNAMEHASH(usrp->lug_name, usrp->lug_namelen);
+ mtx_assert(&hp->mtx, MA_OWNED);
+ TAILQ_REMOVE(&hp->lughead, usrp, lug_namehash);
+ } else {
+ hp = NFSGROUPHASH(usrp->lug_gid);
+ mtx_assert(&hp->mtx, MA_OWNED);
+ TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+ hp = NFSGROUPNAMEHASH(usrp->lug_name, usrp->lug_namelen);
+ mtx_assert(&hp->mtx, MA_OWNED);
+ TAILQ_REMOVE(&hp->lughead, usrp, lug_namehash);
+ }
+ atomic_add_int(&nfsrv_usercnt, -1);
+ if (usrp->lug_cred != NULL)
+ crfree(usrp->lug_cred);
+ free(usrp, M_NFSUSERGROUP);
+}
- NFSNAMEIDREQUIRED();
- LIST_REMOVE(usrp, lug_numhash);
- LIST_REMOVE(usrp, lug_namehash);
- TAILQ_REMOVE(&nfsuserlruhead, usrp, lug_lru);
- nfsrv_usercnt--;
- FREE((caddr_t)usrp, M_NFSUSERGROUP);
+/*
+ * Free up all the allocations related to the name<-->id cache.
+ * This function should only be called when the nfsuserd daemon isn't
+ * running, since it doesn't do any locking.
+ * This function is meant to be used when the nfscommon module is unloaded.
+ */
+APPLESTATIC void
+nfsrv_cleanusergroup(void)
+{
+ struct nfsrv_lughash *hp, *hp2;
+ struct nfsusrgrp *nusrp, *usrp;
+ int i;
+
+ if (nfsuserhash == NULL)
+ return;
+
+ for (i = 0; i < nfsrv_lughashsize; i++) {
+ hp = &nfsuserhash[i];
+ TAILQ_FOREACH_SAFE(usrp, &hp->lughead, lug_numhash, nusrp) {
+ TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+ hp2 = NFSUSERNAMEHASH(usrp->lug_name,
+ usrp->lug_namelen);
+ TAILQ_REMOVE(&hp2->lughead, usrp, lug_namehash);
+ if (usrp->lug_cred != NULL)
+ crfree(usrp->lug_cred);
+ free(usrp, M_NFSUSERGROUP);
+ }
+ hp = &nfsgrouphash[i];
+ TAILQ_FOREACH_SAFE(usrp, &hp->lughead, lug_numhash, nusrp) {
+ TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+ hp2 = NFSGROUPNAMEHASH(usrp->lug_name,
+ usrp->lug_namelen);
+ TAILQ_REMOVE(&hp2->lughead, usrp, lug_namehash);
+ if (usrp->lug_cred != NULL)
+ crfree(usrp->lug_cred);
+ free(usrp, M_NFSUSERGROUP);
+ }
+ mtx_destroy(&nfsuserhash[i].mtx);
+ mtx_destroy(&nfsusernamehash[i].mtx);
+ mtx_destroy(&nfsgroupnamehash[i].mtx);
+ mtx_destroy(&nfsgrouphash[i].mtx);
+ }
+ free(nfsuserhash, M_NFSUSERGROUP);
+ free(nfsusernamehash, M_NFSUSERGROUP);
+ free(nfsgrouphash, M_NFSUSERGROUP);
+ free(nfsgroupnamehash, M_NFSUSERGROUP);
+ free(nfsrv_dnsname, M_NFSSTRING);
}
/*
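
The single NFSLOCKNAMEID() lock and global LRU list are replaced above by per-bucket mutexes and a move-to-tail discipline on each chain. A self-contained sketch of the lookup side, with simplified types (struct bucket stands in for struct nfsrv_lughash):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

struct entry {
	TAILQ_ENTRY(entry) link;
	uid_t	uid;
};

struct bucket {
	struct mtx mtx;
	TAILQ_HEAD(, entry) head;
};

/*
 * Look up uid under the bucket's own mutex and move a hit to the
 * tail, so TAILQ_FIRST() of each chain approximates the least
 * recently used entry when the cache is trimmed.
 */
static struct entry *
lookup(struct bucket *tbl, int tblsize, uid_t uid)
{
	struct bucket *hp;
	struct entry *ep;

	hp = &tbl[uid % tblsize];
	mtx_lock(&hp->mtx);
	TAILQ_FOREACH(ep, &hp->head, link) {
		if (ep->uid == uid) {
			TAILQ_REMOVE(&hp->head, ep, link);
			TAILQ_INSERT_TAIL(&hp->head, ep, link);
			break;
		}
	}
	mtx_unlock(&hp->mtx);
	return (ep);	/* NULL on miss; the real code consumes it locked */
}
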
diff --git a/sys/fs/nfs/nfs_var.h b/sys/fs/nfs/nfs_var.h
index d540dc9..5739f6b 100644
--- a/sys/fs/nfs/nfs_var.h
+++ b/sys/fs/nfs/nfs_var.h
@@ -283,6 +283,7 @@ void nfsv4_getref(struct nfsv4lock *, int *, void *, struct mount *);
int nfsv4_getref_nonblock(struct nfsv4lock *);
int nfsv4_testlock(struct nfsv4lock *);
int nfsrv_mtostr(struct nfsrv_descript *, char *, int);
+void nfsrv_cleanusergroup(void);
int nfsrv_checkutf8(u_int8_t *, int);
int newnfs_sndlock(int *);
void newnfs_sndunlock(int *);
@@ -296,6 +297,7 @@ void nfsv4_setsequence(struct nfsmount *, struct nfsrv_descript *,
int nfsv4_sequencelookup(struct nfsmount *, struct nfsclsession *, int *,
int *, uint32_t *, uint8_t *);
void nfsv4_freeslot(struct nfsclsession *, int);
+struct ucred *nfsrv_getgrpscred(struct ucred *);
/* nfs_clcomsubs.c */
void nfsm_uiombuf(struct nfsrv_descript *, struct uio *, int);
diff --git a/sys/fs/nfs/nfsrvstate.h b/sys/fs/nfs/nfsrvstate.h
index 972fffe..6d32244 100644
--- a/sys/fs/nfs/nfsrvstate.h
+++ b/sys/fs/nfs/nfsrvstate.h
@@ -48,23 +48,22 @@ LIST_HEAD(nfssessionhashhead, nfsdsession);
/*
* List head for nfsusrgrp.
*/
-LIST_HEAD(nfsuserhashhead, nfsusrgrp);
-TAILQ_HEAD(nfsuserlruhead, nfsusrgrp);
+TAILQ_HEAD(nfsuserhashhead, nfsusrgrp);
#define NFSCLIENTHASH(id) \
(&nfsclienthash[(id).lval[1] % nfsrv_clienthashsize])
#define NFSSTATEHASH(clp, id) \
(&((clp)->lc_stateid[(id).other[2] % nfsrv_statehashsize]))
#define NFSUSERHASH(id) \
- (&nfsuserhash[(id) % NFSUSERHASHSIZE])
+ (&nfsuserhash[(id) % nfsrv_lughashsize])
#define NFSUSERNAMEHASH(p, l) \
(&nfsusernamehash[((l)>=4?(*(p)+*((p)+1)+*((p)+2)+*((p)+3)):*(p)) \
- % NFSUSERHASHSIZE])
+ % nfsrv_lughashsize])
#define NFSGROUPHASH(id) \
- (&nfsgrouphash[(id) % NFSGROUPHASHSIZE])
+ (&nfsgrouphash[(id) % nfsrv_lughashsize])
#define NFSGROUPNAMEHASH(p, l) \
(&nfsgroupnamehash[((l)>=4?(*(p)+*((p)+1)+*((p)+2)+*((p)+3)):*(p)) \
- % NFSGROUPHASHSIZE])
+ % nfsrv_lughashsize])
struct nfssessionhash {
struct mtx mtx;
@@ -264,14 +263,14 @@ struct nfslockfile {
* names.
*/
struct nfsusrgrp {
- TAILQ_ENTRY(nfsusrgrp) lug_lru; /* LRU list */
- LIST_ENTRY(nfsusrgrp) lug_numhash; /* Hash by id# */
- LIST_ENTRY(nfsusrgrp) lug_namehash; /* and by name */
+ TAILQ_ENTRY(nfsusrgrp) lug_numhash; /* Hash by id# */
+ TAILQ_ENTRY(nfsusrgrp) lug_namehash; /* and by name */
time_t lug_expiry; /* Expiry time in sec */
union {
uid_t un_uid; /* id# */
gid_t un_gid;
} lug_un;
+ struct ucred *lug_cred; /* Cred. with groups list */
int lug_namelen; /* Name length */
u_char lug_name[1]; /* malloc'd correct length */
};
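
The NFSUSERNAMEHASH()/NFSGROUPNAMEHASH() macros above fold a name into a bucket index by summing its first four bytes, or taking the first byte for names shorter than four characters, modulo the table size. The same computation written as a plain function, for readability:

static int
namehash(const u_char *p, int len, int tblsize)
{
	unsigned int sum;

	sum = (len >= 4) ? p[0] + p[1] + p[2] + p[3] : p[0];
	return (sum % tblsize);
}
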
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
index a700ee9..99fbf4d 100644
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -2644,14 +2644,24 @@ nfsd_excred(struct nfsrv_descript *nd, struct nfsexstuff *exp,
* Fsinfo RPC. If set for anything else, this code might need
* to change.)
*/
- if (NFSVNO_EXPORTED(exp) &&
- ((!(nd->nd_flag & ND_GSS) && nd->nd_cred->cr_uid == 0) ||
- NFSVNO_EXPORTANON(exp) ||
- (nd->nd_flag & ND_AUTHNONE))) {
- nd->nd_cred->cr_uid = credanon->cr_uid;
- nd->nd_cred->cr_gid = credanon->cr_gid;
- crsetgroups(nd->nd_cred, credanon->cr_ngroups,
- credanon->cr_groups);
+ if (NFSVNO_EXPORTED(exp)) {
+ if (((nd->nd_flag & ND_GSS) == 0 && nd->nd_cred->cr_uid == 0) ||
+ NFSVNO_EXPORTANON(exp) ||
+ (nd->nd_flag & ND_AUTHNONE) != 0) {
+ nd->nd_cred->cr_uid = credanon->cr_uid;
+ nd->nd_cred->cr_gid = credanon->cr_gid;
+ crsetgroups(nd->nd_cred, credanon->cr_ngroups,
+ credanon->cr_groups);
+ } else if ((nd->nd_flag & ND_GSS) == 0) {
+ /*
+ * If using AUTH_SYS, call nfsrv_getgrpscred() to see
+ * if there is a replacement credential with a group
+ * list set up by "nfsuserd -manage-gids".
+ * If there is no replacement, nfsrv_getgrpscred()
+ * simply returns its argument.
+ */
+ nd->nd_cred = nfsrv_getgrpscred(nd->nd_cred);
+ }
}
out:
diff --git a/sys/geom/geom_disk.c b/sys/geom/geom_disk.c
index 9319b97..1a879f7 100644
--- a/sys/geom/geom_disk.c
+++ b/sys/geom/geom_disk.c
@@ -58,6 +58,8 @@ __FBSDID("$FreeBSD$");
#include <dev/led/led.h>
+#include <machine/bus.h>
+
struct g_disk_softc {
struct mtx done_mtx;
struct disk *dp;
@@ -273,6 +275,145 @@ g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct t
return (error);
}
+static off_t
+g_disk_maxsize(struct disk *dp, struct bio *bp)
+{
+ if (bp->bio_cmd == BIO_DELETE)
+ return (dp->d_delmaxsize);
+ return (dp->d_maxsize);
+}
+
+static int
+g_disk_maxsegs(struct disk *dp, struct bio *bp)
+{
+ return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1);
+}
+
+static void
+g_disk_advance(struct disk *dp, struct bio *bp, off_t off)
+{
+
+ bp->bio_offset += off;
+ bp->bio_length -= off;
+
+ if ((bp->bio_flags & BIO_VLIST) != 0) {
+ bus_dma_segment_t *seg, *end;
+
+ seg = (bus_dma_segment_t *)bp->bio_data;
+ end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
+ off += bp->bio_ma_offset;
+ while (off >= seg->ds_len) {
+ KASSERT((seg != end),
+ ("vlist request runs off the end"));
+ off -= seg->ds_len;
+ seg++;
+ }
+ bp->bio_ma_offset = off;
+ bp->bio_ma_n = end - seg;
+ bp->bio_data = (void *)seg;
+ } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
+ bp->bio_ma += off / PAGE_SIZE;
+ bp->bio_ma_offset += off;
+ bp->bio_ma_offset %= PAGE_SIZE;
+ bp->bio_ma_n -= off / PAGE_SIZE;
+ } else {
+ bp->bio_data += off;
+ }
+}
+
+static void
+g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset,
+ off_t *plength, int *ppages)
+{
+ uintptr_t seg_page_base;
+ uintptr_t seg_page_end;
+ off_t offset;
+ off_t length;
+ int seg_pages;
+
+ offset = *poffset;
+ length = *plength;
+
+ if (length > seg->ds_len - offset)
+ length = seg->ds_len - offset;
+
+ seg_page_base = trunc_page(seg->ds_addr + offset);
+ seg_page_end = round_page(seg->ds_addr + offset + length);
+ seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT;
+
+ if (seg_pages > *ppages) {
+ seg_pages = *ppages;
+ length = (seg_page_base + (seg_pages << PAGE_SHIFT)) -
+ (seg->ds_addr + offset);
+ }
+
+ *poffset = 0;
+ *plength -= length;
+ *ppages -= seg_pages;
+}
+
+static off_t
+g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg)
+{
+ bus_dma_segment_t *seg, *end;
+ off_t residual;
+ off_t offset;
+ int pages;
+
+ seg = (bus_dma_segment_t *)bp->bio_data;
+ end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
+ residual = bp->bio_length;
+ offset = bp->bio_ma_offset;
+ pages = g_disk_maxsegs(dp, bp);
+ while (residual != 0 && pages != 0) {
+ KASSERT((seg != end),
+ ("vlist limit runs off the end"));
+ g_disk_seg_limit(seg, &offset, &residual, &pages);
+ seg++;
+ }
+ if (pendseg != NULL)
+ *pendseg = seg;
+ return (residual);
+}
+
+static bool
+g_disk_limit(struct disk *dp, struct bio *bp)
+{
+ bool limited = false;
+ off_t maxsz;
+
+ maxsz = g_disk_maxsize(dp, bp);
+
+ /*
+ * XXX: If we have a stripesize we should really use it here.
+ * Care should be taken in the delete case if this is done
+ * as deletes can be very sensitive to size given how they
+ * are processed.
+ */
+ if (bp->bio_length > maxsz) {
+ bp->bio_length = maxsz;
+ limited = true;
+ }
+
+ if ((bp->bio_flags & BIO_VLIST) != 0) {
+ bus_dma_segment_t *firstseg, *endseg;
+ off_t residual;
+
+ firstseg = (bus_dma_segment_t*)bp->bio_data;
+ residual = g_disk_vlist_limit(dp, bp, &endseg);
+ if (residual != 0) {
+ bp->bio_ma_n = endseg - firstseg;
+ bp->bio_length -= residual;
+ limited = true;
+ }
+ } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
+ bp->bio_ma_n =
+ howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE);
+ }
+
+ return (limited);
+}
+
static void
g_disk_start(struct bio *bp)
{
@@ -297,6 +438,9 @@ g_disk_start(struct bio *bp)
/* fall-through */
case BIO_READ:
case BIO_WRITE:
+ KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 ||
+ (bp->bio_flags & BIO_UNMAPPED) == 0,
+ ("unmapped bio not supported by disk %s", dp->d_name));
off = 0;
bp3 = NULL;
bp2 = g_clone_bio(bp);
@@ -304,39 +448,10 @@ g_disk_start(struct bio *bp)
error = ENOMEM;
break;
}
- do {
- off_t d_maxsize;
-
- d_maxsize = (bp->bio_cmd == BIO_DELETE) ?
- dp->d_delmaxsize : dp->d_maxsize;
- bp2->bio_offset += off;
- bp2->bio_length -= off;
- if ((bp->bio_flags & BIO_UNMAPPED) == 0) {
- bp2->bio_data += off;
- } else {
- KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO)
- != 0,
- ("unmapped bio not supported by disk %s",
- dp->d_name));
- bp2->bio_ma += off / PAGE_SIZE;
- bp2->bio_ma_offset += off;
- bp2->bio_ma_offset %= PAGE_SIZE;
- bp2->bio_ma_n -= off / PAGE_SIZE;
- }
- if (bp2->bio_length > d_maxsize) {
- /*
- * XXX: If we have a stripesize we should really
- * use it here. Care should be taken in the delete
- * case if this is done as deletes can be very
- * sensitive to size given how they are processed.
- */
- bp2->bio_length = d_maxsize;
- if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
- bp2->bio_ma_n = howmany(
- bp2->bio_ma_offset +
- bp2->bio_length, PAGE_SIZE);
- }
- off += d_maxsize;
+ for (;;) {
+ if (g_disk_limit(dp, bp2)) {
+ off += bp2->bio_length;
+
/*
* To avoid a race, we need to grab the next bio
* before we schedule this one. See "notes".
@@ -355,9 +470,14 @@ g_disk_start(struct bio *bp)
g_disk_lock_giant(dp);
dp->d_strategy(bp2);
g_disk_unlock_giant(dp);
+
+ if (bp3 == NULL)
+ break;
+
bp2 = bp3;
bp3 = NULL;
- } while (bp2 != NULL);
+ g_disk_advance(dp, bp2, off);
+ }
break;
case BIO_GETATTR:
/* Give the driver a chance to override */
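
The rewritten g_disk_start() loop is split across two hunks above; condensed, with the ENOMEM path and Giant lock wrappers elided, the resulting control flow is approximately:

	struct bio *bp2, *bp3;
	off_t off;

	off = 0;
	bp3 = NULL;
	bp2 = g_clone_bio(bp);
	for (;;) {
		if (g_disk_limit(dp, bp2)) {
			/* Clone was truncated; more I/O remains. */
			off += bp2->bio_length;
			/* Grab the next bio before scheduling this one. */
			bp3 = g_clone_bio(bp);
		}
		dp->d_strategy(bp2);		/* dispatch the clamped clone */
		if (bp3 == NULL)
			break;
		bp2 = bp3;
		bp3 = NULL;
		g_disk_advance(dp, bp2, off);	/* skip the bytes already issued */
	}
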
diff --git a/sys/geom/geom_io.c b/sys/geom/geom_io.c
index f1edc70..9dff151 100644
--- a/sys/geom/geom_io.c
+++ b/sys/geom/geom_io.c
@@ -205,11 +205,12 @@ g_clone_bio(struct bio *bp)
/*
* BIO_ORDERED flag may be used by disk drivers to enforce
* ordering restrictions, so this flag needs to be cloned.
- * BIO_UNMAPPED should be inherited, to properly indicate
- * which way the buffer is passed.
+ * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
+ * indicate which way the buffer is passed.
* Other bio flags are not suitable for cloning.
*/
- bp2->bio_flags = bp->bio_flags & (BIO_ORDERED | BIO_UNMAPPED);
+ bp2->bio_flags = bp->bio_flags &
+ (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
bp2->bio_length = bp->bio_length;
bp2->bio_offset = bp->bio_offset;
bp2->bio_data = bp->bio_data;
@@ -240,7 +241,7 @@ g_duplicate_bio(struct bio *bp)
struct bio *bp2;
bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
- bp2->bio_flags = bp->bio_flags & BIO_UNMAPPED;
+ bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
bp2->bio_parent = bp;
bp2->bio_cmd = bp->bio_cmd;
bp2->bio_length = bp->bio_length;
diff --git a/sys/i386/conf/XEN b/sys/i386/conf/XEN
index c9fdab0..7c0ee3c 100644
--- a/sys/i386/conf/XEN
+++ b/sys/i386/conf/XEN
@@ -14,6 +14,9 @@ makeoptions WITHOUT_MODULES="ctl cxgb dpt drm drm2 hptmv ida rdma"
# The following drivers don't work with PAE enabled.
makeoptions WITHOUT_MODULES+="asr ncr pst"
+# The following drivers don't build with XEN enabled.
+makeoptions WITHOUT_MODULES+="mlx5 mlx5en"
+
options SCHED_ULE # ULE scheduler
options PREEMPTION # Enable kernel thread preemption
diff --git a/sys/ia64/include/bus.h b/sys/ia64/include/bus.h
index 966a75d3..a9b09c6 100644
--- a/sys/ia64/include/bus.h
+++ b/sys/ia64/include/bus.h
@@ -123,6 +123,7 @@
#define BUS_SPACE_UNRESTRICTED (~0)
+#ifdef _KERNEL
/*
* Map and unmap a region of device bus space into CPU virtual address space.
@@ -815,6 +816,8 @@ bus_space_copy_region_8(bus_space_tag_t bst, bus_space_handle_t sbsh,
#define bus_space_copy_region_stream_4 bus_space_copy_region_4
#define bus_space_copy_region_stream_8 bus_space_copy_region_8
+#endif /* _KERNEL */
+
#include <machine/bus_dma.h>
#endif /* _MACHINE_BUS_H_ */
diff --git a/sys/kern/subr_bus_dma.c b/sys/kern/subr_bus_dma.c
index a16d8c8..ae30276 100644
--- a/sys/kern/subr_bus_dma.c
+++ b/sys/kern/subr_bus_dma.c
@@ -54,19 +54,32 @@ __FBSDID("$FreeBSD$");
#include <machine/bus.h>
/*
- * Load a list of virtual addresses.
+ * Load up data starting at offset within a region specified by a
+ * list of virtual address ranges until either the length or the
+ * region is exhausted.
*/
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
- int flags)
+ int flags, size_t offset, size_t length)
{
int error;
error = 0;
- for (; sglist_cnt > 0; sglist_cnt--, list++) {
- error = _bus_dmamap_load_buffer(dmat, map,
- (void *)(uintptr_t)list->ds_addr, list->ds_len, pmap,
+ for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
+ char *addr;
+ size_t ds_len;
+
+ KASSERT((offset < list->ds_len),
+ ("Invalid mid-segment offset"));
+ addr = (char *)(uintptr_t)list->ds_addr + offset;
+ ds_len = list->ds_len - offset;
+ offset = 0;
+ if (ds_len > length)
+ ds_len = length;
+ length -= ds_len;
+ KASSERT((ds_len != 0), ("Segment length is zero"));
+ error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
flags, NULL, nsegs);
if (error)
break;
@@ -118,22 +131,48 @@ _bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
}
/*
+ * Load tlen data starting at offset within a region specified by a list of
+ * physical pages.
+ */
+static int
+_bus_dmamap_load_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_page_t *pages, bus_size_t tlen, int offset, int *nsegs, int flags)
+{
+ vm_paddr_t paddr;
+ bus_size_t len;
+ int error, i;
+
+ for (i = 0, error = 0; error == 0 && tlen > 0; i++, tlen -= len) {
+ len = min(PAGE_SIZE - offset, tlen);
+ paddr = VM_PAGE_TO_PHYS(pages[i]) + offset;
+ error = _bus_dmamap_load_phys(dmat, map, paddr, len,
+ flags, NULL, nsegs);
+ offset = 0;
+ }
+ return (error);
+}
+
+/*
* Load from block io.
*/
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
int *nsegs, int flags)
{
- int error;
- if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
- error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data,
- bio->bio_bcount, kernel_pmap, flags, NULL, nsegs);
- } else {
- error = _bus_dmamap_load_ma(dmat, map, bio->bio_ma,
- bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs);
+ if ((bio->bio_flags & BIO_VLIST) != 0) {
+ bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data;
+ return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n,
+ kernel_pmap, nsegs, flags, bio->bio_ma_offset,
+ bio->bio_bcount));
}
- return (error);
+
+ if ((bio->bio_flags & BIO_UNMAPPED) != 0)
+ return (_bus_dmamap_load_pages(dmat, map, bio->bio_ma,
+ bio->bio_bcount, bio->bio_ma_offset, nsegs, flags));
+
+ return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data,
+ bio->bio_bcount, kernel_pmap, flags, NULL, nsegs));
}
int
@@ -219,7 +258,7 @@ _bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
case CAM_DATA_SG:
error = _bus_dmamap_load_vlist(dmat, map,
(bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
- nsegs, flags);
+ nsegs, flags, 0, dxfer_len);
break;
case CAM_DATA_SG_PADDR:
error = _bus_dmamap_load_plist(dmat, map,
@@ -494,7 +533,7 @@ bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
break;
case MEMDESC_VLIST:
error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
- mem->md_opaque, kernel_pmap, &nsegs, flags);
+ mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
break;
case MEMDESC_PLIST:
error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
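
With the added offset/length arguments, _bus_dmamap_load_vlist() can now load a window of a segment list; bus_dmamap_load_mem() keeps the old whole-list behavior by passing 0 and SIZE_T_MAX. An illustrative call shape (the function is static to subr_bus_dma.c, so real callers reach it via bus_dmamap_load_bio() or bus_dmamap_load_mem(); dmat, map, addr0, and addr1 are assumed set up elsewhere):

	bus_dma_segment_t segs[2];
	int error, nsegs = 0;

	segs[0].ds_addr = addr0; segs[0].ds_len = 4096;
	segs[1].ds_addr = addr1; segs[1].ds_len = 4096;

	/* Window: 512 bytes starting 4000 bytes in, spanning both segments. */
	error = _bus_dmamap_load_vlist(dmat, map, segs, 2, kernel_pmap,
	    &nsegs, BUS_DMA_NOWAIT, 4000, 512);

	/* Old behavior, the whole list: offset 0, length SIZE_T_MAX. */
	error = _bus_dmamap_load_vlist(dmat, map, segs, 2, kernel_pmap,
	    &nsegs, BUS_DMA_NOWAIT, 0, SIZE_T_MAX);
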
diff --git a/sys/kern/subr_uio.c b/sys/kern/subr_uio.c
index 87892fd..3712f92 100644
--- a/sys/kern/subr_uio.c
+++ b/sys/kern/subr_uio.c
@@ -62,6 +62,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
+#include <machine/bus.h>
+
SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, UIO_MAXIOV,
"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
@@ -136,6 +138,58 @@ physcopyout(vm_paddr_t src, void *dst, size_t len)
#undef PHYS_PAGE_COUNT
int
+physcopyin_vlist(bus_dma_segment_t *src, off_t offset, vm_paddr_t dst,
+ size_t len)
+{
+ size_t seg_len;
+ int error;
+
+ error = 0;
+ while (offset >= src->ds_len) {
+ offset -= src->ds_len;
+ src++;
+ }
+
+ while (len > 0 && error == 0) {
+ seg_len = MIN(src->ds_len - offset, len);
+ error = physcopyin((void *)(uintptr_t)(src->ds_addr + offset),
+ dst, seg_len);
+ offset = 0;
+ src++;
+ len -= seg_len;
+ dst += seg_len;
+ }
+
+ return (error);
+}
+
+int
+physcopyout_vlist(vm_paddr_t src, bus_dma_segment_t *dst, off_t offset,
+ size_t len)
+{
+ size_t seg_len;
+ int error;
+
+ error = 0;
+ while (offset >= dst->ds_len) {
+ offset -= dst->ds_len;
+ dst++;
+ }
+
+ while (len > 0 && error == 0) {
+ seg_len = MIN(dst->ds_len - offset, len);
+ error = physcopyout(src, (void *)(uintptr_t)(dst->ds_addr +
+ offset), seg_len);
+ offset = 0;
+ dst++;
+ len -= seg_len;
+ src += seg_len;
+ }
+
+ return (error);
+}
+
+int
uiomove(void *cp, int n, struct uio *uio)
{
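
Call shape for the two new helpers, matching the BIO_VLIST branches added to mdstart_swap() earlier in this change (m is a vm_page_t, vlist the bus_dma_segment_t array from bio_data, ma_offs the byte offset into its first segment):

	/* Page contents out to the memory the vlist describes (a read bio): */
	error = physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs, vlist, ma_offs, len);

	/* Memory the vlist describes into the page (a write bio): */
	error = physcopyin_vlist(vlist, ma_offs, VM_PAGE_TO_PHYS(m) + offs, len);
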
diff --git a/sys/nfs/nfssvc.h b/sys/nfs/nfssvc.h
index 65b1681..a194ed5 100644
--- a/sys/nfs/nfssvc.h
+++ b/sys/nfs/nfssvc.h
@@ -69,6 +69,7 @@
#define NFSSVC_SUSPENDNFSD 0x04000000
#define NFSSVC_RESUMENFSD 0x08000000
#define NFSSVC_DUMPMNTOPTS 0x10000000
+#define NFSSVC_NEWSTRUCT 0x20000000
/* Argument structure for NFSSVC_DUMPMNTOPTS. */
struct nfscl_dumpmntopts {
diff --git a/sys/pc98/include/bus.h b/sys/pc98/include/bus.h
index 3292474..2060414 100644
--- a/sys/pc98/include/bus.h
+++ b/sys/pc98/include/bus.h
@@ -78,7 +78,9 @@
#ifndef _PC98_BUS_H_
#define _PC98_BUS_H_
+#ifdef _KERNEL
#include <sys/systm.h>
+#endif /* _KERNEL */
#include <machine/_bus.h>
#include <machine/cpufunc.h>
@@ -92,6 +94,8 @@
#define BUS_SPACE_UNRESTRICTED (~0)
+#ifdef _KERNEL
+
/*
* address relocation table
*/
@@ -639,4 +643,6 @@ bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t bsh,
#define bus_space_copy_region_stream_4(t, h1, o1, h2, o2, c) \
bus_space_copy_region_4((t), (h1), (o1), (h2), (o2), (c))
+#endif /* _KERNEL */
+
#endif /* _PC98_BUS_H_ */
diff --git a/sys/sparc64/include/bus.h b/sys/sparc64/include/bus.h
index c641a25..4f02917 100644
--- a/sys/sparc64/include/bus.h
+++ b/sys/sparc64/include/bus.h
@@ -146,8 +146,8 @@ bus_space_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
}
static __inline int
-bus_space_subregion(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
- bus_size_t s, bus_space_handle_t *hp)
+bus_space_subregion(bus_space_tag_t t __unused, bus_space_handle_t h,
+ bus_size_t o __unused, bus_size_t s __unused, bus_space_handle_t *hp)
{
*hp = h + o;
diff --git a/sys/sys/bio.h b/sys/sys/bio.h
index 535ce61..8b3a5fc 100644
--- a/sys/sys/bio.h
+++ b/sys/sys/bio.h
@@ -61,6 +61,7 @@
#define BIO_ORDERED 0x08
#define BIO_UNMAPPED 0x10
#define BIO_TRANSIENT_MAPPING 0x20
+#define BIO_VLIST 0x40
#ifdef _KERNEL
struct disk;
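
BIO_VLIST repurposes existing bio fields rather than adding new ones: bio_data points at an array of bus_dma_segment_t describing kernel virtual ranges, bio_ma_n holds the segment count, and bio_ma_offset the byte offset into the first segment, as the md, GEOM, and busdma consumers above all assume. A hypothetical producer would set a bio up along these lines (segs, nsegs, dev_off, and total_len are illustrative):

	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_flags |= BIO_VLIST;
	bp->bio_data = (void *)segs;	/* bus_dma_segment_t array, not a buffer */
	bp->bio_ma_n = nsegs;		/* reused: number of segments */
	bp->bio_ma_offset = 0;		/* byte offset into segs[0] */
	bp->bio_offset = dev_off;
	bp->bio_length = total_len;
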
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 4c934a6..b58101a 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -58,7 +58,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1002502 /* Master, propagated to newvers */
+#define __FreeBSD_version 1002503 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/sys/uio.h b/sys/sys/uio.h
index 271a2f7..ff21b09 100644
--- a/sys/sys/uio.h
+++ b/sys/sys/uio.h
@@ -85,6 +85,7 @@ struct uio {
struct vm_object;
struct vm_page;
+struct bus_dma_segment;
struct uio *cloneuio(struct uio *uiop);
int copyinfrom(const void * __restrict src, void * __restrict dst,
@@ -98,6 +99,10 @@ int copyout_map(struct thread *td, vm_offset_t *addr, size_t sz);
int copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz);
int physcopyin(void *src, vm_paddr_t dst, size_t len);
int physcopyout(vm_paddr_t src, void *dst, size_t len);
+int physcopyin_vlist(struct bus_dma_segment *src, off_t offset,
+ vm_paddr_t dst, size_t len);
+int physcopyout_vlist(vm_paddr_t src, struct bus_dma_segment *dst,
+ off_t offset, size_t len);
int uiomove(void *cp, int n, struct uio *uio);
int uiomove_frombuf(void *buf, int buflen, struct uio *uio);
int uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n,
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 9d08714..e9d1280 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1424,13 +1424,40 @@ retry:
#define OBSC_COLLAPSE_NOWAIT 0x0002
#define OBSC_COLLAPSE_WAIT 0x0004
-static int
+static vm_page_t
+vm_object_backing_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next,
+ int op)
+{
+ vm_object_t backing_object;
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ backing_object = object->backing_object;
+ VM_OBJECT_ASSERT_WLOCKED(backing_object);
+
+ KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p));
+ KASSERT(p == NULL || p->object == object || p->object == backing_object,
+ ("invalid ownership %p %p %p", p, object, backing_object));
+ if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
+ return (next);
+ if (p != NULL)
+ vm_page_lock(p);
+ VM_OBJECT_WUNLOCK(object);
+ VM_OBJECT_WUNLOCK(backing_object);
+ if (p == NULL)
+ VM_WAIT;
+ else
+ vm_page_busy_sleep(p, "vmocol");
+ VM_OBJECT_WLOCK(object);
+ VM_OBJECT_WLOCK(backing_object);
+ return (TAILQ_FIRST(&backing_object->memq));
+}
+
+static bool
vm_object_backing_scan(vm_object_t object, int op)
{
- int r = 1;
- vm_page_t p;
vm_object_t backing_object;
- vm_pindex_t backing_offset_index;
+ vm_page_t next, p, pp;
+ vm_pindex_t backing_offset_index, new_pindex;
VM_OBJECT_ASSERT_WLOCKED(object);
VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
@@ -1452,7 +1479,7 @@ vm_object_backing_scan(vm_object_t object, int op)
* shadow test may succeed! XXX
*/
if (backing_object->type != OBJT_DEFAULT) {
- return (0);
+ return (false);
}
}
if (op & OBSC_COLLAPSE_WAIT) {
@@ -1464,24 +1491,19 @@ vm_object_backing_scan(vm_object_t object, int op)
*/
p = TAILQ_FIRST(&backing_object->memq);
while (p) {
- vm_page_t next = TAILQ_NEXT(p, listq);
- vm_pindex_t new_pindex = p->pindex - backing_offset_index;
-
+ next = TAILQ_NEXT(p, listq);
+ new_pindex = p->pindex - backing_offset_index;
if (op & OBSC_TEST_ALL_SHADOWED) {
- vm_page_t pp;
-
/*
* Ignore pages outside the parent object's range
* and outside the parent object's mapping of the
* backing object.
*
- * note that we do not busy the backing object's
+ * Note that we do not busy the backing object's
* page.
*/
- if (
- p->pindex < backing_offset_index ||
- new_pindex >= object->size
- ) {
+ if (p->pindex < backing_offset_index ||
+ new_pindex >= object->size) {
p = next;
continue;
}
@@ -1497,55 +1519,26 @@ vm_object_backing_scan(vm_object_t object, int op)
*/
pp = vm_page_lookup(object, new_pindex);
- if (
- (pp == NULL || pp->valid == 0) &&
- !vm_pager_has_page(object, new_pindex, NULL, NULL)
- ) {
- r = 0;
- break;
- }
+ if ((pp == NULL || pp->valid == 0) &&
+ !vm_pager_has_page(object, new_pindex, NULL, NULL))
+ return (false);
}
/*
* Check for busy page
*/
if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
- vm_page_t pp;
-
- if (op & OBSC_COLLAPSE_NOWAIT) {
- if (!p->valid || vm_page_busied(p)) {
- p = next;
- continue;
- }
- } else if (op & OBSC_COLLAPSE_WAIT) {
- if (vm_page_busied(p)) {
- VM_OBJECT_WUNLOCK(object);
- vm_page_lock(p);
- VM_OBJECT_WUNLOCK(backing_object);
- vm_page_busy_sleep(p, "vmocol");
- VM_OBJECT_WLOCK(object);
- VM_OBJECT_WLOCK(backing_object);
- /*
- * If we slept, anything could have
- * happened. Since the object is
- * marked dead, the backing offset
- * should not have changed so we
- * just restart our scan.
- */
- p = TAILQ_FIRST(&backing_object->memq);
- continue;
- }
+ if (vm_page_busied(p)) {
+ p = vm_object_backing_scan_wait(object, p,
+ next, op);
+ continue;
}
- KASSERT(
- p->object == backing_object,
- ("vm_object_backing_scan: object mismatch")
- );
+ KASSERT(p->object == backing_object,
+ ("vm_object_backing_scan: object mismatch"));
- if (
- p->pindex < backing_offset_index ||
- new_pindex >= object->size
- ) {
+ if (p->pindex < backing_offset_index ||
+ new_pindex >= object->size) {
if (backing_object->type == OBJT_SWAP)
swap_pager_freespace(backing_object,
p->pindex, 1);
@@ -1567,43 +1560,45 @@ vm_object_backing_scan(vm_object_t object, int op)
}
pp = vm_page_lookup(object, new_pindex);
- if (
- (op & OBSC_COLLAPSE_NOWAIT) != 0 &&
- (pp != NULL && pp->valid == 0)
- ) {
- if (backing_object->type == OBJT_SWAP)
- swap_pager_freespace(backing_object,
- p->pindex, 1);
-
+ if (pp != NULL && vm_page_busied(pp)) {
/*
- * The page in the parent is not (yet) valid.
- * We don't know anything about the state of
- * the original page. It might be mapped,
- * so we must avoid the next if here.
+ * The page in the parent is busy and
+ * possibly not (yet) valid. Until
+ * its state is finalized by the busy
+ * bit owner, we can't tell whether it
+ * shadows the original page.
+ * Therefore, we must either skip it
+ * and the original (backing_object)
+ * page or wait for its state to be
+ * finalized.
*
- * This is due to a race in vm_fault() where
- * we must unbusy the original (backing_obj)
- * page before we can (re)lock the parent.
- * Hence we can get here.
+ * This is due to a race with vm_fault()
+ * where we must unbusy the original
+ * (backing_obj) page before we can
+ * (re)lock the parent. Hence we can
+ * get here.
*/
- p = next;
+ p = vm_object_backing_scan_wait(object, pp,
+ next, op);
continue;
}
- if (
- pp != NULL ||
- vm_pager_has_page(object, new_pindex, NULL, NULL)
- ) {
- if (backing_object->type == OBJT_SWAP)
- swap_pager_freespace(backing_object,
- p->pindex, 1);
+ KASSERT(pp == NULL || pp->valid != 0,
+ ("unbusy invalid page %p", pp));
+
+ if (pp != NULL || vm_pager_has_page(object,
+ new_pindex, NULL, NULL)) {
/*
- * page already exists in parent OR swap exists
- * for this location in the parent. Destroy
- * the original page from the backing object.
- *
- * Leave the parent's page alone
+ * The page already exists in the
+ * parent OR swap exists for this
+ * location in the parent. Leave the
+ * parent's page alone. Destroy the
+ * original page from the backing
+ * object.
*/
+ if (backing_object->type == OBJT_SWAP)
+ swap_pager_freespace(backing_object,
+ p->pindex, 1);
vm_page_lock(p);
KASSERT(!pmap_page_is_mapped(p),
("freeing mapped page %p", p));
@@ -1625,16 +1620,8 @@ vm_object_backing_scan(vm_object_t object, int op)
* vm_page_rename() will handle dirty and cache.
*/
if (vm_page_rename(p, object, new_pindex)) {
- if (op & OBSC_COLLAPSE_NOWAIT) {
- p = next;
- continue;
- }
- VM_OBJECT_WUNLOCK(backing_object);
- VM_OBJECT_WUNLOCK(object);
- VM_WAIT;
- VM_OBJECT_WLOCK(object);
- VM_OBJECT_WLOCK(backing_object);
- p = TAILQ_FIRST(&backing_object->memq);
+ p = vm_object_backing_scan_wait(object, NULL,
+ next, op);
continue;
}
@@ -1653,7 +1640,7 @@ vm_object_backing_scan(vm_object_t object, int op)
}
p = next;
}
- return (r);
+ return (true);
}
@@ -1820,8 +1807,8 @@ vm_object_collapse(vm_object_t object)
* there is nothing we can do so we give up.
*/
if (object->resident_page_count != object->size &&
- vm_object_backing_scan(object,
- OBSC_TEST_ALL_SHADOWED) == 0) {
+ !vm_object_backing_scan(object,
+ OBSC_TEST_ALL_SHADOWED)) {
VM_OBJECT_WUNLOCK(backing_object);
break;
}
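The three contention cases in the collapse scan now funnel through the new
vm_object_backing_scan_wait() helper, which drops both object locks, sleeps,
relocks, and returns the page at which to resume (the head of the backing
object's memq after a sleep). A condensed, illustrative sketch of the three
call sites, with the surrounding loop elided:

	/* Parent page pp is busy: sleep until its state is finalized. */
	p = vm_object_backing_scan_wait(object, pp, next, op);

	/* Backing-object page p itself is busy: sleep on p. */
	p = vm_object_backing_scan_wait(object, p, next, op);

	/* vm_page_rename() failed for lack of free pages: VM_WAIT. */
	p = vm_object_backing_scan_wait(object, NULL, next, op);

In the OBSC_COLLAPSE_NOWAIT case the helper does not sleep; it simply
returns the precomputed next page.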
diff --git a/tools/build/mk/OptionalObsoleteFiles.inc b/tools/build/mk/OptionalObsoleteFiles.inc
index 4361d08..f51e15e 100644
--- a/tools/build/mk/OptionalObsoleteFiles.inc
+++ b/tools/build/mk/OptionalObsoleteFiles.inc
@@ -4081,6 +4081,7 @@ OLD_FILES+=usr/bin/rcsdiff
OLD_FILES+=usr/bin/rcsfreeze
OLD_FILES+=usr/bin/rcsmerge
OLD_FILES+=usr/bin/rlog
+OLD_FILES+=usr/sbin/etcupdate
OLD_FILES+=usr/share/man/man1/ci.1.gz
OLD_FILES+=usr/share/man/man1/co.1.gz
OLD_FILES+=usr/share/man/man1/ident.1.gz
@@ -4093,6 +4094,7 @@ OLD_FILES+=usr/share/man/man1/rcsintro.1.gz
OLD_FILES+=usr/share/man/man1/rcsmerge.1.gz
OLD_FILES+=usr/share/man/man1/rlog.1.gz
OLD_FILES+=usr/share/man/man5/rcsfile.5.gz
+OLD_FILES+=usr/share/man/man8/etcupdate.8.gz
.endif
#.if ${MK_RESCUE} == no
diff --git a/tools/build/options/WITHOUT_RCS b/tools/build/options/WITHOUT_RCS
index 2a4ddec..e21e027 100644
--- a/tools/build/options/WITHOUT_RCS
+++ b/tools/build/options/WITHOUT_RCS
@@ -1,4 +1,5 @@
.\" $FreeBSD$
Set to not build
-.Xr rcs 1
-and related utilities.
+.Xr rcs 1 ,
+.Xr etcupdate 8 ,
+and related utilities.
diff --git a/tools/regression/mac/mac_portacl/misc.sh b/tools/regression/mac/mac_portacl/misc.sh
index 0fabe15..a1f152b 100755
--- a/tools/regression/mac/mac_portacl/misc.sh
+++ b/tools/regression/mac/mac_portacl/misc.sh
@@ -3,8 +3,7 @@
sysctl security.mac.portacl >/dev/null 2>&1
if [ $? -ne 0 ]; then
- echo "1..1"
- echo "not ok 1 # MAC_PORTACL is unavailable."
+ echo "1..0 # SKIP MAC_PORTACL is unavailable."
exit 0
fi
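The replacement line uses the TAP convention for skipping an entire script: a
"1..0" plan plus a "# SKIP" directive, instead of reporting a spurious
failure. The same guard works for any sysctl-gated regression test; a minimal
sketch, with a hypothetical security.mac.example OID:

	#!/bin/sh
	# Skip the whole script when the required policy module is absent.
	if ! sysctl security.mac.example >/dev/null 2>&1; then
		echo "1..0 # SKIP MAC_EXAMPLE is unavailable."
		exit 0
	fi
	echo "1..1"
	echo "ok 1 # policy present; real checks would go here"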
diff --git a/usr.bin/bmake/Makefile b/usr.bin/bmake/Makefile
index c91c09a..82decdb 100644
--- a/usr.bin/bmake/Makefile
+++ b/usr.bin/bmake/Makefile
@@ -167,7 +167,6 @@ accept test:
# override some simple things
BINDIR= /usr/bin
MANDIR= /usr/share/man/man
-DEFAULT_SYS_PATH= /usr/share/mk
# make sure we get this
CFLAGS+= ${COPTS.${.IMPSRC:T}}
diff --git a/usr.bin/mkimg/ebr.c b/usr.bin/mkimg/ebr.c
index 28931ea..526c494 100644
--- a/usr.bin/mkimg/ebr.c
+++ b/usr.bin/mkimg/ebr.c
@@ -39,11 +39,15 @@ __FBSDID("$FreeBSD$");
#include "mkimg.h"
#include "scheme.h"
+#ifndef DOSPTYP_FAT16B
+#define DOSPTYP_FAT16B 0x06
+#endif
#ifndef DOSPTYP_FAT32
#define DOSPTYP_FAT32 0x0b
#endif
static struct mkimg_alias ebr_aliases[] = {
+ { ALIAS_FAT16B, ALIAS_INT2TYPE(DOSPTYP_FAT16B) },
{ ALIAS_FAT32, ALIAS_INT2TYPE(DOSPTYP_FAT32) },
{ ALIAS_FREEBSD, ALIAS_INT2TYPE(DOSPTYP_386BSD) },
{ ALIAS_NONE, 0 }
diff --git a/usr.bin/mkimg/mbr.c b/usr.bin/mkimg/mbr.c
index 961ca45..20d4d91 100644
--- a/usr.bin/mkimg/mbr.c
+++ b/usr.bin/mkimg/mbr.c
@@ -39,6 +39,9 @@ __FBSDID("$FreeBSD$");
#include "mkimg.h"
#include "scheme.h"
+#ifndef DOSPTYP_FAT16B
+#define DOSPTYP_FAT16B 0x06
+#endif
#ifndef DOSPTYP_FAT32
#define DOSPTYP_FAT32 0x0b
#endif
@@ -49,6 +52,7 @@ __FBSDID("$FreeBSD$");
static struct mkimg_alias mbr_aliases[] = {
{ ALIAS_EBR, ALIAS_INT2TYPE(DOSPTYP_EXT) },
{ ALIAS_EFI, ALIAS_INT2TYPE(DOSPTYP_EFI) },
+ { ALIAS_FAT16B, ALIAS_INT2TYPE(DOSPTYP_FAT16B) },
{ ALIAS_FAT32, ALIAS_INT2TYPE(DOSPTYP_FAT32) },
{ ALIAS_FREEBSD, ALIAS_INT2TYPE(DOSPTYP_386BSD) },
{ ALIAS_NTFS, ALIAS_INT2TYPE(DOSPTYP_NTFS) },
diff --git a/usr.bin/mkimg/scheme.c b/usr.bin/mkimg/scheme.c
index 9bdf8a5..1e64855 100644
--- a/usr.bin/mkimg/scheme.c
+++ b/usr.bin/mkimg/scheme.c
@@ -50,6 +50,7 @@ static struct {
} scheme_alias[] = {
{ "ebr", ALIAS_EBR },
{ "efi", ALIAS_EFI },
+ { "fat16b", ALIAS_FAT16B },
{ "fat32", ALIAS_FAT32 },
{ "freebsd", ALIAS_FREEBSD },
{ "freebsd-boot", ALIAS_FREEBSD_BOOT },
diff --git a/usr.bin/mkimg/scheme.h b/usr.bin/mkimg/scheme.h
index 73b06eb..3ba4243 100644
--- a/usr.bin/mkimg/scheme.h
+++ b/usr.bin/mkimg/scheme.h
@@ -36,6 +36,7 @@ enum alias {
/* start */
ALIAS_EBR,
ALIAS_EFI,
+ ALIAS_FAT16B,
ALIAS_FAT32,
ALIAS_FREEBSD,
ALIAS_FREEBSD_BOOT,
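Together, these four hunks teach mkimg(8) to emit FAT16B (MBR type 0x06)
partitions by name in both the MBR and EBR schemes. An illustrative
invocation, assuming a prebuilt filesystem image fat16.img:

	# Build an MBR-partitioned disk image with a single FAT16B
	# (type 0x06) partition populated from fat16.img.
	mkimg -s mbr -p fat16b:=fat16.img -o disk.img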
diff --git a/usr.sbin/Makefile b/usr.sbin/Makefile
index 81be708..8e97961 100644
--- a/usr.sbin/Makefile
+++ b/usr.sbin/Makefile
@@ -7,6 +7,7 @@ SUBDIR= adduser \
arp \
binmiscctl \
bsdconfig \
+ camdd \
cdcontrol \
chkgrp \
chown \
@@ -23,7 +24,6 @@ SUBDIR= adduser \
digictl \
diskinfo \
dumpcis \
- etcupdate \
extattr \
extattrctl \
fifolog \
@@ -302,6 +302,10 @@ SUBDIR+= repquota
SUBDIR+= rwhod
.endif
+.if ${MK_RCS} != "no"
+SUBDIR+= etcupdate
+.endif
+
.if ${MK_SENDMAIL} != "no"
SUBDIR+= editmap
SUBDIR+= mailstats
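etcupdate(8) performs its three-way merges with merge(1), which is installed
as part of RCS, so its build is now conditional on MK_RCS; the two
OptionalObsoleteFiles entries above let a WITHOUT_RCS world clean out the
previously installed files. A src.conf(5) sketch:

	# /etc/src.conf
	WITHOUT_RCS=yes		# now also skips etcupdate(8)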
diff --git a/usr.sbin/camdd/Makefile b/usr.sbin/camdd/Makefile
new file mode 100644
index 0000000..0028668
--- /dev/null
+++ b/usr.sbin/camdd/Makefile
@@ -0,0 +1,11 @@
+# $FreeBSD$
+
+PROG= camdd
+SRCS= camdd.c
+SDIR= ${.CURDIR}/../../sys
+DPADD= ${LIBCAM} ${LIBMT} ${LIBSBUF} ${LIBBSDXML} ${LIBUTIL} ${LIBTHR}
+LDADD= -lcam -lmt -lsbuf -lbsdxml -lutil -lthr
+NO_WTHREAD_SAFETY= 1
+MAN= camdd.8
+
+.include <bsd.prog.mk>
diff --git a/usr.sbin/camdd/camdd.8 b/usr.sbin/camdd/camdd.8
new file mode 100644
index 0000000..af556bb
--- /dev/null
+++ b/usr.sbin/camdd/camdd.8
@@ -0,0 +1,283 @@
+.\"
+.\" Copyright (c) 2015 Spectra Logic Corporation
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions, and the following disclaimer,
+.\" without modification.
+.\" 2. Redistributions in binary form must reproduce at minimum a disclaimer
+.\" substantially similar to the "NO WARRANTY" disclaimer below
+.\" ("Disclaimer") and any redistribution must be conditioned upon
+.\" including a substantially similar Disclaimer requirement for further
+.\" binary redistribution.
+.\"
+.\" NO WARRANTY
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+.\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+.\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+.\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+.\" HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+.\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGES.
+.\"
+.\" Authors: Ken Merry (Spectra Logic Corporation)
+.\"
+.\" $FreeBSD$
+.\"
+.Dd November 11, 2015
+.Dt CAMDD 8
+.Os
+.Sh NAME
+.Nm camdd
+.Nd CAM data transfer utility
+.Sh SYNOPSIS
+.Nm
+.Aq Fl i|o Ar pass=pass_dev|file=filename,bs=blocksize,[...]
+.Op Fl C Ar retry_count
+.Op Fl E
+.Op Fl m Ar max_io
+.Op Fl t Ar timeout
+.Op Fl v
+.Op Fl h
+.Sh DESCRIPTION
+The
+.Nm
+utility is a sequential data transfer program that offers standard
+.Xr read 2
+and
+.Xr write 2
+operations in addition to a mode that uses the asynchronous
+.Xr pass 4
+API.
+The asynchronous
+.Xr pass 4
+API allows multiple requests to be queued to a device simultaneously.
+.Pp
+.Nm
+collects performance information and will display it when the transfer
+completes, when
+.Nm
+is terminated, or when it receives a SIGINFO signal.
+.Pp
+The following options are available:
+.Bl -tag -width 12n
+.It Fl i | o Ar args
+Specify the input and output device or file.
+Both
+.Fl i
+and
+.Fl o
+must be specified.
+There are a number of parameters that can be specified.
+One of the first two (file or pass) MUST be specified to indicate which I/O
+method to use on the device in question.
+.Bl -tag -width 9n
+.It pass=dev
+Specify a
+.Xr pass 4
+device to operate on.
+This requests that the device in question be accessed via the
+asynchronous
+.Xr pass 4
+interface.
+.Pp
+The device name can be a
+.Xr pass 4
+name and unit number, for instance
+.Dq pass0 ,
+or a regular peripheral driver name and unit number, for instance
+.Dq da5 .
+It can also be the path of a
+.Xr pass 4
+or other disk device, like
+.Dq /dev/da5 .
+It may also be a bus:target:lun, for example:
+.Dq 0:5:0 .
+.Pp
+Only
+.Xr pass 4
+devices for
+.Tn SCSI
+disk-like devices are supported.
+.Tn ATA
+devices are not currently supported, but support could be added later.
+Specifically,
+.Tn SCSI
+Direct Access (type 0), WORM (type 4), CDROM (type 5), and RBC (Reduced
+Block Command, type 14) devices are supported.
+Tape drives, medium changers, enclosures etc. are not supported.
+.It file=path
+Specify a file or device to operate on.
+This requests that the file or device in question be accessed using the
+standard
+.Xr read 2
+and
+.Xr write 2
+system calls.
+The file interface does not support queueing multiple commands at a time.
+It does support probing disk sector size and capacity information, and tape
+blocksize and maximum transfer size information.
+The file interface supports standard files, disks, tape drives, special
+devices, pipes and standard input and output.
+If the file is specified as a
+.Dq - ,
+standard input or standard output is used.
+For tape devices, the specified blocksize will be the size that
+.Nm
+attempts to use to write to or read from the tape.
+When writing to a tape device, the blocksize is treated like a disk sector
+size.
+This means
+.Nm
+will not write anything smaller than the sector size.
+At the end of a transfer, if there isn't sufficient data from the reader
+to yield a full block,
+.Nm
+will pad the end of the data from the reader with zeros to make up a full
+block.
+.It bs=N
+Specify the blocksize to use for transfers.
+.Nm
+will attempt to read or write using the requested blocksize.
+.Pp
+Note that the blocksize given only applies to either the input or the
+output path.
+To use the same blocksize for the input and output transfers, you must
+specify that blocksize with both the
+.Fl i
+and
+.Fl o
+arguments.
+.Pp
+The blocksize may be specified in bytes, or using any suffix (e.g. k, M, G)
+supported by
+.Xr expand_number 3 .
+.It offset=N
+Specify the starting offset for the input or output device or file.
+The offset may be specified in bytes, or by using any suffix (e.g. k, M, G)
+supported by
+.Xr expand_number 3 .
+.It depth=N
+Specify a desired queue depth for the input or output path.
+.Nm
+will attempt to keep the requested number of requests of the specified
+blocksize queued to the input or output device.
+Queue depths greater than 1 are only supported for the asynchronous
+.Xr pass 4
+I/O method.
+The queue depth is maintained on a best effort basis, and may not be
+possible to maintain for especially fast devices.
+For writes, maintaining the queue depth also depends on a sufficiently
+fast reading device.
+.It mcs=N
+Specify the minimum command size to use for
+.Xr pass 4
+devices.
+Some devices do not support 6 byte
+.Tn SCSI
+commands.
+The
+.Xr da 4
+device handles this restriction automatically, but the
+.Xr pass 4
+device allows the user to specify the
+.Tn SCSI
+command used.
+If a device does not accept 6 byte
+.Tn SCSI
+READ/WRITE commands (which is the default at lower LBAs), it will generally
+accept 10 byte
+.Tn SCSI
+commands instead.
+.It debug=N
+Specify the debug level for this device.
+There is currently only one debug level setting, so setting this to any
+non-zero value will turn on debugging.
+The debug facility may be expanded in the future.
+.El
+.It Fl C Ar count
+Specify the retry count for commands sent via the asynchronous
+.Xr pass 4
+interface.
+This does not apply to commands sent via the file interface.
+.It Fl E
+Enable kernel error recovery for the
+.Xr pass 4
+driver.
+If error recovery is not enabled, unit attention conditions and other
+transient failures may cause the transfer to fail.
+.It Fl m Ar size
+Specify the maximum amount of data to be transferred.
+This may be specified in bytes, or by using any suffix (e.g. K, M, G)
+supported by
+.Xr expand_number 3 .
+.It Fl t Ar timeout
+Specify the command timeout in seconds to use for commands sent via the
+.Xr pass 4
+driver.
+.It Fl v
+Enable verbose reporting of errors.
+This is recommended to aid in debugging any
+.Tn SCSI
+issues that come up.
+.It Fl h
+Display the
+.Nm
+usage message.
+.El
+.Pp
+If
+.Nm
+receives a SIGINFO signal, it will print the current input and output byte
+counts, elapsed runtime and average throughput.
+If
+.Nm
+receives a SIGINT signal, it will print the current input and output byte
+counts, elapsed runtime and average throughput and then exit.
+.Sh EXAMPLES
+.Dl camdd -i pass=da8,bs=512k,depth=4 -o pass=da3,bs=512k,depth=4
+.Pp
+Copy all data from da8 to da3 using a blocksize of 512k for both drives,
+and attempt to maintain a queue depth of 4 on both the input and output
+devices.
+The transfer will stop when the end of either device is reached.
+.Pp
+.Dl camdd -i file=/dev/zero,bs=1M -o pass=da5,bs=1M,depth=4 -m 100M
+.Pp
+Read 1MB blocks of zeros from /dev/zero, and write them to da5 with a
+desired queue depth of 4.
+Stop the transfer after 100MB has been written.
+.Pp
+.Dl camdd -i pass=da8,bs=1M,depth=3 -o file=disk.img
+.Pp
+Copy disk da8 using a 1MB blocksize and desired queue depth of 3 to the
+file disk.img.
+.Pp
+.Dl camdd -i file=/etc/rc -o file=-
+.Pp
+Read the file /etc/rc and write it to standard output.
+.Pp
+.Dl camdd -i pass=da10,bs=64k,depth=16 -o file=/dev/nsa0,bs=128k
+.Pp
+Copy 64K blocks from the disk da10 with a queue depth of 16, and write
+to the tape drive sa0 with a 128k blocksize.
+The copy will stop when either the end of the disk or tape is reached.
+.Sh SEE ALSO
+.Xr cam 3 ,
+.Xr cam 4 ,
+.Xr pass 4 ,
+.Xr camcontrol 8
+.Sh HISTORY
+.Nm
+first appeared in
+.Fx 10.2 .
+.Sh AUTHORS
+.An Kenneth Merry Aq Mt ken@FreeBSD.org
diff --git a/usr.sbin/camdd/camdd.c b/usr.sbin/camdd/camdd.c
new file mode 100644
index 0000000..573214e
--- /dev/null
+++ b/usr.sbin/camdd/camdd.c
@@ -0,0 +1,3428 @@
+/*-
+ * Copyright (c) 1997-2007 Kenneth D. Merry
+ * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * Authors: Ken Merry (Spectra Logic Corporation)
+ */
+
+/*
+ * This is eventually intended to be:
+ * - A basic data transfer/copy utility
+ * - A simple benchmark utility
+ * - An example of how to use the asynchronous pass(4) driver interface.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/ioctl.h>
+#include <sys/stdint.h>
+#include <sys/types.h>
+#include <sys/endian.h>
+#include <sys/param.h>
+#include <sys/sbuf.h>
+#include <sys/stat.h>
+#include <sys/event.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <vm/vm.h>
+#include <machine/bus.h>
+#include <sys/bus.h>
+#include <sys/bus_dma.h>
+#include <sys/mtio.h>
+#include <sys/conf.h>
+#include <sys/disk.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <semaphore.h>
+#include <string.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <ctype.h>
+#include <err.h>
+#include <libutil.h>
+#include <pthread.h>
+#include <assert.h>
+#include <bsdxml.h>
+
+#include <cam/cam.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_ccb.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/scsi/scsi_pass.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/scsi/smp_all.h>
+#include <camlib.h>
+#include <mtlib.h>
+#include <zlib.h>
+
+typedef enum {
+ CAMDD_CMD_NONE = 0x00000000,
+ CAMDD_CMD_HELP = 0x00000001,
+ CAMDD_CMD_WRITE = 0x00000002,
+ CAMDD_CMD_READ = 0x00000003
+} camdd_cmdmask;
+
+typedef enum {
+ CAMDD_ARG_NONE = 0x00000000,
+ CAMDD_ARG_VERBOSE = 0x00000001,
+ CAMDD_ARG_DEVICE = 0x00000002,
+ CAMDD_ARG_BUS = 0x00000004,
+ CAMDD_ARG_TARGET = 0x00000008,
+ CAMDD_ARG_LUN = 0x00000010,
+ CAMDD_ARG_UNIT = 0x00000020,
+ CAMDD_ARG_TIMEOUT = 0x00000040,
+ CAMDD_ARG_ERR_RECOVER = 0x00000080,
+ CAMDD_ARG_RETRIES = 0x00000100
+} camdd_argmask;
+
+typedef enum {
+ CAMDD_DEV_NONE = 0x00,
+ CAMDD_DEV_PASS = 0x01,
+ CAMDD_DEV_FILE = 0x02
+} camdd_dev_type;
+
+struct camdd_io_opts {
+ camdd_dev_type dev_type;
+ char *dev_name;
+ uint64_t blocksize;
+ uint64_t queue_depth;
+ uint64_t offset;
+ int min_cmd_size;
+ int write_dev;
+ uint64_t debug;
+};
+
+typedef enum {
+ CAMDD_BUF_NONE,
+ CAMDD_BUF_DATA,
+ CAMDD_BUF_INDIRECT
+} camdd_buf_type;
+
+struct camdd_buf_indirect {
+ /*
+ * Pointer to the source buffer.
+ */
+ struct camdd_buf *src_buf;
+
+ /*
+ * Offset into the source buffer, in bytes.
+ */
+ uint64_t offset;
+ /*
+ * Pointer to the starting point in the source buffer.
+ */
+ uint8_t *start_ptr;
+
+ /*
+ * Length of this chunk in bytes.
+ */
+ size_t len;
+};
+
+struct camdd_buf_data {
+ /*
+ * Buffer allocated when we allocate this camdd_buf. This should
+ * be the size of the blocksize for this device.
+ */
+ uint8_t *buf;
+
+ /*
+ * The amount of backing store allocated in buf. Generally this
+ * will be the blocksize of the device.
+ */
+ uint32_t alloc_len;
+
+ /*
+ * The amount of data that was put into the buffer (on reads) or
+ * the amount of data we have put onto the src_list so far (on
+ * writes).
+ */
+ uint32_t fill_len;
+
+ /*
+ * The amount of data that was not transferred.
+ */
+ uint32_t resid;
+
+ /*
+ * Starting byte offset on the reader.
+ */
+ uint64_t src_start_offset;
+
+ /*
+ * CCB used for pass(4) device targets.
+ */
+ union ccb ccb;
+
+ /*
+ * Number of scatter/gather segments.
+ */
+ int sg_count;
+
+ /*
+ * Set if we had to tack on an extra buffer to round the transfer
+ * up to a sector size.
+ */
+ int extra_buf;
+
+ /*
+ * Scatter/gather list used generally when we're the writer for a
+ * pass(4) device.
+ */
+ bus_dma_segment_t *segs;
+
+ /*
+ * Scatter/gather list used generally when we're the writer for a
+ * file or block device;
+ */
+ struct iovec *iovec;
+};
+
+union camdd_buf_types {
+ struct camdd_buf_indirect indirect;
+ struct camdd_buf_data data;
+};
+
+typedef enum {
+ CAMDD_STATUS_NONE,
+ CAMDD_STATUS_OK,
+ CAMDD_STATUS_SHORT_IO,
+ CAMDD_STATUS_EOF,
+ CAMDD_STATUS_ERROR
+} camdd_buf_status;
+
+struct camdd_buf {
+ camdd_buf_type buf_type;
+ union camdd_buf_types buf_type_spec;
+
+ camdd_buf_status status;
+
+ uint64_t lba;
+ size_t len;
+
+ /*
+ * A reference count of how many indirect buffers point to this
+ * buffer.
+ */
+ int refcount;
+
+ /*
+ * A link back to our parent device.
+ */
+ struct camdd_dev *dev;
+ STAILQ_ENTRY(camdd_buf) links;
+ STAILQ_ENTRY(camdd_buf) work_links;
+
+ /*
+ * A count of the buffers on the src_list.
+ */
+ int src_count;
+
+ /*
+ * List of buffers from our partner thread that are the components
+ * of this buffer for the I/O. Uses src_links.
+ */
+ STAILQ_HEAD(,camdd_buf) src_list;
+ STAILQ_ENTRY(camdd_buf) src_links;
+};
+
+#define NUM_DEV_TYPES 2
+
+struct camdd_dev_pass {
+ int scsi_dev_type;
+ struct cam_device *dev;
+ uint64_t max_sector;
+ uint32_t block_len;
+ uint32_t cpi_maxio;
+};
+
+typedef enum {
+ CAMDD_FILE_NONE,
+ CAMDD_FILE_REG,
+ CAMDD_FILE_STD,
+ CAMDD_FILE_PIPE,
+ CAMDD_FILE_DISK,
+ CAMDD_FILE_TAPE,
+ CAMDD_FILE_TTY,
+ CAMDD_FILE_MEM
+} camdd_file_type;
+
+typedef enum {
+ CAMDD_FF_NONE = 0x00,
+ CAMDD_FF_CAN_SEEK = 0x01
+} camdd_file_flags;
+
+struct camdd_dev_file {
+ int fd;
+ struct stat sb;
+ char filename[MAXPATHLEN + 1];
+ camdd_file_type file_type;
+ camdd_file_flags file_flags;
+ uint8_t *tmp_buf;
+};
+
+struct camdd_dev_block {
+ int fd;
+ uint64_t size_bytes;
+ uint32_t block_len;
+};
+
+union camdd_dev_spec {
+ struct camdd_dev_pass pass;
+ struct camdd_dev_file file;
+ struct camdd_dev_block block;
+};
+
+typedef enum {
+ CAMDD_DEV_FLAG_NONE = 0x00,
+ CAMDD_DEV_FLAG_EOF = 0x01,
+ CAMDD_DEV_FLAG_PEER_EOF = 0x02,
+ CAMDD_DEV_FLAG_ACTIVE = 0x04,
+ CAMDD_DEV_FLAG_EOF_SENT = 0x08,
+ CAMDD_DEV_FLAG_EOF_QUEUED = 0x10
+} camdd_dev_flags;
+
+struct camdd_dev {
+ camdd_dev_type dev_type;
+ union camdd_dev_spec dev_spec;
+ camdd_dev_flags flags;
+ char device_name[MAXPATHLEN+1];
+ uint32_t blocksize;
+ uint32_t sector_size;
+ uint64_t max_sector;
+ uint64_t sector_io_limit;
+ int min_cmd_size;
+ int write_dev;
+ int retry_count;
+ int io_timeout;
+ int debug;
+ uint64_t start_offset_bytes;
+ uint64_t next_io_pos_bytes;
+ uint64_t next_peer_pos_bytes;
+ uint64_t next_completion_pos_bytes;
+ uint64_t peer_bytes_queued;
+ uint64_t bytes_transferred;
+ uint32_t target_queue_depth;
+ uint32_t cur_active_io;
+ uint8_t *extra_buf;
+ uint32_t extra_buf_len;
+ struct camdd_dev *peer_dev;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ int kq;
+
+ int (*run)(struct camdd_dev *dev);
+ int (*fetch)(struct camdd_dev *dev);
+
+ /*
+ * Buffers that are available for I/O. Uses links.
+ */
+ STAILQ_HEAD(,camdd_buf) free_queue;
+
+ /*
+ * Free indirect buffers. These are used for breaking a large
+ * buffer into multiple pieces.
+ */
+ STAILQ_HEAD(,camdd_buf) free_indirect_queue;
+
+ /*
+ * Buffers that have been queued to the kernel. Uses links.
+ */
+ STAILQ_HEAD(,camdd_buf) active_queue;
+
+ /*
+ * Will generally contain one of our buffers that is waiting for enough
+ * I/O from our partner thread to be able to execute. This will
+ * generally happen when our per-I/O-size is larger than the
+ * partner thread's per-I/O-size. Uses links.
+ */
+ STAILQ_HEAD(,camdd_buf) pending_queue;
+
+ /*
+ * Number of buffers on the pending queue
+ */
+ int num_pending_queue;
+
+ /*
+ * Buffers that are filled and ready to execute. This is used when
+ * our partner (reader) thread sends us blocks that are larger than
+ * our blocksize, and so we have to split them into multiple pieces.
+ */
+ STAILQ_HEAD(,camdd_buf) run_queue;
+
+ /*
+ * Number of buffers on the run queue.
+ */
+ int num_run_queue;
+
+ STAILQ_HEAD(,camdd_buf) reorder_queue;
+
+ int num_reorder_queue;
+
+ /*
+ * Buffers that have been queued to us by our partner thread
+ * (generally the reader thread) to be written out. Uses
+ * work_links.
+ */
+ STAILQ_HEAD(,camdd_buf) work_queue;
+
+ /*
+ * Buffers that have been completed by our partner thread. Uses
+ * work_links.
+ */
+ STAILQ_HEAD(,camdd_buf) peer_done_queue;
+
+ /*
+ * Number of buffers on the peer done queue.
+ */
+ uint32_t num_peer_done_queue;
+
+ /*
+ * A list of buffers that we have queued to our peer thread. Uses
+ * links.
+ */
+ STAILQ_HEAD(,camdd_buf) peer_work_queue;
+
+ /*
+ * Number of buffers on the peer work queue.
+ */
+ uint32_t num_peer_work_queue;
+};
+
+static sem_t camdd_sem;
+static int need_exit = 0;
+static int error_exit = 0;
+static int need_status = 0;
+
+#ifndef min
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+/*
+ * XXX KDM private copy of timespecsub(). This is normally defined in
+ * sys/time.h, but is only enabled in the kernel. If that definition is
+ * enabled in userland, it breaks the build of libnetbsd.
+ */
+#ifndef timespecsub
+#define timespecsub(vvp, uvp) \
+ do { \
+ (vvp)->tv_sec -= (uvp)->tv_sec; \
+ (vvp)->tv_nsec -= (uvp)->tv_nsec; \
+ if ((vvp)->tv_nsec < 0) { \
+ (vvp)->tv_sec--; \
+ (vvp)->tv_nsec += 1000000000; \
+ } \
+ } while (0)
+#endif
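+
+/*
+ * Example: for timespecs taken with clock_gettime(2) around a transfer,
+ * timespecsub(&end_time, &start_time) leaves the elapsed time in
+ * end_time.
+ */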
+
+
+/* Generically useful offsets into the peripheral private area */
+#define ppriv_ptr0 periph_priv.entries[0].ptr
+#define ppriv_ptr1 periph_priv.entries[1].ptr
+#define ppriv_field0 periph_priv.entries[0].field
+#define ppriv_field1 periph_priv.entries[1].field
+
+#define ccb_buf ppriv_ptr0
+
+#define CAMDD_FILE_DEFAULT_BLOCK 524288
+#define CAMDD_FILE_DEFAULT_DEPTH 1
+#define CAMDD_PASS_MAX_BLOCK 1048576
+#define CAMDD_PASS_DEFAULT_DEPTH 6
+#define CAMDD_PASS_RW_TIMEOUT (60 * 1000)
+
+static int parse_btl(char *tstr, int *bus, int *target, int *lun,
+ camdd_argmask *arglst);
+void camdd_free_dev(struct camdd_dev *dev);
+struct camdd_dev *camdd_alloc_dev(camdd_dev_type dev_type,
+ struct kevent *new_ke, int num_ke,
+ int retry_count, int timeout);
+static struct camdd_buf *camdd_alloc_buf(struct camdd_dev *dev,
+ camdd_buf_type buf_type);
+void camdd_release_buf(struct camdd_buf *buf);
+struct camdd_buf *camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type);
+int camdd_buf_sg_create(struct camdd_buf *buf, int iovec,
+ uint32_t sector_size, uint32_t *num_sectors_used,
+ int *double_buf_needed);
+uint32_t camdd_buf_get_len(struct camdd_buf *buf);
+void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf);
+int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize,
+ uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran);
+struct camdd_dev *camdd_probe_file(int fd, struct camdd_io_opts *io_opts,
+ int retry_count, int timeout);
+struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev,
+ struct camdd_io_opts *io_opts,
+ camdd_argmask arglist, int probe_retry_count,
+ int probe_timeout, int io_retry_count,
+ int io_timeout);
+void *camdd_file_worker(void *arg);
+camdd_buf_status camdd_ccb_status(union ccb *ccb);
+int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf);
+int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf);
+void camdd_peer_done(struct camdd_buf *buf);
+void camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf,
+ int *error_count);
+int camdd_pass_fetch(struct camdd_dev *dev);
+int camdd_file_run(struct camdd_dev *dev);
+int camdd_pass_run(struct camdd_dev *dev);
+int camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len);
+int camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf);
+void camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth,
+ uint32_t *peer_depth, uint32_t *our_bytes,
+ uint32_t *peer_bytes);
+void *camdd_worker(void *arg);
+void camdd_sig_handler(int sig);
+void camdd_print_status(struct camdd_dev *camdd_dev,
+ struct camdd_dev *other_dev,
+ struct timespec *start_time);
+int camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts,
+ uint64_t max_io, int retry_count, int timeout);
+int camdd_parse_io_opts(char *args, int is_write,
+ struct camdd_io_opts *io_opts);
+void usage(void);
+
+/*
+ * Parse out a bus, or a bus, target and lun in the following
+ * format:
+ * bus
+ * bus:target
+ * bus:target:lun
+ *
+ * Returns the number of parsed components, or 0.
+ */
+static int
+parse_btl(char *tstr, int *bus, int *target, int *lun, camdd_argmask *arglst)
+{
+ char *tmpstr;
+ int convs = 0;
+
+ while (isspace(*tstr) && (*tstr != '\0'))
+ tstr++;
+
+ tmpstr = (char *)strtok(tstr, ":");
+ if ((tmpstr != NULL) && (*tmpstr != '\0')) {
+ *bus = strtol(tmpstr, NULL, 0);
+ *arglst |= CAMDD_ARG_BUS;
+ convs++;
+ tmpstr = (char *)strtok(NULL, ":");
+ if ((tmpstr != NULL) && (*tmpstr != '\0')) {
+ *target = strtol(tmpstr, NULL, 0);
+ *arglst |= CAMDD_ARG_TARGET;
+ convs++;
+ tmpstr = (char *)strtok(NULL, ":");
+ if ((tmpstr != NULL) && (*tmpstr != '\0')) {
+ *lun = strtol(tmpstr, NULL, 0);
+ *arglst |= CAMDD_ARG_LUN;
+ convs++;
+ }
+ }
+ }
+
+ return (convs);
+}
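+
+/*
+ * Example: parse_btl("0:5:0", &bus, &target, &lun, &args) returns 3 and
+ * sets CAMDD_ARG_BUS, CAMDD_ARG_TARGET and CAMDD_ARG_LUN in args, while
+ * parse_btl("2", ...) returns 1 and sets only CAMDD_ARG_BUS.
+ */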
+
+/*
+ * XXX KDM clean up and free all of the buffers on the queue!
+ */
+void
+camdd_free_dev(struct camdd_dev *dev)
+{
+ if (dev == NULL)
+ return;
+
+ switch (dev->dev_type) {
+ case CAMDD_DEV_FILE: {
+ struct camdd_dev_file *file_dev = &dev->dev_spec.file;
+
+ if (file_dev->fd != -1)
+ close(file_dev->fd);
+ free(file_dev->tmp_buf);
+ break;
+ }
+ case CAMDD_DEV_PASS: {
+ struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass;
+
+ if (pass_dev->dev != NULL)
+ cam_close_device(pass_dev->dev);
+ break;
+ }
+ default:
+ break;
+ }
+
+ free(dev);
+}
+
+struct camdd_dev *
+camdd_alloc_dev(camdd_dev_type dev_type, struct kevent *new_ke, int num_ke,
+ int retry_count, int timeout)
+{
+ struct camdd_dev *dev = NULL;
+ struct kevent *ke;
+ size_t ke_size;
+ int retval = 0;
+
+ dev = malloc(sizeof(*dev));
+ if (dev == NULL) {
+ warn("%s: unable to malloc %zu bytes", __func__, sizeof(*dev));
+ goto bailout;
+ }
+
+ bzero(dev, sizeof(*dev));
+
+ dev->dev_type = dev_type;
+ dev->io_timeout = timeout;
+ dev->retry_count = retry_count;
+ STAILQ_INIT(&dev->free_queue);
+ STAILQ_INIT(&dev->free_indirect_queue);
+ STAILQ_INIT(&dev->active_queue);
+ STAILQ_INIT(&dev->pending_queue);
+ STAILQ_INIT(&dev->run_queue);
+ STAILQ_INIT(&dev->reorder_queue);
+ STAILQ_INIT(&dev->work_queue);
+ STAILQ_INIT(&dev->peer_done_queue);
+ STAILQ_INIT(&dev->peer_work_queue);
+ retval = pthread_mutex_init(&dev->mutex, NULL);
+ if (retval != 0) {
+ warnc(retval, "%s: failed to initialize mutex", __func__);
+ goto bailout;
+ }
+
+ retval = pthread_cond_init(&dev->cond, NULL);
+ if (retval != 0) {
+ warnc(retval, "%s: failed to initialize condition variable",
+ __func__);
+ goto bailout;
+ }
+
+ dev->kq = kqueue();
+ if (dev->kq == -1) {
+ warn("%s: Unable to create kqueue", __func__);
+ goto bailout;
+ }
+
+ ke_size = sizeof(struct kevent) * (num_ke + 4);
+ ke = malloc(ke_size);
+ if (ke == NULL) {
+ warn("%s: unable to malloc %zu bytes", __func__, ke_size);
+ goto bailout;
+ }
+ bzero(ke, ke_size);
+ if (num_ke > 0)
+ bcopy(new_ke, ke, num_ke * sizeof(struct kevent));
+
+ EV_SET(&ke[num_ke++], (uintptr_t)&dev->work_queue, EVFILT_USER,
+ EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0);
+ EV_SET(&ke[num_ke++], (uintptr_t)&dev->peer_done_queue, EVFILT_USER,
+ EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0);
+ EV_SET(&ke[num_ke++], SIGINFO, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0);
+ EV_SET(&ke[num_ke++], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0);
+
+ retval = kevent(dev->kq, ke, num_ke, NULL, 0, NULL);
+ if (retval == -1) {
+ warn("%s: Unable to register kevents", __func__);
+ goto bailout;
+ }
+
+
+ return (dev);
+
+bailout:
+ free(dev);
+
+ return (NULL);
+}
+
+static struct camdd_buf *
+camdd_alloc_buf(struct camdd_dev *dev, camdd_buf_type buf_type)
+{
+ struct camdd_buf *buf = NULL;
+ uint8_t *data_ptr = NULL;
+
+ /*
+ * We only need to allocate data space for data buffers.
+ */
+ switch (buf_type) {
+ case CAMDD_BUF_DATA:
+ data_ptr = malloc(dev->blocksize);
+ if (data_ptr == NULL) {
+ warn("unable to allocate %u bytes", dev->blocksize);
+ goto bailout_error;
+ }
+ break;
+ default:
+ break;
+ }
+
+ buf = malloc(sizeof(*buf));
+ if (buf == NULL) {
+ warn("unable to allocate %zu bytes", sizeof(*buf));
+ goto bailout_error;
+ }
+
+ bzero(buf, sizeof(*buf));
+ buf->buf_type = buf_type;
+ buf->dev = dev;
+ switch (buf_type) {
+ case CAMDD_BUF_DATA: {
+ struct camdd_buf_data *data;
+
+ data = &buf->buf_type_spec.data;
+
+ data->alloc_len = dev->blocksize;
+ data->buf = data_ptr;
+ break;
+ }
+ case CAMDD_BUF_INDIRECT:
+ break;
+ default:
+ break;
+ }
+ STAILQ_INIT(&buf->src_list);
+
+ return (buf);
+
+bailout_error:
+ if (data_ptr != NULL)
+ free(data_ptr);
+
+ if (buf != NULL)
+ free(buf);
+
+ return (NULL);
+}
+
+void
+camdd_release_buf(struct camdd_buf *buf)
+{
+ struct camdd_dev *dev;
+
+ dev = buf->dev;
+
+ switch (buf->buf_type) {
+ case CAMDD_BUF_DATA: {
+ struct camdd_buf_data *data;
+
+ data = &buf->buf_type_spec.data;
+
+ if (data->segs != NULL) {
+ if (data->extra_buf != 0) {
+ void *extra_buf;
+
+ extra_buf = (void *)
+ data->segs[data->sg_count - 1].ds_addr;
+ free(extra_buf);
+ data->extra_buf = 0;
+ }
+ free(data->segs);
+ data->segs = NULL;
+ data->sg_count = 0;
+ } else if (data->iovec != NULL) {
+ if (data->extra_buf != 0) {
+ free(data->iovec[data->sg_count - 1].iov_base);
+ data->extra_buf = 0;
+ }
+ free(data->iovec);
+ data->iovec = NULL;
+ data->sg_count = 0;
+ }
+ STAILQ_INSERT_TAIL(&dev->free_queue, buf, links);
+ break;
+ }
+ case CAMDD_BUF_INDIRECT:
+ STAILQ_INSERT_TAIL(&dev->free_indirect_queue, buf, links);
+ break;
+ default:
+ err(1, "%s: Invalid buffer type %d for released buffer",
+ __func__, buf->buf_type);
+ break;
+ }
+}
+
+struct camdd_buf *
+camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type)
+{
+ struct camdd_buf *buf = NULL;
+
+ switch (buf_type) {
+ case CAMDD_BUF_DATA:
+ buf = STAILQ_FIRST(&dev->free_queue);
+ if (buf != NULL) {
+ struct camdd_buf_data *data;
+ uint8_t *data_ptr;
+ uint32_t alloc_len;
+
+ STAILQ_REMOVE_HEAD(&dev->free_queue, links);
+ data = &buf->buf_type_spec.data;
+ data_ptr = data->buf;
+ alloc_len = data->alloc_len;
+ bzero(buf, sizeof(*buf));
+ data->buf = data_ptr;
+ data->alloc_len = alloc_len;
+ }
+ break;
+ case CAMDD_BUF_INDIRECT:
+ buf = STAILQ_FIRST(&dev->free_indirect_queue);
+ if (buf != NULL) {
+ STAILQ_REMOVE_HEAD(&dev->free_indirect_queue, links);
+
+ bzero(buf, sizeof(*buf));
+ }
+ break;
+ default:
+ warnx("Unknown buffer type %d requested", buf_type);
+ break;
+ }
+
+
+ if (buf == NULL)
+ return (camdd_alloc_buf(dev, buf_type));
+ else {
+ STAILQ_INIT(&buf->src_list);
+ buf->dev = dev;
+ buf->buf_type = buf_type;
+
+ return (buf);
+ }
+}
+
+int
+camdd_buf_sg_create(struct camdd_buf *buf, int iovec, uint32_t sector_size,
+ uint32_t *num_sectors_used, int *double_buf_needed)
+{
+ struct camdd_buf *tmp_buf;
+ struct camdd_buf_data *data;
+ uint8_t *extra_buf = NULL;
+ size_t extra_buf_len = 0;
+ int i, retval = 0;
+
+ data = &buf->buf_type_spec.data;
+
+ data->sg_count = buf->src_count;
+ /*
+ * Compose a scatter/gather list from all of the buffers in the list.
+ * If the length of the buffer isn't a multiple of the sector size,
+ * we'll have to add an extra buffer. This should only happen
+ * at the end of a transfer.
+ */
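+ /*
+ * Example: with fill_len == 1000 and sector_size == 512, this
+ * allocates a 24-byte extra buffer and one extra S/G element,
+ * padding the transfer to 1024 bytes (two full sectors).
+ */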
+ if ((data->fill_len % sector_size) != 0) {
+ extra_buf_len = sector_size - (data->fill_len % sector_size);
+ extra_buf = calloc(extra_buf_len, 1);
+ if (extra_buf == NULL) {
+ warn("%s: unable to allocate %zu bytes for extra "
+ "buffer space", __func__, extra_buf_len);
+ retval = 1;
+ goto bailout;
+ }
+ data->extra_buf = 1;
+ data->sg_count++;
+ }
+ if (iovec == 0) {
+ data->segs = calloc(data->sg_count, sizeof(bus_dma_segment_t));
+ if (data->segs == NULL) {
+ warn("%s: unable to allocate %zu bytes for S/G list",
+ __func__, sizeof(bus_dma_segment_t) *
+ data->sg_count);
+ retval = 1;
+ goto bailout;
+ }
+
+ } else {
+ data->iovec = calloc(data->sg_count, sizeof(struct iovec));
+ if (data->iovec == NULL) {
+ warn("%s: unable to allocate %zu bytes for S/G list",
+ __func__, sizeof(struct iovec) * data->sg_count);
+ retval = 1;
+ goto bailout;
+ }
+ }
+
+ for (i = 0, tmp_buf = STAILQ_FIRST(&buf->src_list);
+ i < buf->src_count && tmp_buf != NULL; i++,
+ tmp_buf = STAILQ_NEXT(tmp_buf, src_links)) {
+
+ if (tmp_buf->buf_type == CAMDD_BUF_DATA) {
+ struct camdd_buf_data *tmp_data;
+
+ tmp_data = &tmp_buf->buf_type_spec.data;
+ if (iovec == 0) {
+ data->segs[i].ds_addr =
+ (bus_addr_t) tmp_data->buf;
+ data->segs[i].ds_len = tmp_data->fill_len -
+ tmp_data->resid;
+ } else {
+ data->iovec[i].iov_base = tmp_data->buf;
+ data->iovec[i].iov_len = tmp_data->fill_len -
+ tmp_data->resid;
+ }
+ if (((tmp_data->fill_len - tmp_data->resid) %
+ sector_size) != 0)
+ *double_buf_needed = 1;
+ } else {
+ struct camdd_buf_indirect *tmp_ind;
+
+ tmp_ind = &tmp_buf->buf_type_spec.indirect;
+ if (iovec == 0) {
+ data->segs[i].ds_addr =
+ (bus_addr_t)tmp_ind->start_ptr;
+ data->segs[i].ds_len = tmp_ind->len;
+ } else {
+ data->iovec[i].iov_base = tmp_ind->start_ptr;
+ data->iovec[i].iov_len = tmp_ind->len;
+ }
+ if ((tmp_ind->len % sector_size) != 0)
+ *double_buf_needed = 1;
+ }
+ }
+
+ if (extra_buf != NULL) {
+ if (iovec == 0) {
+ data->segs[i].ds_addr = (bus_addr_t)extra_buf;
+ data->segs[i].ds_len = extra_buf_len;
+ } else {
+ data->iovec[i].iov_base = extra_buf;
+ data->iovec[i].iov_len = extra_buf_len;
+ }
+ i++;
+ }
+ if ((tmp_buf != NULL) || (i != data->sg_count)) {
+ warnx("buffer source count does not match "
+ "number of buffers in list!");
+ retval = 1;
+ goto bailout;
+ }
+
+bailout:
+ if (retval == 0) {
+ *num_sectors_used = (data->fill_len + extra_buf_len) /
+ sector_size;
+ }
+ return (retval);
+}
+
+uint32_t
+camdd_buf_get_len(struct camdd_buf *buf)
+{
+ uint32_t len = 0;
+
+ if (buf->buf_type != CAMDD_BUF_DATA) {
+ struct camdd_buf_indirect *indirect;
+
+ indirect = &buf->buf_type_spec.indirect;
+ len = indirect->len;
+ } else {
+ struct camdd_buf_data *data;
+
+ data = &buf->buf_type_spec.data;
+ len = data->fill_len;
+ }
+
+ return (len);
+}
+
+void
+camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf)
+{
+ struct camdd_buf_data *data;
+
+ assert(buf->buf_type == CAMDD_BUF_DATA);
+
+ data = &buf->buf_type_spec.data;
+
+ STAILQ_INSERT_TAIL(&buf->src_list, child_buf, src_links);
+ buf->src_count++;
+
+ data->fill_len += camdd_buf_get_len(child_buf);
+}
+
+typedef enum {
+ CAMDD_TS_MAX_BLK,
+ CAMDD_TS_MIN_BLK,
+ CAMDD_TS_BLK_GRAN,
+ CAMDD_TS_EFF_IOSIZE
+} camdd_status_item_index;
+
+static struct camdd_status_items {
+ const char *name;
+ struct mt_status_entry *entry;
+} req_status_items[] = {
+ { "max_blk", NULL },
+ { "min_blk", NULL },
+ { "blk_gran", NULL },
+ { "max_effective_iosize", NULL }
+};
+
+int
+camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize,
+ uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran)
+{
+ struct mt_status_data status_data;
+ char *xml_str = NULL;
+ unsigned int i;
+ int retval = 0;
+
+ retval = mt_get_xml_str(fd, MTIOCEXTGET, &xml_str);
+ if (retval != 0)
+ err(1, "Couldn't get XML string from %s", filename);
+
+ retval = mt_get_status(xml_str, &status_data);
+ if (retval != XML_STATUS_OK) {
+ warn("couldn't get status for %s", filename);
+ retval = 1;
+ goto bailout;
+ } else
+ retval = 0;
+
+ if (status_data.error != 0) {
+ warnx("%s", status_data.error_str);
+ retval = 1;
+ goto bailout;
+ }
+
+ for (i = 0; i < sizeof(req_status_items) /
+ sizeof(req_status_items[0]); i++) {
+ char *name;
+
+ name = __DECONST(char *, req_status_items[i].name);
+ req_status_items[i].entry = mt_status_entry_find(&status_data,
+ name);
+ if (req_status_items[i].entry == NULL) {
+ errx(1, "Cannot find status entry %s",
+ req_status_items[i].name);
+ }
+ }
+
+ *max_iosize = req_status_items[CAMDD_TS_EFF_IOSIZE].entry->value_unsigned;
+ *max_blk = req_status_items[CAMDD_TS_MAX_BLK].entry->value_unsigned;
+ *min_blk = req_status_items[CAMDD_TS_MIN_BLK].entry->value_unsigned;
+ *blk_gran = req_status_items[CAMDD_TS_BLK_GRAN].entry->value_unsigned;
+bailout:
+
+ free(xml_str);
+ mt_status_free(&status_data);
+
+ return (retval);
+}
+
+struct camdd_dev *
+camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count,
+ int timeout)
+{
+ struct camdd_dev *dev = NULL;
+ struct camdd_dev_file *file_dev;
+ uint64_t blocksize = io_opts->blocksize;
+
+ dev = camdd_alloc_dev(CAMDD_DEV_FILE, NULL, 0, retry_count, timeout);
+ if (dev == NULL)
+ goto bailout;
+
+ file_dev = &dev->dev_spec.file;
+ file_dev->fd = fd;
+ strlcpy(file_dev->filename, io_opts->dev_name,
+ sizeof(file_dev->filename));
+ strlcpy(dev->device_name, io_opts->dev_name, sizeof(dev->device_name));
+ if (blocksize == 0)
+ dev->blocksize = CAMDD_FILE_DEFAULT_BLOCK;
+ else
+ dev->blocksize = blocksize;
+
+ if ((io_opts->queue_depth != 0)
+ && (io_opts->queue_depth != 1)) {
+ warnx("Queue depth %ju for %s ignored, only 1 outstanding "
+ "command supported", (uintmax_t)io_opts->queue_depth,
+ io_opts->dev_name);
+ }
+ dev->target_queue_depth = CAMDD_FILE_DEFAULT_DEPTH;
+ dev->run = camdd_file_run;
+ dev->fetch = NULL;
+
+ /*
+ * We can effectively access files on byte boundaries. We'll reset
+ * this for devices like disks that can be accessed on sector
+ * boundaries.
+ */
+ dev->sector_size = 1;
+
+ if ((fd != STDIN_FILENO)
+ && (fd != STDOUT_FILENO)) {
+ int retval;
+
+ retval = fstat(fd, &file_dev->sb);
+ if (retval != 0) {
+ warn("Cannot stat %s", dev->device_name);
+ goto bailout_error;
+ }
+ if (S_ISREG(file_dev->sb.st_mode)) {
+ file_dev->file_type = CAMDD_FILE_REG;
+ } else if (S_ISCHR(file_dev->sb.st_mode)) {
+ int type;
+
+ if (ioctl(fd, FIODTYPE, &type) == -1)
+ err(1, "FIODTYPE ioctl failed on %s",
+ dev->device_name);
+ else {
+ if (type & D_TAPE)
+ file_dev->file_type = CAMDD_FILE_TAPE;
+ else if (type & D_DISK)
+ file_dev->file_type = CAMDD_FILE_DISK;
+ else if (type & D_MEM)
+ file_dev->file_type = CAMDD_FILE_MEM;
+ else if (type & D_TTY)
+ file_dev->file_type = CAMDD_FILE_TTY;
+ }
+ } else if (S_ISDIR(file_dev->sb.st_mode)) {
+ errx(1, "cannot operate on directory %s",
+ dev->device_name);
+ } else if (S_ISFIFO(file_dev->sb.st_mode)) {
+ file_dev->file_type = CAMDD_FILE_PIPE;
+ } else
+ errx(1, "Cannot determine file type for %s",
+ dev->device_name);
+
+ switch (file_dev->file_type) {
+ case CAMDD_FILE_REG:
+ if (file_dev->sb.st_size != 0)
+ dev->max_sector = file_dev->sb.st_size - 1;
+ else
+ dev->max_sector = 0;
+ file_dev->file_flags |= CAMDD_FF_CAN_SEEK;
+ break;
+ case CAMDD_FILE_TAPE: {
+ uint64_t max_iosize, max_blk, min_blk, blk_gran;
+ /*
+ * Check block limits and maximum effective iosize.
+ * Make sure the blocksize is within the block
+ * limits (and a multiple of the minimum blocksize)
+ * and that the blocksize is <= maximum effective
+ * iosize.
+ */
+ retval = camdd_probe_tape(fd, dev->device_name,
+ &max_iosize, &max_blk, &min_blk, &blk_gran);
+ if (retval != 0)
+ errx(1, "Unable to probe tape %s",
+ dev->device_name);
+
+ /*
+ * The blocksize needs to be <= the maximum
+ * effective I/O size of the tape device. Note
+ * that this also takes into account the maximum
+ * blocksize reported by READ BLOCK LIMITS.
+ */
+ if (dev->blocksize > max_iosize) {
+ warnx("Blocksize %u too big for %s, limiting "
+ "to %ju", dev->blocksize, dev->device_name,
+ max_iosize);
+ dev->blocksize = max_iosize;
+ }
+
+ /*
+ * The blocksize needs to be at least min_blk.
+ */
+ if (dev->blocksize < min_blk) {
+ warnx("Blocksize %u too small for %s, "
+ "increasing to %ju", dev->blocksize,
+ dev->device_name, min_blk);
+ dev->blocksize = min_blk;
+ }
+
+ /*
+ * And the blocksize needs to be a multiple of
+ * the block granularity.
+ */
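+ /*
+ * (Example: blk_gran == 2 means a granularity of
+ * 1 << 2 == 4 bytes, so a blocksize of 1026 would be
+ * rounded down to 1024.)
+ */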
+ if ((blk_gran != 0)
+ && (dev->blocksize % (1 << blk_gran))) {
+ warnx("Blocksize %u for %s not a multiple of "
+ "%d, adjusting to %d", dev->blocksize,
+ dev->device_name, (1 << blk_gran),
+ dev->blocksize & ~((1 << blk_gran) - 1));
+ dev->blocksize &= ~((1 << blk_gran) - 1);
+ }
+
+ if (dev->blocksize == 0) {
+ errx(1, "Unable to derive valid blocksize for "
+ "%s", dev->device_name);
+ }
+
+ /*
+ * For tape drives, set the sector size to the
+ * blocksize so that we make sure not to write
+ * less than the blocksize out to the drive.
+ */
+ dev->sector_size = dev->blocksize;
+ break;
+ }
+ case CAMDD_FILE_DISK: {
+ off_t media_size;
+ unsigned int sector_size;
+
+ file_dev->file_flags |= CAMDD_FF_CAN_SEEK;
+
+ if (ioctl(fd, DIOCGSECTORSIZE, &sector_size) == -1) {
+ err(1, "DIOCGSECTORSIZE ioctl failed on %s",
+ dev->device_name);
+ }
+
+ if (sector_size == 0) {
+ errx(1, "DIOCGSECTORSIZE ioctl returned "
+ "invalid sector size %u for %s",
+ sector_size, dev->device_name);
+ }
+
+ if (ioctl(fd, DIOCGMEDIASIZE, &media_size) == -1) {
+ err(1, "DIOCGMEDIASIZE ioctl failed on %s",
+ dev->device_name);
+ }
+
+ if (media_size == 0) {
+ errx(1, "DIOCGMEDIASIZE ioctl returned "
+ "invalid media size %ju for %s",
+ (uintmax_t)media_size, dev->device_name);
+ }
+
+ if (dev->blocksize % sector_size) {
+ errx(1, "%s blocksize %u not a multiple of "
+ "sector size %u", dev->device_name,
+ dev->blocksize, sector_size);
+ }
+
+ dev->sector_size = sector_size;
+ dev->max_sector = (media_size / sector_size) - 1;
+ break;
+ }
+ case CAMDD_FILE_MEM:
+ file_dev->file_flags |= CAMDD_FF_CAN_SEEK;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if ((io_opts->offset != 0)
+ && ((file_dev->file_flags & CAMDD_FF_CAN_SEEK) == 0)) {
+ warnx("Offset %ju specified for %s, but we cannot seek on %s",
+ io_opts->offset, io_opts->dev_name, io_opts->dev_name);
+ goto bailout_error;
+ }
+#if 0
+ else if ((io_opts->offset != 0)
+ && ((io_opts->offset % dev->sector_size) != 0)) {
+ warnx("Offset %ju for %s is not a multiple of the "
+ "sector size %u", io_opts->offset,
+ io_opts->dev_name, dev->sector_size);
+ goto bailout_error;
+ } else {
+ dev->start_offset_bytes = io_opts->offset;
+ }
+#endif
+
+bailout:
+ return (dev);
+
+bailout_error:
+ camdd_free_dev(dev);
+ return (NULL);
+}
+
+/*
+ * Need to implement this. Do a basic probe:
+ * - Check the inquiry data, make sure we're talking to a device that we
+ * can reasonably expect to talk to -- direct, RBC, CD, WORM.
+ * - Send a test unit ready, make sure the device is available.
+ * - Get the capacity and block size.
+ */
+struct camdd_dev *
+camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts,
+ camdd_argmask arglist, int probe_retry_count,
+ int probe_timeout, int io_retry_count, int io_timeout)
+{
+ union ccb *ccb;
+ uint64_t maxsector;
+ uint32_t cpi_maxio, max_iosize, pass_numblocks;
+ uint32_t block_len;
+ struct scsi_read_capacity_data rcap;
+ struct scsi_read_capacity_data_long rcaplong;
+ struct camdd_dev *dev;
+ struct camdd_dev_pass *pass_dev;
+ struct kevent ke;
+ int scsi_dev_type;
+ int retval;
+
+ dev = NULL;
+
+ scsi_dev_type = SID_TYPE(&cam_dev->inq_data);
+ maxsector = 0;
+ block_len = 0;
+
+ /*
+ * For devices that support READ CAPACITY, we'll attempt to get the
+ * capacity. Otherwise, we really don't support tape or other
+ * devices via SCSI passthrough, so just return an error in that case.
+ */
+ switch (scsi_dev_type) {
+ case T_DIRECT:
+ case T_WORM:
+ case T_CDROM:
+ case T_OPTICAL:
+ case T_RBC:
+ break;
+ default:
+ errx(1, "Unsupported SCSI device type %d", scsi_dev_type);
+ break; /*NOTREACHED*/
+ }
+
+ ccb = cam_getccb(cam_dev);
+
+ if (ccb == NULL) {
+ warnx("%s: error allocating ccb", __func__);
+ goto bailout;
+ }
+
+ bzero(&(&ccb->ccb_h)[1],
+ sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr));
+
+ scsi_read_capacity(&ccb->csio,
+ /*retries*/ probe_retry_count,
+ /*cbfcnp*/ NULL,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ &rcap,
+ SSD_FULL_SIZE,
+ /*timeout*/ probe_timeout ? probe_timeout : 5000);
+
+ /* Disable freezing the device queue */
+ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;
+
+ if (arglist & CAMDD_ARG_ERR_RECOVER)
+ ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
+
+ if (cam_send_ccb(cam_dev, ccb) < 0) {
+ warn("error sending READ CAPACITY command");
+
+ cam_error_print(cam_dev, ccb, CAM_ESF_ALL,
+ CAM_EPF_ALL, stderr);
+
+ goto bailout;
+ }
+
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr);
+ retval = 1;
+ goto bailout;
+ }
+
+ maxsector = scsi_4btoul(rcap.addr);
+ block_len = scsi_4btoul(rcap.length);
+
+ /*
+ * A last block of 2^32-1 means that the true capacity is over 2TB,
+ * and we need to issue the long READ CAPACITY to get the real
+ * capacity. Otherwise, we're all set.
+ */
+ if (maxsector != 0xffffffff)
+ goto rcap_done;
+
+ scsi_read_capacity_16(&ccb->csio,
+ /*retries*/ probe_retry_count,
+ /*cbfcnp*/ NULL,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*lba*/ 0,
+ /*reladdr*/ 0,
+ /*pmi*/ 0,
+ (uint8_t *)&rcaplong,
+ sizeof(rcaplong),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ probe_timeout ? probe_timeout : 5000);
+
+ /* Disable freezing the device queue */
+ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;
+
+ if (arglist & CAMDD_ARG_ERR_RECOVER)
+ ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
+
+ if (cam_send_ccb(cam_dev, ccb) < 0) {
+ warn("error sending READ CAPACITY (16) command");
+
+ cam_error_print(cam_dev, ccb, CAM_ESF_ALL,
+ CAM_EPF_ALL, stderr);
+
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr);
+ goto bailout;
+ }
+
+ maxsector = scsi_8btou64(rcaplong.addr);
+ block_len = scsi_4btoul(rcaplong.length);
+
+rcap_done:
+
+ bzero(&(&ccb->ccb_h)[1],
+ sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr));
+
+ ccb->ccb_h.func_code = XPT_PATH_INQ;
+ ccb->ccb_h.flags = CAM_DIR_NONE;
+ ccb->ccb_h.retry_count = 1;
+
+ if (cam_send_ccb(cam_dev, ccb) < 0) {
+ warn("error sending XPT_PATH_INQ CCB");
+
+ cam_error_print(cam_dev, ccb, CAM_ESF_ALL,
+ CAM_EPF_ALL, stderr);
+ goto bailout;
+ }
+
+ EV_SET(&ke, cam_dev->fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
+
+ dev = camdd_alloc_dev(CAMDD_DEV_PASS, &ke, 1, io_retry_count,
+ io_timeout);
+ if (dev == NULL)
+ goto bailout;
+
+ pass_dev = &dev->dev_spec.pass;
+ pass_dev->scsi_dev_type = scsi_dev_type;
+ pass_dev->dev = cam_dev;
+ pass_dev->max_sector = maxsector;
+ pass_dev->block_len = block_len;
+ pass_dev->cpi_maxio = ccb->cpi.maxio;
+ snprintf(dev->device_name, sizeof(dev->device_name), "%s%u",
+ pass_dev->dev->device_name, pass_dev->dev->dev_unit_num);
+ dev->sector_size = block_len;
+ dev->max_sector = maxsector;
+
+
+ /*
+ * Determine the optimal blocksize to use for this device.
+ */
+
+ /*
+ * If the controller has not specified a maximum I/O size,
+ * just go with 128K as a somewhat conservative value.
+ */
+ if (pass_dev->cpi_maxio == 0)
+ cpi_maxio = 131072;
+ else
+ cpi_maxio = pass_dev->cpi_maxio;
+
+ /*
+ * If the controller has a large maximum I/O size, limit it
+ * to something smaller so that the kernel doesn't have trouble
+ * allocating buffers to copy data in and out for us.
+ * XXX KDM this is until we have unmapped I/O support in the kernel.
+ */
+ max_iosize = min(cpi_maxio, CAMDD_PASS_MAX_BLOCK);
+
+ /*
+ * If we weren't able to get a block size for some reason,
+ * default to 512 bytes.
+ */
+ block_len = pass_dev->block_len;
+ if (block_len == 0)
+ block_len = 512;
+
+ /*
+ * Figure out how many blocksize chunks will fit in the
+ * maximum I/O size.
+ */
+ pass_numblocks = max_iosize / block_len;
+
+ /*
+ * And finally, multiply the number of blocks by the LBA
+ * length to get our maximum block size.
+ */
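+ /*
+ * Example: with the 1 MB cap above and 512-byte LBAs, this
+ * yields 1048576 / 512 == 2048 blocks, i.e. a 1 MB blocksize.
+ */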
+ dev->blocksize = pass_numblocks * block_len;
+
+ if (io_opts->blocksize != 0) {
+ if ((io_opts->blocksize % dev->sector_size) != 0) {
+ warnx("Blocksize %ju for %s is not a multiple of "
+ "sector size %u", (uintmax_t)io_opts->blocksize,
+ dev->device_name, dev->sector_size);
+ goto bailout_error;
+ }
+ dev->blocksize = io_opts->blocksize;
+ }
+ dev->target_queue_depth = CAMDD_PASS_DEFAULT_DEPTH;
+ if (io_opts->queue_depth != 0)
+ dev->target_queue_depth = io_opts->queue_depth;
+
+ if (io_opts->offset != 0) {
+ if (io_opts->offset > (dev->max_sector * dev->sector_size)) {
+ warnx("Offset %ju is past the end of device %s",
+ io_opts->offset, dev->device_name);
+ goto bailout_error;
+ }
+#if 0
+ else if ((io_opts->offset % dev->sector_size) != 0) {
+ warnx("Offset %ju for %s is not a multiple of the "
+ "sector size %u", io_opts->offset,
+ dev->device_name, dev->sector_size);
+ goto bailout_error;
+ }
+ dev->start_offset_bytes = io_opts->offset;
+#endif
+ }
+
+ dev->min_cmd_size = io_opts->min_cmd_size;
+
+ dev->run = camdd_pass_run;
+ dev->fetch = camdd_pass_fetch;
+
+bailout:
+ cam_freeccb(ccb);
+
+ return (dev);
+
+bailout_error:
+ cam_freeccb(ccb);
+
+ camdd_free_dev(dev);
+
+ return (NULL);
+}
+
+void *
+camdd_worker(void *arg)
+{
+ struct camdd_dev *dev = arg;
+ struct camdd_buf *buf;
+ struct timespec ts, *kq_ts;
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+
+ pthread_mutex_lock(&dev->mutex);
+
+ dev->flags |= CAMDD_DEV_FLAG_ACTIVE;
+
+ for (;;) {
+ struct kevent ke;
+ int retval = 0;
+
+ /*
+ * XXX KDM check the reorder queue depth?
+ */
+ if (dev->write_dev == 0) {
+ uint32_t our_depth, peer_depth, peer_bytes, our_bytes;
+ uint32_t target_depth = dev->target_queue_depth;
+ uint32_t peer_target_depth =
+ dev->peer_dev->target_queue_depth;
+ uint32_t peer_blocksize = dev->peer_dev->blocksize;
+
+ camdd_get_depth(dev, &our_depth, &peer_depth,
+ &our_bytes, &peer_bytes);
+
+#if 0
+ while (((our_depth < target_depth)
+ && (peer_depth < peer_target_depth))
+ || ((peer_bytes + our_bytes) <
+ (peer_blocksize * 2))) {
+#endif
+ while (((our_depth + peer_depth) <
+ (target_depth + peer_target_depth))
+ || ((peer_bytes + our_bytes) <
+ (peer_blocksize * 3))) {
+
+ retval = camdd_queue(dev, NULL);
+ if (retval == 1)
+ break;
+ else if (retval != 0) {
+ error_exit = 1;
+ goto bailout;
+ }
+
+ camdd_get_depth(dev, &our_depth, &peer_depth,
+ &our_bytes, &peer_bytes);
+ }
+ }
+ /*
+ * See if we have any I/O that is ready to execute.
+ */
+ buf = STAILQ_FIRST(&dev->run_queue);
+ if (buf != NULL) {
+ while (dev->target_queue_depth > dev->cur_active_io) {
+ retval = dev->run(dev);
+ if (retval == -1) {
+ dev->flags |= CAMDD_DEV_FLAG_EOF;
+ error_exit = 1;
+ break;
+ } else if (retval != 0) {
+ break;
+ }
+ }
+ }
+
+ /*
+ * We've reached EOF, or our partner has reached EOF.
+ */
+ if ((dev->flags & CAMDD_DEV_FLAG_EOF)
+ || (dev->flags & CAMDD_DEV_FLAG_PEER_EOF)) {
+ if (dev->write_dev != 0) {
+ if ((STAILQ_EMPTY(&dev->work_queue))
+ && (dev->num_run_queue == 0)
+ && (dev->cur_active_io == 0)) {
+ goto bailout;
+ }
+ } else {
+ /*
+ * If we're the reader, and the writer
+ * got EOF, he is already done. If we got
+ * the EOF, then we need to wait until
+ * everything is flushed out for the writer.
+ */
+ if (dev->flags & CAMDD_DEV_FLAG_PEER_EOF) {
+ goto bailout;
+ } else if ((dev->num_peer_work_queue == 0)
+ && (dev->num_peer_done_queue == 0)
+ && (dev->cur_active_io == 0)
+ && (dev->num_run_queue == 0)) {
+ goto bailout;
+ }
+ }
+ /*
+ * XXX KDM need to do something about the pending
+ * queue and cleanup resources.
+ */
+ }
+
+ if ((dev->write_dev == 0)
+ && (dev->cur_active_io == 0)
+ && (dev->peer_bytes_queued < dev->peer_dev->blocksize))
+ kq_ts = &ts;
+ else
+ kq_ts = NULL;
+
+ /*
+ * Run kevent to see if there are events to process.
+ */
+ pthread_mutex_unlock(&dev->mutex);
+ retval = kevent(dev->kq, NULL, 0, &ke, 1, kq_ts);
+ pthread_mutex_lock(&dev->mutex);
+ if (retval == -1) {
+			warn("%s: error returned from kevent", __func__);
+ goto bailout;
+ } else if (retval != 0) {
+ switch (ke.filter) {
+ case EVFILT_READ:
+ if (dev->fetch != NULL) {
+ retval = dev->fetch(dev);
+ if (retval == -1) {
+ error_exit = 1;
+ goto bailout;
+ }
+ }
+ break;
+ case EVFILT_SIGNAL:
+ /*
+ * We register for this so we don't get
+ * an error as a result of a SIGINFO or a
+ * SIGINT. It will actually get handled
+ * by the signal handler. If we get a
+ * SIGINT, bail out without printing an
+ * error message. Any other signals
+ * will result in the error message above.
+ */
+ if (ke.ident == SIGINT)
+ goto bailout;
+ break;
+ case EVFILT_USER:
+ retval = 0;
+ /*
+ * Check to see if the other thread has
+ * queued any I/O for us to do. (In this
+ * case we're the writer.)
+ */
+ for (buf = STAILQ_FIRST(&dev->work_queue);
+ buf != NULL;
+ buf = STAILQ_FIRST(&dev->work_queue)) {
+ STAILQ_REMOVE_HEAD(&dev->work_queue,
+ work_links);
+ retval = camdd_queue(dev, buf);
+ /*
+ * We keep going unless we get an
+ * actual error. If we get EOF, we
+ * still want to remove the buffers
+					 * from the queue and send them back
+ * to the reader thread.
+ */
+ if (retval == -1) {
+ error_exit = 1;
+ goto bailout;
+ } else
+ retval = 0;
+ }
+
+ /*
+ * Next check to see if the other thread has
+ * queued any completed buffers back to us.
+ * (In this case we're the reader.)
+ */
+ for (buf = STAILQ_FIRST(&dev->peer_done_queue);
+ buf != NULL;
+ buf = STAILQ_FIRST(&dev->peer_done_queue)){
+ STAILQ_REMOVE_HEAD(
+ &dev->peer_done_queue, work_links);
+ dev->num_peer_done_queue--;
+ camdd_peer_done(buf);
+ }
+ break;
+ default:
+ warnx("%s: unknown kevent filter %d",
+ __func__, ke.filter);
+ break;
+ }
+ }
+ }
+
+bailout:
+
+ dev->flags &= ~CAMDD_DEV_FLAG_ACTIVE;
+
+ /* XXX KDM cleanup resources here? */
+
+ pthread_mutex_unlock(&dev->mutex);
+
+ need_exit = 1;
+ sem_post(&camdd_sem);
+
+ return (NULL);
+}
+
+/*
+ * Simplistic translation of CCB status to our local status.
+ */
+camdd_buf_status
+camdd_ccb_status(union ccb *ccb)
+{
+ camdd_buf_status status = CAMDD_STATUS_NONE;
+ cam_status ccb_status;
+
+ ccb_status = ccb->ccb_h.status & CAM_STATUS_MASK;
+
+ switch (ccb_status) {
+ case CAM_REQ_CMP: {
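+		/*
+		 * No transport error.  A zero residual means full
+		 * success, a partial residual is a short I/O, and a
+		 * residual equal to the transfer length means nothing
+		 * moved, which we treat as EOF.
+		 */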
+ if (ccb->csio.resid == 0) {
+ status = CAMDD_STATUS_OK;
+ } else if (ccb->csio.dxfer_len > ccb->csio.resid) {
+ status = CAMDD_STATUS_SHORT_IO;
+ } else {
+ status = CAMDD_STATUS_EOF;
+ }
+ break;
+ }
+ case CAM_SCSI_STATUS_ERROR: {
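+		/*
+		 * Intermediate and condition-met SCSI statuses are
+		 * benign; check condition, busy, queue full and the
+		 * rest are reported as errors.
+		 */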
+ switch (ccb->csio.scsi_status) {
+ case SCSI_STATUS_OK:
+ case SCSI_STATUS_COND_MET:
+ case SCSI_STATUS_INTERMED:
+ case SCSI_STATUS_INTERMED_COND_MET:
+ status = CAMDD_STATUS_OK;
+ break;
+ case SCSI_STATUS_CMD_TERMINATED:
+ case SCSI_STATUS_CHECK_COND:
+ case SCSI_STATUS_QUEUE_FULL:
+ case SCSI_STATUS_BUSY:
+ case SCSI_STATUS_RESERV_CONFLICT:
+ default:
+ status = CAMDD_STATUS_ERROR;
+ break;
+ }
+ break;
+ }
+ default:
+ status = CAMDD_STATUS_ERROR;
+ break;
+ }
+
+ return (status);
+}
+
+/*
+ * Queue a buffer to our peer's work thread for writing.
+ *
+ * Returns 0 for success, -1 for failure, 1 if the other thread exited.
+ */
+int
+camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf)
+{
+ struct kevent ke;
+ STAILQ_HEAD(, camdd_buf) local_queue;
+ struct camdd_buf *buf1, *buf2;
+ struct camdd_buf_data *data = NULL;
+ uint64_t peer_bytes_queued = 0;
+ int active = 1;
+ int retval = 0;
+
+ STAILQ_INIT(&local_queue);
+
+ /*
+	 * Since we're the reader, we need to queue our I/O to the writer
+	 * in sequential order so that it gets written out in sequential
+	 * order.
+ *
+ * Check the next expected I/O starting offset. If this doesn't
+ * match, put it on the reorder queue.
+ */
+ if ((buf->lba * dev->sector_size) != dev->next_completion_pos_bytes) {
+
+ /*
+ * If there is nothing on the queue, there is no sorting
+ * needed.
+ */
+ if (STAILQ_EMPTY(&dev->reorder_queue)) {
+ STAILQ_INSERT_TAIL(&dev->reorder_queue, buf, links);
+ dev->num_reorder_queue++;
+ goto bailout;
+ }
+
+ /*
+ * Sort in ascending order by starting LBA. There should
+ * be no identical LBAs.
+ */
+ for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL;
+ buf1 = buf2) {
+ buf2 = STAILQ_NEXT(buf1, links);
+ if (buf->lba < buf1->lba) {
+ /*
+ * If we're less than the first one, then
+ * we insert at the head of the list
+ * because this has to be the first element
+ * on the list.
+ */
+ STAILQ_INSERT_HEAD(&dev->reorder_queue,
+ buf, links);
+ dev->num_reorder_queue++;
+ break;
+ } else if (buf->lba > buf1->lba) {
+ if (buf2 == NULL) {
+ STAILQ_INSERT_TAIL(&dev->reorder_queue,
+ buf, links);
+ dev->num_reorder_queue++;
+ break;
+ } else if (buf->lba < buf2->lba) {
+ STAILQ_INSERT_AFTER(&dev->reorder_queue,
+ buf1, buf, links);
+ dev->num_reorder_queue++;
+ break;
+ }
+ } else {
+				errx(1, "Found buffers with duplicate LBA "
+				    "%ju!", (uintmax_t)buf->lba);
+ }
+ }
+ goto bailout;
+ } else {
+
+ /*
+ * We're the next expected I/O completion, so put ourselves
+ * on the local queue to be sent to the writer. We use
+ * work_links here so that we can queue this to the
+ * peer_work_queue before taking the buffer off of the
+ * local_queue.
+ */
+ dev->next_completion_pos_bytes += buf->len;
+ STAILQ_INSERT_TAIL(&local_queue, buf, work_links);
+
+ /*
+ * Go through the reorder queue looking for more sequential
+ * I/O and add it to the local queue.
+ */
+ for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL;
+ buf1 = STAILQ_FIRST(&dev->reorder_queue)) {
+ /*
+ * As soon as we see an I/O that is out of sequence,
+ * we're done.
+ */
+ if ((buf1->lba * dev->sector_size) !=
+ dev->next_completion_pos_bytes)
+ break;
+
+ STAILQ_REMOVE_HEAD(&dev->reorder_queue, links);
+ dev->num_reorder_queue--;
+ STAILQ_INSERT_TAIL(&local_queue, buf1, work_links);
+ dev->next_completion_pos_bytes += buf1->len;
+ }
+ }
+
+ /*
+ * Setup the event to let the other thread know that it has work
+ * pending.
+ */
+ EV_SET(&ke, (uintptr_t)&dev->peer_dev->work_queue, EVFILT_USER, 0,
+ NOTE_TRIGGER, 0, NULL);
+
+ /*
+ * Put this on our shadow queue so that we know what we've queued
+ * to the other thread.
+ */
+ STAILQ_FOREACH_SAFE(buf1, &local_queue, work_links, buf2) {
+ if (buf1->buf_type != CAMDD_BUF_DATA) {
+ errx(1, "%s: should have a data buffer, not an "
+ "indirect buffer", __func__);
+ }
+ data = &buf1->buf_type_spec.data;
+
+ /*
+ * We only need to send one EOF to the writer, and don't
+ * need to continue sending EOFs after that.
+ */
+ if (buf1->status == CAMDD_STATUS_EOF) {
+ if (dev->flags & CAMDD_DEV_FLAG_EOF_SENT) {
+ STAILQ_REMOVE(&local_queue, buf1, camdd_buf,
+ work_links);
+ camdd_release_buf(buf1);
+ retval = 1;
+ continue;
+ }
+ dev->flags |= CAMDD_DEV_FLAG_EOF_SENT;
+ }
+
+ STAILQ_INSERT_TAIL(&dev->peer_work_queue, buf1, links);
+ peer_bytes_queued += (data->fill_len - data->resid);
+ dev->peer_bytes_queued += (data->fill_len - data->resid);
+ dev->num_peer_work_queue++;
+ }
+
+ if (STAILQ_FIRST(&local_queue) == NULL)
+ goto bailout;
+
+ /*
+ * Drop our mutex and pick up the other thread's mutex. We need to
+ * do this to avoid deadlocks.
+ */
+ pthread_mutex_unlock(&dev->mutex);
+ pthread_mutex_lock(&dev->peer_dev->mutex);
+
+ if (dev->peer_dev->flags & CAMDD_DEV_FLAG_ACTIVE) {
+ /*
+ * Put the buffers on the other thread's incoming work queue.
+ */
+ for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL;
+ buf1 = STAILQ_FIRST(&local_queue)) {
+ STAILQ_REMOVE_HEAD(&local_queue, work_links);
+ STAILQ_INSERT_TAIL(&dev->peer_dev->work_queue, buf1,
+ work_links);
+ }
+ /*
+ * Send an event to the other thread's kqueue to let it know
+ * that there is something on the work queue.
+ */
+ retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL);
+ if (retval == -1)
+ warn("%s: unable to add peer work_queue kevent",
+ __func__);
+ else
+ retval = 0;
+ } else
+ active = 0;
+
+ pthread_mutex_unlock(&dev->peer_dev->mutex);
+ pthread_mutex_lock(&dev->mutex);
+
+ /*
+ * If the other side isn't active, run through the queue and
+ * release all of the buffers.
+ */
+ if (active == 0) {
+ for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL;
+ buf1 = STAILQ_FIRST(&local_queue)) {
+ STAILQ_REMOVE_HEAD(&local_queue, work_links);
+ STAILQ_REMOVE(&dev->peer_work_queue, buf1, camdd_buf,
+ links);
+ dev->num_peer_work_queue--;
+ camdd_release_buf(buf1);
+ }
+ dev->peer_bytes_queued -= peer_bytes_queued;
+ retval = 1;
+ }
+
+bailout:
+ return (retval);
+}
+
+/*
+ * Return a buffer to the reader thread when we have completed writing it.
+ */
+int
+camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf)
+{
+ struct kevent ke;
+ int retval = 0;
+
+ /*
+ * Setup the event to let the other thread know that we have
+ * completed a buffer.
+ */
+ EV_SET(&ke, (uintptr_t)&dev->peer_dev->peer_done_queue, EVFILT_USER, 0,
+ NOTE_TRIGGER, 0, NULL);
+
+ /*
+ * Drop our lock and acquire the other thread's lock before
+	 * manipulating its queues.
+ */
+ pthread_mutex_unlock(&dev->mutex);
+ pthread_mutex_lock(&dev->peer_dev->mutex);
+
+ /*
+ * Put the buffer on the reader thread's peer done queue now that
+ * we have completed it.
+ */
+ STAILQ_INSERT_TAIL(&dev->peer_dev->peer_done_queue, peer_buf,
+ work_links);
+ dev->peer_dev->num_peer_done_queue++;
+
+ /*
+ * Send an event to the peer thread to let it know that we've added
+ * something to its peer done queue.
+ */
+ retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL);
+ if (retval == -1)
+ warn("%s: unable to add peer_done_queue kevent", __func__);
+ else
+ retval = 0;
+
+ /*
+ * Drop the other thread's lock and reacquire ours.
+ */
+ pthread_mutex_unlock(&dev->peer_dev->mutex);
+ pthread_mutex_lock(&dev->mutex);
+
+ return (retval);
+}
+
+/*
+ * Free a buffer that was written out by the writer thread and returned to
+ * the reader thread.
+ */
+void
+camdd_peer_done(struct camdd_buf *buf)
+{
+ struct camdd_dev *dev;
+ struct camdd_buf_data *data;
+
+ dev = buf->dev;
+ if (buf->buf_type != CAMDD_BUF_DATA) {
+ errx(1, "%s: should have a data buffer, not an "
+ "indirect buffer", __func__);
+ }
+
+ data = &buf->buf_type_spec.data;
+
+ STAILQ_REMOVE(&dev->peer_work_queue, buf, camdd_buf, links);
+ dev->num_peer_work_queue--;
+ dev->peer_bytes_queued -= (data->fill_len - data->resid);
+
+ if (buf->status == CAMDD_STATUS_EOF)
+ dev->flags |= CAMDD_DEV_FLAG_PEER_EOF;
+
+ STAILQ_INSERT_TAIL(&dev->free_queue, buf, links);
+}
+
+/*
+ * Assumes caller holds the lock for this device.
+ */
+void
+camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf,
+ int *error_count)
+{
+ int retval = 0;
+
+ /*
+ * If we're the reader, we need to send the completed I/O
+ * to the writer. If we're the writer, we need to just
+ * free up resources, or let the reader know if we've
+ * encountered an error.
+ */
+ if (dev->write_dev == 0) {
+ retval = camdd_queue_peer_buf(dev, buf);
+ if (retval != 0)
+ (*error_count)++;
+ } else {
+ struct camdd_buf *tmp_buf, *next_buf;
+
+ STAILQ_FOREACH_SAFE(tmp_buf, &buf->src_list, src_links,
+ next_buf) {
+ struct camdd_buf *src_buf;
+ struct camdd_buf_indirect *indirect;
+
+ STAILQ_REMOVE(&buf->src_list, tmp_buf,
+ camdd_buf, src_links);
+
+ tmp_buf->status = buf->status;
+
+ if (tmp_buf->buf_type == CAMDD_BUF_DATA) {
+ camdd_complete_peer_buf(dev, tmp_buf);
+ continue;
+ }
+
+ indirect = &tmp_buf->buf_type_spec.indirect;
+ src_buf = indirect->src_buf;
+ src_buf->refcount--;
+ /*
+ * XXX KDM we probably need to account for
+ * exactly how many bytes we were able to
+ * write. Allocate the residual to the
+ * first N buffers? Or just track the
+ * number of bytes written? Right now the reader
+ * doesn't do anything with a residual.
+ */
+ src_buf->status = buf->status;
+ if (src_buf->refcount <= 0)
+ camdd_complete_peer_buf(dev, src_buf);
+ STAILQ_INSERT_TAIL(&dev->free_indirect_queue,
+ tmp_buf, links);
+ }
+
+ STAILQ_INSERT_TAIL(&dev->free_queue, buf, links);
+ }
+}
+
+/*
+ * Fetch all completed commands from the pass(4) device.
+ *
+ * Returns the number of commands received, or -1 if any of the commands
+ * completed with an error. Returns 0 if no commands are available.
+ */
+int
+camdd_pass_fetch(struct camdd_dev *dev)
+{
+ struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass;
+ union ccb ccb;
+ int retval = 0, num_fetched = 0, error_count = 0;
+
+ pthread_mutex_unlock(&dev->mutex);
+ /*
+ * XXX KDM we don't distinguish between EFAULT and ENOENT.
+ */
+ while ((retval = ioctl(pass_dev->dev->fd, CAMIOGET, &ccb)) != -1) {
+ struct camdd_buf *buf;
+ struct camdd_buf_data *data;
+ cam_status ccb_status;
+ union ccb *buf_ccb;
+
+ buf = ccb.ccb_h.ccb_buf;
+ data = &buf->buf_type_spec.data;
+ buf_ccb = &data->ccb;
+
+ num_fetched++;
+
+ /*
+ * Copy the CCB back out so we get status, sense data, etc.
+ */
+ bcopy(&ccb, buf_ccb, sizeof(ccb));
+
+ pthread_mutex_lock(&dev->mutex);
+
+ /*
+ * We're now done, so take this off the active queue.
+ */
+ STAILQ_REMOVE(&dev->active_queue, buf, camdd_buf, links);
+ dev->cur_active_io--;
+
+ ccb_status = ccb.ccb_h.status & CAM_STATUS_MASK;
+ if (ccb_status != CAM_REQ_CMP) {
+ cam_error_print(pass_dev->dev, &ccb, CAM_ESF_ALL,
+ CAM_EPF_ALL, stderr);
+ }
+
+ data->resid = ccb.csio.resid;
+ dev->bytes_transferred += (ccb.csio.dxfer_len - ccb.csio.resid);
+
+ if (buf->status == CAMDD_STATUS_NONE)
+ buf->status = camdd_ccb_status(&ccb);
+ if (buf->status == CAMDD_STATUS_ERROR)
+ error_count++;
+ else if (buf->status == CAMDD_STATUS_EOF) {
+ /*
+ * Once we queue this buffer to our partner thread,
+			 * it will know that we've hit EOF.
+ */
+ dev->flags |= CAMDD_DEV_FLAG_EOF;
+ }
+
+ camdd_complete_buf(dev, buf, &error_count);
+
+ /*
+ * Unlock in preparation for the ioctl call.
+ */
+ pthread_mutex_unlock(&dev->mutex);
+ }
+
+ pthread_mutex_lock(&dev->mutex);
+
+ if (error_count > 0)
+ return (-1);
+ else
+ return (num_fetched);
+}
+
+/*
+ * Returns -1 for error, 0 for success/continue, and 1 for resource
+ * shortage/stop processing.
+ */
+int
+camdd_file_run(struct camdd_dev *dev)
+{
+ struct camdd_dev_file *file_dev = &dev->dev_spec.file;
+ struct camdd_buf_data *data;
+ struct camdd_buf *buf;
+ off_t io_offset;
+ int retval = 0, write_dev = dev->write_dev;
+ int error_count = 0, no_resources = 0, double_buf_needed = 0;
+ uint32_t num_sectors = 0, db_len = 0;
+
+ buf = STAILQ_FIRST(&dev->run_queue);
+ if (buf == NULL) {
+ no_resources = 1;
+ goto bailout;
+ } else if ((dev->write_dev == 0)
+ && (dev->flags & (CAMDD_DEV_FLAG_EOF |
+ CAMDD_DEV_FLAG_EOF_SENT))) {
+ STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links);
+ dev->num_run_queue--;
+ buf->status = CAMDD_STATUS_EOF;
+ error_count++;
+ goto bailout;
+ }
+
+ /*
+ * If we're writing, we need to go through the source buffer list
+ * and create an S/G list.
+ */
+ if (write_dev != 0) {
+ retval = camdd_buf_sg_create(buf, /*iovec*/ 1,
+ dev->sector_size, &num_sectors, &double_buf_needed);
+ if (retval != 0) {
+ no_resources = 1;
+ goto bailout;
+ }
+ }
+
+ STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links);
+ dev->num_run_queue--;
+
+ data = &buf->buf_type_spec.data;
+
+ /*
+ * pread(2) and pwrite(2) offsets are byte offsets.
+ */
+ io_offset = buf->lba * dev->sector_size;
+
+ /*
+ * Unlock the mutex while we read or write.
+ */
+ pthread_mutex_unlock(&dev->mutex);
+
+ /*
+ * Note that we don't need to double buffer if we're the reader
+ * because in that case, we have allocated a single buffer of
+ * sufficient size to do the read. This copy is necessary on
+ * writes because if one of the components of the S/G list is not
+ * a sector size multiple, the kernel will reject the write. This
+ * is unfortunate but not surprising. So this will make sure that
+ * we're using a single buffer that is a multiple of the sector size.
+ */
+ if ((double_buf_needed != 0)
+ && (data->sg_count > 1)
+ && (write_dev != 0)) {
+ uint32_t cur_offset;
+ int i;
+
+ if (file_dev->tmp_buf == NULL)
+ file_dev->tmp_buf = calloc(dev->blocksize, 1);
+ if (file_dev->tmp_buf == NULL) {
+ buf->status = CAMDD_STATUS_ERROR;
+ error_count++;
+ goto bailout;
+ }
+ for (i = 0, cur_offset = 0; i < data->sg_count; i++) {
+ bcopy(data->iovec[i].iov_base,
+ &file_dev->tmp_buf[cur_offset],
+ data->iovec[i].iov_len);
+ cur_offset += data->iovec[i].iov_len;
+ }
+ db_len = cur_offset;
+ }
+
+ if (file_dev->file_flags & CAMDD_FF_CAN_SEEK) {
+ if (write_dev == 0) {
+ /*
+ * XXX KDM is there any way we would need a S/G
+ * list here?
+ */
+ retval = pread(file_dev->fd, data->buf,
+ buf->len, io_offset);
+ } else {
+ if (double_buf_needed != 0) {
+ retval = pwrite(file_dev->fd, file_dev->tmp_buf,
+ db_len, io_offset);
+ } else if (data->sg_count == 0) {
+ retval = pwrite(file_dev->fd, data->buf,
+ data->fill_len, io_offset);
+ } else {
+ retval = pwritev(file_dev->fd, data->iovec,
+ data->sg_count, io_offset);
+ }
+ }
+ } else {
+ if (write_dev == 0) {
+ /*
+ * XXX KDM is there any way we would need a S/G
+ * list here?
+ */
+ retval = read(file_dev->fd, data->buf, buf->len);
+ } else {
+ if (double_buf_needed != 0) {
+ retval = write(file_dev->fd, file_dev->tmp_buf,
+ db_len);
+ } else if (data->sg_count == 0) {
+ retval = write(file_dev->fd, data->buf,
+ data->fill_len);
+ } else {
+ retval = writev(file_dev->fd, data->iovec,
+ data->sg_count);
+ }
+ }
+ }
+
+ /* We're done, re-acquire the lock */
+ pthread_mutex_lock(&dev->mutex);
+
+ if (retval >= (ssize_t)data->fill_len) {
+ /*
+		 * If the number of bytes transferred exceeds the request
+		 * size,
+ * that indicates an overrun, which should only happen at
+ * the end of a transfer if we have to round up to a sector
+ * boundary.
+ */
+ if (buf->status == CAMDD_STATUS_NONE)
+ buf->status = CAMDD_STATUS_OK;
+ data->resid = 0;
+ dev->bytes_transferred += retval;
+ } else if (retval == -1) {
+ warn("Error %s %s", (write_dev) ? "writing to" :
+ "reading from", file_dev->filename);
+
+ buf->status = CAMDD_STATUS_ERROR;
+ data->resid = data->fill_len;
+ error_count++;
+
+ if (dev->debug == 0)
+ goto bailout;
+
+ if ((double_buf_needed != 0)
+ && (write_dev != 0)) {
+ fprintf(stderr, "%s: fd %d, DB buf %p, len %u lba %ju "
+ "offset %ju\n", __func__, file_dev->fd,
+ file_dev->tmp_buf, db_len, (uintmax_t)buf->lba,
+ (uintmax_t)io_offset);
+ } else if (data->sg_count == 0) {
+ fprintf(stderr, "%s: fd %d, buf %p, len %u, lba %ju "
+ "offset %ju\n", __func__, file_dev->fd, data->buf,
+ data->fill_len, (uintmax_t)buf->lba,
+ (uintmax_t)io_offset);
+ } else {
+ int i;
+
+ fprintf(stderr, "%s: fd %d, len %u, lba %ju "
+ "offset %ju\n", __func__, file_dev->fd,
+ data->fill_len, (uintmax_t)buf->lba,
+ (uintmax_t)io_offset);
+
+ for (i = 0; i < data->sg_count; i++) {
+ fprintf(stderr, "index %d ptr %p len %zu\n",
+ i, data->iovec[i].iov_base,
+ data->iovec[i].iov_len);
+ }
+ }
+ } else if (retval == 0) {
+ buf->status = CAMDD_STATUS_EOF;
+ if (dev->debug != 0)
+ printf("%s: got EOF from %s!\n", __func__,
+ file_dev->filename);
+ data->resid = data->fill_len;
+ error_count++;
+ } else if (retval < (ssize_t)data->fill_len) {
+ if (buf->status == CAMDD_STATUS_NONE)
+ buf->status = CAMDD_STATUS_SHORT_IO;
+ data->resid = data->fill_len - retval;
+ dev->bytes_transferred += retval;
+ }
+
+bailout:
+ if (buf != NULL) {
+ if (buf->status == CAMDD_STATUS_EOF) {
+ struct camdd_buf *buf2;
+ dev->flags |= CAMDD_DEV_FLAG_EOF;
+ STAILQ_FOREACH(buf2, &dev->run_queue, links)
+ buf2->status = CAMDD_STATUS_EOF;
+ }
+
+ camdd_complete_buf(dev, buf, &error_count);
+ }
+
+ if (error_count != 0)
+ return (-1);
+ else if (no_resources != 0)
+ return (1);
+ else
+ return (0);
+}
+
+/*
+ * Execute one command from the run queue. Returns 0 for success, 1 for
+ * stop processing, and -1 for error.
+ */
+int
+camdd_pass_run(struct camdd_dev *dev)
+{
+ struct camdd_buf *buf = NULL;
+ struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass;
+ struct camdd_buf_data *data;
+ uint32_t num_blocks, sectors_used = 0;
+ union ccb *ccb;
+ int retval = 0, is_write = dev->write_dev;
+ int double_buf_needed = 0;
+
+ buf = STAILQ_FIRST(&dev->run_queue);
+ if (buf == NULL) {
+ retval = 1;
+ goto bailout;
+ }
+
+ /*
+ * If we're writing, we need to go through the source buffer list
+ * and create an S/G list.
+ */
+ if (is_write != 0) {
+ retval = camdd_buf_sg_create(buf, /*iovec*/ 0,dev->sector_size,
+ &sectors_used, &double_buf_needed);
+ if (retval != 0) {
+ retval = -1;
+ goto bailout;
+ }
+ }
+
+ STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links);
+ dev->num_run_queue--;
+
+ data = &buf->buf_type_spec.data;
+
+ ccb = &data->ccb;
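+	/*
+	 * Clear everything in the CCB after the header so stale fields
+	 * from any previous use of this buffer's CCB don't leak into
+	 * the new request.
+	 */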
+ bzero(&(&ccb->ccb_h)[1],
+ sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr));
+
+ /*
+ * In almost every case the number of blocks should be the device
+ * block size. The exception may be at the end of an I/O stream
+ * for a partial block or at the end of a device.
+ */
+ if (is_write != 0)
+ num_blocks = sectors_used;
+ else
+ num_blocks = data->fill_len / pass_dev->block_len;
+
+ scsi_read_write(&ccb->csio,
+ /*retries*/ dev->retry_count,
+ /*cbfcnp*/ NULL,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*readop*/ (dev->write_dev == 0) ? SCSI_RW_READ :
+ SCSI_RW_WRITE,
+ /*byte2*/ 0,
+ /*minimum_cmd_size*/ dev->min_cmd_size,
+ /*lba*/ buf->lba,
+ /*block_count*/ num_blocks,
+ /*data_ptr*/ (data->sg_count != 0) ?
+ (uint8_t *)data->segs : data->buf,
+ /*dxfer_len*/ (num_blocks * pass_dev->block_len),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ dev->io_timeout);
+
+ /* Disable freezing the device queue */
+ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;
+
+ if (dev->retry_count != 0)
+ ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
+
+ if (data->sg_count != 0) {
+ ccb->csio.sglist_cnt = data->sg_count;
+ ccb->ccb_h.flags |= CAM_DATA_SG;
+ }
+
+ /*
+ * Store a pointer to the buffer in the CCB. The kernel will
+ * restore this when we get it back, and we'll use it to identify
+ * the buffer this CCB came from.
+ */
+ ccb->ccb_h.ccb_buf = buf;
+
+ /*
+ * Unlock our mutex in preparation for issuing the ioctl.
+ */
+ pthread_mutex_unlock(&dev->mutex);
+ /*
+ * Queue the CCB to the pass(4) driver.
+ */
+ if (ioctl(pass_dev->dev->fd, CAMIOQUEUE, ccb) == -1) {
+ pthread_mutex_lock(&dev->mutex);
+
+ warn("%s: error sending CAMIOQUEUE ioctl to %s%u", __func__,
+ pass_dev->dev->device_name, pass_dev->dev->dev_unit_num);
+ warn("%s: CCB address is %p", __func__, ccb);
+ retval = -1;
+
+ STAILQ_INSERT_TAIL(&dev->free_queue, buf, links);
+ } else {
+ pthread_mutex_lock(&dev->mutex);
+
+ dev->cur_active_io++;
+ STAILQ_INSERT_TAIL(&dev->active_queue, buf, links);
+ }
+
+bailout:
+ return (retval);
+}
+
+int
+camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len)
+{
+ struct camdd_dev_pass *pass_dev;
+ uint32_t num_blocks;
+ int retval = 0;
+
+ pass_dev = &dev->dev_spec.pass;
+
+ *lba = dev->next_io_pos_bytes / dev->sector_size;
+ *len = dev->blocksize;
+ num_blocks = *len / dev->sector_size;
+
+ /*
+ * If max_sector is 0, then we have no set limit. This can happen
+ * if we're writing to a file in a filesystem, or reading from
+ * something like /dev/zero.
+ */
+ if ((dev->max_sector != 0)
+ || (dev->sector_io_limit != 0)) {
+ uint64_t max_sector;
+
+ if ((dev->max_sector != 0)
+ && (dev->sector_io_limit != 0))
+ max_sector = min(dev->sector_io_limit, dev->max_sector);
+ else if (dev->max_sector != 0)
+ max_sector = dev->max_sector;
+ else
+ max_sector = dev->sector_io_limit;
+
+ /*
+ * Check to see whether we're starting off past the end of
+ * the device. If so, we need to just send an EOF
+ * notification to the writer.
+ */
+ if (*lba > max_sector) {
+ *len = 0;
+ retval = 1;
+ } else if (((*lba + num_blocks) > max_sector + 1)
+ || ((*lba + num_blocks) < *lba)) {
+ /*
+ * If we get here (but pass the first check), we
+ * can trim the request length down to go to the
+ * end of the device.
+ */
+ num_blocks = (max_sector + 1) - *lba;
+ *len = num_blocks * dev->sector_size;
+ retval = 1;
+ }
+ }
+
+ dev->next_io_pos_bytes += *len;
+
+ return (retval);
+}
+
+/*
+ * Returns 0 for success, 1 for EOF detected, and -1 for failure.
+ */
+int
+camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf)
+{
+ struct camdd_buf *buf = NULL;
+ struct camdd_buf_data *data;
+ struct camdd_dev_pass *pass_dev;
+ size_t new_len;
+ struct camdd_buf_data *rb_data;
+ int is_write = dev->write_dev;
+ int eof_flush_needed = 0;
+ int retval = 0;
+ int error;
+
+ pass_dev = &dev->dev_spec.pass;
+
+ /*
+ * If we've gotten EOF or our partner has, we should not continue
+ * queueing I/O. If we're a writer, though, we should continue
+ * to write any buffers that don't have EOF status.
+ */
+ if ((dev->flags & CAMDD_DEV_FLAG_EOF)
+ || ((dev->flags & CAMDD_DEV_FLAG_PEER_EOF)
+ && (is_write == 0))) {
+ /*
+ * Tell the worker thread that we have seen EOF.
+ */
+ retval = 1;
+
+ /*
+ * If we're the writer, send the buffer back with EOF status.
+ */
+ if (is_write) {
+ read_buf->status = CAMDD_STATUS_EOF;
+
+ error = camdd_complete_peer_buf(dev, read_buf);
+ }
+ goto bailout;
+ }
+
+ if (is_write == 0) {
+ buf = camdd_get_buf(dev, CAMDD_BUF_DATA);
+ if (buf == NULL) {
+ retval = -1;
+ goto bailout;
+ }
+ data = &buf->buf_type_spec.data;
+
+ retval = camdd_get_next_lba_len(dev, &buf->lba, &buf->len);
+ if (retval != 0) {
+ buf->status = CAMDD_STATUS_EOF;
+
+ if ((buf->len == 0)
+ && ((dev->flags & (CAMDD_DEV_FLAG_EOF_SENT |
+ CAMDD_DEV_FLAG_EOF_QUEUED)) != 0)) {
+ camdd_release_buf(buf);
+ goto bailout;
+ }
+ dev->flags |= CAMDD_DEV_FLAG_EOF_QUEUED;
+ }
+
+ data->fill_len = buf->len;
+ data->src_start_offset = buf->lba * dev->sector_size;
+
+ /*
+ * Put this on the run queue.
+ */
+ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links);
+ dev->num_run_queue++;
+
+ /* We're done. */
+ goto bailout;
+ }
+
+ /*
+ * Check for new EOF status from the reader.
+ */
+ if ((read_buf->status == CAMDD_STATUS_EOF)
+ || (read_buf->status == CAMDD_STATUS_ERROR)) {
+ dev->flags |= CAMDD_DEV_FLAG_PEER_EOF;
+ if ((STAILQ_FIRST(&dev->pending_queue) == NULL)
+ && (read_buf->len == 0)) {
+ camdd_complete_peer_buf(dev, read_buf);
+ retval = 1;
+ goto bailout;
+ } else
+ eof_flush_needed = 1;
+ }
+
+ /*
+ * See if we have a buffer we're composing with pieces from our
+ * partner thread.
+ */
+ buf = STAILQ_FIRST(&dev->pending_queue);
+ if (buf == NULL) {
+ uint64_t lba;
+ ssize_t len;
+
+ retval = camdd_get_next_lba_len(dev, &lba, &len);
+ if (retval != 0) {
+ read_buf->status = CAMDD_STATUS_EOF;
+
+ if (len == 0) {
+ dev->flags |= CAMDD_DEV_FLAG_EOF;
+ error = camdd_complete_peer_buf(dev, read_buf);
+ goto bailout;
+ }
+ }
+
+ /*
+ * If we don't have a pending buffer, we need to grab a new
+ * one from the free list or allocate another one.
+ */
+ buf = camdd_get_buf(dev, CAMDD_BUF_DATA);
+ if (buf == NULL) {
+ retval = 1;
+ goto bailout;
+ }
+
+ buf->lba = lba;
+ buf->len = len;
+
+ STAILQ_INSERT_TAIL(&dev->pending_queue, buf, links);
+ dev->num_pending_queue++;
+ }
+
+ data = &buf->buf_type_spec.data;
+
+ rb_data = &read_buf->buf_type_spec.data;
+
+ if ((rb_data->src_start_offset != dev->next_peer_pos_bytes)
+ && (dev->debug != 0)) {
+ printf("%s: WARNING: reader offset %#jx != expected offset "
+ "%#jx\n", __func__, (uintmax_t)rb_data->src_start_offset,
+ (uintmax_t)dev->next_peer_pos_bytes);
+ }
+ dev->next_peer_pos_bytes = rb_data->src_start_offset +
+ (rb_data->fill_len - rb_data->resid);
+
+ new_len = (rb_data->fill_len - rb_data->resid) + data->fill_len;
+ if (new_len < buf->len) {
+ /*
+ * There are three cases here:
+ * 1. We need more data to fill up a block, so we put
+ * this I/O on the queue and wait for more I/O.
+ * 2. We have a pending buffer in the queue that is
+ * smaller than our blocksize, but we got an EOF. So we
+ * need to go ahead and flush the write out.
+ * 3. We got an error.
+ */
+
+ /*
+ * Increment our fill length.
+ */
+ data->fill_len += (rb_data->fill_len - rb_data->resid);
+
+ /*
+ * Add the new read buffer to the list for writing.
+ */
+ STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links);
+
+ /* Increment the count */
+ buf->src_count++;
+
+ if (eof_flush_needed == 0) {
+ /*
+ * We need to exit, because we don't have enough
+ * data yet.
+ */
+ goto bailout;
+ } else {
+ /*
+ * Take the buffer off of the pending queue.
+ */
+ STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf,
+ links);
+ dev->num_pending_queue--;
+
+ /*
+ * If we need an EOF flush, but there is no data
+ * to flush, go ahead and return this buffer.
+ */
+ if (data->fill_len == 0) {
+ camdd_complete_buf(dev, buf, /*error_count*/0);
+ retval = 1;
+ goto bailout;
+ }
+
+ /*
+ * Put this on the next queue for execution.
+ */
+ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links);
+ dev->num_run_queue++;
+ }
+ } else if (new_len == buf->len) {
+ /*
+		 * We have enough data to completely fill one block,
+ * so we're ready to issue the I/O.
+ */
+
+ /*
+ * Take the buffer off of the pending queue.
+ */
+ STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links);
+ dev->num_pending_queue--;
+
+ /*
+ * Add the new read buffer to the list for writing.
+ */
+ STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links);
+
+ /* Increment the count */
+ buf->src_count++;
+
+ /*
+ * Increment our fill length.
+ */
+ data->fill_len += (rb_data->fill_len - rb_data->resid);
+
+ /*
+ * Put this on the next queue for execution.
+ */
+ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links);
+ dev->num_run_queue++;
+ } else {
+ struct camdd_buf *idb;
+ struct camdd_buf_indirect *indirect;
+ uint32_t len_to_go, cur_offset;
+
+ idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT);
+ if (idb == NULL) {
+ retval = 1;
+ goto bailout;
+ }
+ indirect = &idb->buf_type_spec.indirect;
+ indirect->src_buf = read_buf;
+ read_buf->refcount++;
+ indirect->offset = 0;
+ indirect->start_ptr = rb_data->buf;
+ /*
+ * We've already established that there is more
+ * data in read_buf than we have room for in our
+ * current write request. So this particular chunk
+ * of the request should just be the remainder
+ * needed to fill up a block.
+ */
+ indirect->len = buf->len - (data->fill_len - data->resid);
+
+ camdd_buf_add_child(buf, idb);
+
+ /*
+ * This buffer is ready to execute, so we can take
+ * it off the pending queue and put it on the run
+ * queue.
+ */
+ STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf,
+ links);
+ dev->num_pending_queue--;
+ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links);
+ dev->num_run_queue++;
+
+ cur_offset = indirect->offset + indirect->len;
+
+ /*
+ * The resulting I/O would be too large to fit in
+ * one block. We need to split this I/O into
+ * multiple pieces. Allocate as many buffers as needed.
+ */
+ for (len_to_go = rb_data->fill_len - rb_data->resid -
+ indirect->len; len_to_go > 0;) {
+ struct camdd_buf *new_buf;
+ struct camdd_buf_data *new_data;
+ uint64_t lba;
+ ssize_t len;
+
+ retval = camdd_get_next_lba_len(dev, &lba, &len);
+ if ((retval != 0)
+ && (len == 0)) {
+ /*
+ * The device has already been marked
+ * as EOF, and there is no space left.
+ */
+ goto bailout;
+ }
+
+ new_buf = camdd_get_buf(dev, CAMDD_BUF_DATA);
+ if (new_buf == NULL) {
+ retval = 1;
+ goto bailout;
+ }
+
+ new_buf->lba = lba;
+ new_buf->len = len;
+
+ idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT);
+ if (idb == NULL) {
+ retval = 1;
+ goto bailout;
+ }
+
+ indirect = &idb->buf_type_spec.indirect;
+
+ indirect->src_buf = read_buf;
+ read_buf->refcount++;
+ indirect->offset = cur_offset;
+ indirect->start_ptr = rb_data->buf + cur_offset;
+ indirect->len = min(len_to_go, new_buf->len);
+#if 0
+ if (((indirect->len % dev->sector_size) != 0)
+ || ((indirect->offset % dev->sector_size) != 0)) {
+ warnx("offset %ju len %ju not aligned with "
+ "sector size %u", indirect->offset,
+ (uintmax_t)indirect->len, dev->sector_size);
+ }
+#endif
+ cur_offset += indirect->len;
+ len_to_go -= indirect->len;
+
+ camdd_buf_add_child(new_buf, idb);
+
+ new_data = &new_buf->buf_type_spec.data;
+
+ if ((new_data->fill_len == new_buf->len)
+ || (eof_flush_needed != 0)) {
+ STAILQ_INSERT_TAIL(&dev->run_queue,
+ new_buf, links);
+ dev->num_run_queue++;
+ } else if (new_data->fill_len < buf->len) {
+ STAILQ_INSERT_TAIL(&dev->pending_queue,
+ new_buf, links);
+ dev->num_pending_queue++;
+ } else {
+ warnx("%s: too much data in new "
+ "buffer!", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ }
+ }
+
+bailout:
+ return (retval);
+}
+
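+/*
+ * Compute the current queue depth and bytes queued for this device and
+ * its peer.  Called with the device mutex held.
+ */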
+void
+camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth,
+ uint32_t *peer_depth, uint32_t *our_bytes, uint32_t *peer_bytes)
+{
+ *our_depth = dev->cur_active_io + dev->num_run_queue;
+ if (dev->num_peer_work_queue >
+ dev->num_peer_done_queue)
+ *peer_depth = dev->num_peer_work_queue -
+ dev->num_peer_done_queue;
+ else
+ *peer_depth = 0;
+ *our_bytes = *our_depth * dev->blocksize;
+ *peer_bytes = dev->peer_bytes_queued;
+}
+
+void
+camdd_sig_handler(int sig)
+{
+ if (sig == SIGINFO)
+ need_status = 1;
+ else {
+ need_exit = 1;
+ error_exit = 1;
+ }
+
+ sem_post(&camdd_sem);
+}
+
+void
+camdd_print_status(struct camdd_dev *camdd_dev, struct camdd_dev *other_dev,
+ struct timespec *start_time)
+{
+ struct timespec done_time;
+ uint64_t total_ns;
+ long double mb_sec, total_sec;
+ int error = 0;
+
+ error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &done_time);
+ if (error != 0) {
+ warn("Unable to get done time");
+ return;
+ }
+
+ timespecsub(&done_time, start_time);
+
+ total_ns = done_time.tv_nsec + (done_time.tv_sec * 1000000000);
+ total_sec = total_ns;
+ total_sec /= 1000000000;
+
+ fprintf(stderr, "%ju bytes %s %s\n%ju bytes %s %s\n"
+ "%.4Lf seconds elapsed\n",
+ (uintmax_t)camdd_dev->bytes_transferred,
+ (camdd_dev->write_dev == 0) ? "read from" : "written to",
+ camdd_dev->device_name,
+ (uintmax_t)other_dev->bytes_transferred,
+ (other_dev->write_dev == 0) ? "read from" : "written to",
+ other_dev->device_name, total_sec);
+
+ mb_sec = min(other_dev->bytes_transferred,camdd_dev->bytes_transferred);
+ mb_sec /= 1024 * 1024;
+ mb_sec *= 1000000000;
+ mb_sec /= total_ns;
+ fprintf(stderr, "%.2Lf MB/sec\n", mb_sec);
+}
+
+int
+camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, uint64_t max_io,
+ int retry_count, int timeout)
+{
+ char *device = NULL;
+ struct cam_device *new_cam_dev = NULL;
+ struct camdd_dev *devs[2];
+ struct timespec start_time;
+ pthread_t threads[2];
+ int unit = 0;
+ int error = 0;
+ int i;
+
+ if (num_io_opts != 2) {
+ warnx("Must have one input and one output path");
+ error = 1;
+ goto bailout;
+ }
+
+ bzero(devs, sizeof(devs));
+
+ for (i = 0; i < num_io_opts; i++) {
+ switch (io_opts[i].dev_type) {
+ case CAMDD_DEV_PASS: {
+ camdd_argmask new_arglist = CAMDD_ARG_NONE;
+ int bus = 0, target = 0, lun = 0;
+ char name[30];
+ int rv;
+
+ if (isdigit(io_opts[i].dev_name[0])) {
+ /* device specified as bus:target[:lun] */
+ rv = parse_btl(io_opts[i].dev_name, &bus,
+ &target, &lun, &new_arglist);
+ if (rv < 2) {
+ warnx("numeric device specification "
+ "must be either bus:target, or "
+ "bus:target:lun");
+ error = 1;
+ goto bailout;
+ }
+ /* default to 0 if lun was not specified */
+ if ((new_arglist & CAMDD_ARG_LUN) == 0) {
+ lun = 0;
+ new_arglist |= CAMDD_ARG_LUN;
+ }
+ } else {
+ if (cam_get_device(io_opts[i].dev_name, name,
+ sizeof name, &unit) == -1) {
+ warnx("%s", cam_errbuf);
+ error = 1;
+ goto bailout;
+ }
+ device = strdup(name);
+ new_arglist |= CAMDD_ARG_DEVICE |CAMDD_ARG_UNIT;
+ }
+
+ if (new_arglist & (CAMDD_ARG_BUS | CAMDD_ARG_TARGET))
+ new_cam_dev = cam_open_btl(bus, target, lun,
+ O_RDWR, NULL);
+ else
+ new_cam_dev = cam_open_spec_device(device, unit,
+ O_RDWR, NULL);
+ if (new_cam_dev == NULL) {
+ warnx("%s", cam_errbuf);
+ error = 1;
+ goto bailout;
+ }
+
+ devs[i] = camdd_probe_pass(new_cam_dev,
+ /*io_opts*/ &io_opts[i],
+ CAMDD_ARG_ERR_RECOVER,
+ /*probe_retry_count*/ 3,
+ /*probe_timeout*/ 5000,
+ /*io_retry_count*/ retry_count,
+ /*io_timeout*/ timeout);
+ if (devs[i] == NULL) {
+ warn("Unable to probe device %s%u",
+ new_cam_dev->device_name,
+ new_cam_dev->dev_unit_num);
+ error = 1;
+ goto bailout;
+ }
+ break;
+ }
+ case CAMDD_DEV_FILE: {
+ int fd = -1;
+
+ if (io_opts[i].dev_name[0] == '-') {
+ if (io_opts[i].write_dev != 0)
+ fd = STDOUT_FILENO;
+ else
+ fd = STDIN_FILENO;
+ } else {
+ if (io_opts[i].write_dev != 0) {
+ fd = open(io_opts[i].dev_name,
+ O_RDWR | O_CREAT, S_IWUSR |S_IRUSR);
+ } else {
+ fd = open(io_opts[i].dev_name,
+ O_RDONLY);
+ }
+ }
+ if (fd == -1) {
+ warn("error opening file %s",
+ io_opts[i].dev_name);
+ error = 1;
+ goto bailout;
+ }
+
+ devs[i] = camdd_probe_file(fd, &io_opts[i],
+ retry_count, timeout);
+ if (devs[i] == NULL) {
+ error = 1;
+ goto bailout;
+ }
+
+ break;
+ }
+ default:
+ warnx("Unknown device type %d (%s)",
+ io_opts[i].dev_type, io_opts[i].dev_name);
+ error = 1;
+ goto bailout;
+ break; /*NOTREACHED */
+ }
+
+ devs[i]->write_dev = io_opts[i].write_dev;
+
+ devs[i]->start_offset_bytes = io_opts[i].offset;
+
+		if (max_io != 0) {
+			devs[i]->sector_io_limit =
+			    (devs[i]->start_offset_bytes /
+			    devs[i]->sector_size) +
+			    (max_io / devs[i]->sector_size) - 1;
+		}
+
+ devs[i]->next_io_pos_bytes = devs[i]->start_offset_bytes;
+ devs[i]->next_completion_pos_bytes =devs[i]->start_offset_bytes;
+ }
+
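+	/*
+	 * Cross-link the two devices so each worker thread can find its
+	 * peer, and initialize the expected peer positions.
+	 */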
+ devs[0]->peer_dev = devs[1];
+ devs[1]->peer_dev = devs[0];
+ devs[0]->next_peer_pos_bytes = devs[0]->peer_dev->next_io_pos_bytes;
+ devs[1]->next_peer_pos_bytes = devs[1]->peer_dev->next_io_pos_bytes;
+
+ sem_init(&camdd_sem, /*pshared*/ 0, 0);
+
+ signal(SIGINFO, camdd_sig_handler);
+ signal(SIGINT, camdd_sig_handler);
+
+ error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &start_time);
+ if (error != 0) {
+ warn("Unable to get start time");
+ goto bailout;
+ }
+
+ for (i = 0; i < num_io_opts; i++) {
+ error = pthread_create(&threads[i], NULL, camdd_worker,
+ (void *)devs[i]);
+ if (error != 0) {
+ warnc(error, "pthread_create() failed");
+ goto bailout;
+ }
+ }
+
+ for (;;) {
+ if ((sem_wait(&camdd_sem) == -1)
+ || (need_exit != 0)) {
+ struct kevent ke;
+
+ for (i = 0; i < num_io_opts; i++) {
+ EV_SET(&ke, (uintptr_t)&devs[i]->work_queue,
+ EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
+
+ devs[i]->flags |= CAMDD_DEV_FLAG_EOF;
+
+ error = kevent(devs[i]->kq, &ke, 1, NULL, 0,
+ NULL);
+ if (error == -1)
+ warn("%s: unable to wake up thread",
+ __func__);
+ error = 0;
+ }
+ break;
+ } else if (need_status != 0) {
+ camdd_print_status(devs[0], devs[1], &start_time);
+ need_status = 0;
+ }
+ }
+ for (i = 0; i < num_io_opts; i++) {
+ pthread_join(threads[i], NULL);
+ }
+
+ camdd_print_status(devs[0], devs[1], &start_time);
+
+bailout:
+
+ for (i = 0; i < num_io_opts; i++)
+ camdd_free_dev(devs[i]);
+
+ return (error + error_exit);
+}
+
+void
+usage(void)
+{
+ fprintf(stderr,
+"usage: camdd <-i|-o pass=pass0,bs=1M,offset=1M,depth=4>\n"
+" <-i|-o file=/tmp/file,bs=512K,offset=1M>\n"
+" <-i|-o file=/dev/da0,bs=512K,offset=1M>\n"
+" <-i|-o file=/dev/nsa0,bs=512K>\n"
+" [-C retry_count][-E][-m max_io_amt][-t timeout_secs][-v][-h]\n"
+"Option description\n"
+"-i <arg=val> Specify input device/file and parameters\n"
+"-o <arg=val> Specify output device/file and parameters\n"
+"Input and Output parameters\n"
+"pass=name Specify a pass(4) device like pass0 or /dev/pass0\n"
+"file=name Specify a file or device, /tmp/foo, /dev/da0, /dev/null\n"
+" or - for stdin/stdout\n"
+"bs=blocksize Specify blocksize in bytes, or using K, M, G, etc. suffix\n"
+"offset=len Specify starting offset in bytes or using K, M, G suffix\n"
+" NOTE: offset cannot be specified on tapes, pipes, stdin/out\n"
+"depth=N Specify a numeric queue depth. This only applies to pass(4)\n"
+"mcs=N Specify a minimum cmd size for pass(4) read/write commands\n"
+"Optional arguments\n"
+"-C retry_cnt Specify a retry count for pass(4) devices\n"
+"-E Enable CAM error recovery for pass(4) devices\n"
+"-m max_io Specify the maximum amount to be transferred in bytes or\n"
+" using K, G, M, etc. suffixes\n"
+"-t timeout Specify the I/O timeout to use with pass(4) devices\n"
+"-v Enable verbose error recovery\n"
+"-h Print this message\n");
+}
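+
+/*
+ * Example invocation using the options above (a sketch; the device and
+ * file names are illustrative): copy from a pass(4) device to a file
+ * with a 1MB blocksize and a queue depth of 4:
+ *
+ *	camdd -i pass=pass0,bs=1M,depth=4 -o file=/tmp/pass0.img
+ */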
+
+int
+camdd_parse_io_opts(char *args, int is_write, struct camdd_io_opts *io_opts)
+{
+ char *tmpstr, *tmpstr2;
+ char *orig_tmpstr = NULL;
+ int retval = 0;
+
+ io_opts->write_dev = is_write;
+
+ tmpstr = strdup(args);
+ if (tmpstr == NULL) {
+ warn("strdup failed");
+ retval = 1;
+ goto bailout;
+ }
+ orig_tmpstr = tmpstr;
+ while ((tmpstr2 = strsep(&tmpstr, ",")) != NULL) {
+ char *name, *value;
+
+ /*
+ * If the user creates an empty parameter by putting in two
+ * commas, skip over it and look for the next field.
+ */
+ if (*tmpstr2 == '\0')
+ continue;
+
+ name = strsep(&tmpstr2, "=");
+ if (*name == '\0') {
+ warnx("Got empty I/O parameter name");
+ retval = 1;
+ goto bailout;
+ }
+ value = strsep(&tmpstr2, "=");
+ if ((value == NULL)
+ || (*value == '\0')) {
+ warnx("Empty I/O parameter value for %s", name);
+ retval = 1;
+ goto bailout;
+ }
+ if (strncasecmp(name, "file", 4) == 0) {
+ io_opts->dev_type = CAMDD_DEV_FILE;
+ io_opts->dev_name = strdup(value);
+ if (io_opts->dev_name == NULL) {
+ warn("Error allocating memory");
+ retval = 1;
+ goto bailout;
+ }
+ } else if (strncasecmp(name, "pass", 4) == 0) {
+ io_opts->dev_type = CAMDD_DEV_PASS;
+ io_opts->dev_name = strdup(value);
+ if (io_opts->dev_name == NULL) {
+ warn("Error allocating memory");
+ retval = 1;
+ goto bailout;
+ }
+ } else if ((strncasecmp(name, "bs", 2) == 0)
+ || (strncasecmp(name, "blocksize", 9) == 0)) {
+ retval = expand_number(value, &io_opts->blocksize);
+ if (retval == -1) {
+ warn("expand_number(3) failed on %s=%s", name,
+ value);
+ retval = 1;
+ goto bailout;
+ }
+ } else if (strncasecmp(name, "depth", 5) == 0) {
+ char *endptr;
+
+ io_opts->queue_depth = strtoull(value, &endptr, 0);
+ if (*endptr != '\0') {
+ warnx("invalid queue depth %s", value);
+ retval = 1;
+ goto bailout;
+ }
+ } else if (strncasecmp(name, "mcs", 3) == 0) {
+ char *endptr;
+
+ io_opts->min_cmd_size = strtol(value, &endptr, 0);
+ if ((*endptr != '\0')
+ || ((io_opts->min_cmd_size > 16)
+ || (io_opts->min_cmd_size < 0))) {
+ warnx("invalid minimum cmd size %s", value);
+ retval = 1;
+ goto bailout;
+ }
+ } else if (strncasecmp(name, "offset", 6) == 0) {
+ retval = expand_number(value, &io_opts->offset);
+ if (retval == -1) {
+ warn("expand_number(3) failed on %s=%s", name,
+ value);
+ retval = 1;
+ goto bailout;
+ }
+ } else if (strncasecmp(name, "debug", 5) == 0) {
+ char *endptr;
+
+ io_opts->debug = strtoull(value, &endptr, 0);
+ if (*endptr != '\0') {
+ warnx("invalid debug level %s", value);
+ retval = 1;
+ goto bailout;
+ }
+ } else {
+ warnx("Unrecognized parameter %s=%s", name, value);
+ }
+ }
+bailout:
+ free(orig_tmpstr);
+
+ return (retval);
+}
+
+int
+main(int argc, char **argv)
+{
+ int c;
+ camdd_argmask arglist = CAMDD_ARG_NONE;
+ int timeout = 0, retry_count = 1;
+ int error = 0;
+ uint64_t max_io = 0;
+ struct camdd_io_opts *opt_list = NULL;
+
+ if (argc == 1) {
+ usage();
+ exit(1);
+ }
+
+ opt_list = calloc(2, sizeof(struct camdd_io_opts));
+ if (opt_list == NULL) {
+ warn("Unable to allocate option list");
+ error = 1;
+ goto bailout;
+ }
+
+ while ((c = getopt(argc, argv, "C:Ehi:m:o:t:v")) != -1){
+ switch (c) {
+ case 'C':
+ retry_count = strtol(optarg, NULL, 0);
+ if (retry_count < 0)
+ errx(1, "retry count %d is < 0",
+ retry_count);
+ arglist |= CAMDD_ARG_RETRIES;
+ break;
+ case 'E':
+ arglist |= CAMDD_ARG_ERR_RECOVER;
+ break;
+ case 'i':
+ case 'o':
+ if (((c == 'i')
+ && (opt_list[0].dev_type != CAMDD_DEV_NONE))
+ || ((c == 'o')
+ && (opt_list[1].dev_type != CAMDD_DEV_NONE))) {
+ errx(1, "Only one input and output path "
+ "allowed");
+ }
+ error = camdd_parse_io_opts(optarg, (c == 'o') ? 1 : 0,
+ (c == 'o') ? &opt_list[1] : &opt_list[0]);
+ if (error != 0)
+ goto bailout;
+ break;
+ case 'm':
+ error = expand_number(optarg, &max_io);
+ if (error == -1) {
+ warn("invalid maximum I/O amount %s", optarg);
+ error = 1;
+ goto bailout;
+ }
+ break;
+ case 't':
+ timeout = strtol(optarg, NULL, 0);
+ if (timeout < 0)
+ errx(1, "invalid timeout %d", timeout);
+ /* Convert the timeout from seconds to ms */
+ timeout *= 1000;
+ arglist |= CAMDD_ARG_TIMEOUT;
+ break;
+ case 'v':
+ arglist |= CAMDD_ARG_VERBOSE;
+ break;
+ case 'h':
+ default:
+ usage();
+ exit(1);
+ break; /*NOTREACHED*/
+ }
+ }
+
+ if ((opt_list[0].dev_type == CAMDD_DEV_NONE)
+ || (opt_list[1].dev_type == CAMDD_DEV_NONE))
+ errx(1, "Must specify both -i and -o");
+
+ /*
+ * Set the timeout if the user hasn't specified one.
+ */
+ if (timeout == 0)
+ timeout = CAMDD_PASS_RW_TIMEOUT;
+
+ error = camdd_rw(opt_list, 2, max_io, retry_count, timeout);
+
+bailout:
+ free(opt_list);
+
+ exit(error);
+}
diff --git a/usr.sbin/nfsuserd/nfsuserd.8 b/usr.sbin/nfsuserd/nfsuserd.8
index 04100ea..da60bbc 100644
--- a/usr.sbin/nfsuserd/nfsuserd.8
+++ b/usr.sbin/nfsuserd/nfsuserd.8
@@ -24,14 +24,14 @@
.\"
.\" $FreeBSD$
.\"
-.Dd April 25, 2009
+.Dd November 1, 2015
.Dt NFSUSERD 8
.Os
.Sh NAME
.Nm nfsuserd
.Nd load user and group information into the kernel for
.Tn NFSv4
-services
+services, plus manage-gids support for all versions of NFS
.Sh SYNOPSIS
.Nm nfsuserd
.Op Fl domain Ar domain_name
@@ -39,11 +39,14 @@ services
.Op Fl usermax Ar max_cache_size
.Op Fl verbose
.Op Fl force
+.Op Fl manage-gids
.Op Ar num_servers
.Sh DESCRIPTION
.Nm
loads user and group information into the kernel for NFSv4.
It must be running for NFSv4 to function correctly, either client or server.
+It also provides support for manage-gids and must be running on the server
+when that option is used with any version of NFS.
.Pp
Upon startup, it loads the machine's DNS domain name, plus timeout and
cache size limit into the kernel. It then preloads the cache with group
@@ -79,6 +82,15 @@ When set, the server logs a bunch of information to syslog.
This flag option must be set to restart the daemon after it has gone away
abnormally and refuses to start, because it thinks nfsuserd is already
running.
+.It Fl manage-gids
+This flag enables manage-gids for the NFS server
+.Xr nfsd 8 .
+When this is enabled, all NFS requests using
+AUTH_SYS authentication take the uid from the RPC request
+and uses the group list for that uid provided by
+.Xr getgrouplist 3
+on the server instead of the list of groups provided in the RPC authenticator.
+This can be used to avoid the 16 group limit for AUTH_SYS.
.It Ar num_servers
Specifies how many servers to create (max 20).
The default of 4 may be sufficient. You should run enough servers, so that
@@ -89,8 +101,9 @@ performance impact, whereas running too many will only tie up some resources,
such as a process table entry and swap space.
.El
.Sh SEE ALSO
-.Xr getpwent 3 ,
.Xr getgrent 3 ,
+.Xr getgrouplist 3 ,
+.Xr getpwent 3 ,
.Xr nfsv4 4 ,
.Xr group 5 ,
.Xr passwd 5 ,
@@ -103,7 +116,8 @@ utility was introduced with the NFSv4 experimental subsystem in 2009.
The
.Nm
uses
-.Xr getgrent 3
+.Xr getgrent 3 ,
+.Xr getgrouplist 3
and
.Xr getpwent 3
library calls to resolve requests and will hang if the servers handling
diff --git a/usr.sbin/nfsuserd/nfsuserd.c b/usr.sbin/nfsuserd/nfsuserd.c
index 35e2849..293da71 100644
--- a/usr.sbin/nfsuserd/nfsuserd.c
+++ b/usr.sbin/nfsuserd/nfsuserd.c
@@ -92,7 +92,7 @@ uid_t defaultuid = (uid_t)32767;
u_char *defaultgroup = "nogroup";
gid_t defaultgid = (gid_t)32767;
int verbose = 0, im_a_slave = 0, nfsuserdcnt = -1, forcestart = 0;
-int defusertimeout = DEFUSERTIMEOUT;
+int defusertimeout = DEFUSERTIMEOUT, manage_gids = 0;
pid_t slaves[MAXNFSUSERD];
int
@@ -110,6 +110,8 @@ main(int argc, char *argv[])
char hostname[MAXHOSTNAMELEN + 1], *cp;
struct addrinfo *aip, hints;
static uid_t check_dups[MAXUSERMAX];
+ gid_t grps[NGROUPS];
+ int ngroup;
if (modfind("nfscommon") < 0) {
/* Not present in kernel, try loading it */
@@ -160,6 +162,8 @@ main(int argc, char *argv[])
verbose = 1;
} else if (!strcmp(*argv, "-force")) {
forcestart = 1;
+ } else if (!strcmp(*argv, "-manage-gids")) {
+ manage_gids = 1;
} else if (!strcmp(*argv, "-usermax")) {
if (argc == 1)
usage();
@@ -297,12 +301,14 @@ main(int argc, char *argv[])
nid.nid_gid = defaultgid;
nid.nid_name = dnsname;
nid.nid_namelen = strlen(nid.nid_name);
+ nid.nid_ngroup = 0;
+ nid.nid_grps = NULL;
nid.nid_flag = NFSID_INITIALIZE;
#ifdef DEBUG
printf("Initialize uid=%d gid=%d dns=%s\n", nid.nid_uid, nid.nid_gid,
nid.nid_name);
#else
- error = nfssvc(NFSSVC_IDNAME, &nid);
+ error = nfssvc(NFSSVC_IDNAME | NFSSVC_NEWSTRUCT, &nid);
if (error)
errx(1, "Can't initialize nfs user/groups");
#endif
@@ -316,11 +322,13 @@ main(int argc, char *argv[])
nid.nid_gid = grp->gr_gid;
nid.nid_name = grp->gr_name;
nid.nid_namelen = strlen(grp->gr_name);
+ nid.nid_ngroup = 0;
+ nid.nid_grps = NULL;
nid.nid_flag = NFSID_ADDGID;
#ifdef DEBUG
printf("add gid=%d name=%s\n", nid.nid_gid, nid.nid_name);
#else
- error = nfssvc(NFSSVC_IDNAME, &nid);
+ error = nfssvc(NFSSVC_IDNAME | NFSSVC_NEWSTRUCT, &nid);
if (error)
errx(1, "Can't add group %s", grp->gr_name);
#endif
@@ -352,11 +360,23 @@ main(int argc, char *argv[])
nid.nid_uid = pwd->pw_uid;
nid.nid_name = pwd->pw_name;
nid.nid_namelen = strlen(pwd->pw_name);
+ if (manage_gids != 0) {
+ /* Get the group list for this user. */
+ ngroup = NGROUPS;
+ if (getgrouplist(pwd->pw_name, pwd->pw_gid, grps,
+ &ngroup) < 0)
+ syslog(LOG_ERR, "Group list too small");
+ nid.nid_ngroup = ngroup;
+ nid.nid_grps = grps;
+ } else {
+ nid.nid_ngroup = 0;
+ nid.nid_grps = NULL;
+ }
nid.nid_flag = NFSID_ADDUID;
#ifdef DEBUG
printf("add uid=%d name=%s\n", nid.nid_uid, nid.nid_name);
#else
- error = nfssvc(NFSSVC_IDNAME, &nid);
+ error = nfssvc(NFSSVC_IDNAME | NFSSVC_NEWSTRUCT, &nid);
if (error)
errx(1, "Can't add user %s", pwd->pw_name);
#endif
@@ -439,6 +459,8 @@ nfsuserdsrv(struct svc_req *rqstp, SVCXPRT *transp)
struct info info;
struct nfsd_idargs nid;
u_int32_t saddr;
+ gid_t grps[NGROUPS];
+ int ngroup;
/*
* Only handle requests from 127.0.0.1 on a reserved port number.
@@ -472,14 +494,28 @@ nfsuserdsrv(struct svc_req *rqstp, SVCXPRT *transp)
nid.nid_usertimeout = defusertimeout;
nid.nid_uid = pwd->pw_uid;
nid.nid_name = pwd->pw_name;
+ if (manage_gids != 0) {
+ /* Get the group list for this user. */
+ ngroup = NGROUPS;
+ if (getgrouplist(pwd->pw_name, pwd->pw_gid,
+ grps, &ngroup) < 0)
+ syslog(LOG_ERR, "Group list too small");
+ nid.nid_ngroup = ngroup;
+ nid.nid_grps = grps;
+ } else {
+ nid.nid_ngroup = 0;
+ nid.nid_grps = NULL;
+ }
} else {
nid.nid_usertimeout = 5;
nid.nid_uid = (uid_t)info.id;
nid.nid_name = defaultuser;
+ nid.nid_ngroup = 0;
+ nid.nid_grps = NULL;
}
nid.nid_namelen = strlen(nid.nid_name);
nid.nid_flag = NFSID_ADDUID;
- error = nfssvc(NFSSVC_IDNAME, &nid);
+ error = nfssvc(NFSSVC_IDNAME | NFSSVC_NEWSTRUCT, &nid);
if (error) {
info.retval = error;
syslog(LOG_ERR, "Can't add user %s\n", pwd->pw_name);
@@ -509,8 +545,10 @@ nfsuserdsrv(struct svc_req *rqstp, SVCXPRT *transp)
nid.nid_name = defaultgroup;
}
nid.nid_namelen = strlen(nid.nid_name);
+ nid.nid_ngroup = 0;
+ nid.nid_grps = NULL;
nid.nid_flag = NFSID_ADDGID;
- error = nfssvc(NFSSVC_IDNAME, &nid);
+ error = nfssvc(NFSSVC_IDNAME | NFSSVC_NEWSTRUCT, &nid);
if (error) {
info.retval = error;
syslog(LOG_ERR, "Can't add group %s\n",
@@ -541,8 +579,10 @@ nfsuserdsrv(struct svc_req *rqstp, SVCXPRT *transp)
nid.nid_name = info.name;
}
nid.nid_namelen = strlen(nid.nid_name);
+ nid.nid_ngroup = 0;
+ nid.nid_grps = NULL;
nid.nid_flag = NFSID_ADDUSERNAME;
- error = nfssvc(NFSSVC_IDNAME, &nid);
+ error = nfssvc(NFSSVC_IDNAME | NFSSVC_NEWSTRUCT, &nid);
if (error) {
info.retval = error;
syslog(LOG_ERR, "Can't add user %s\n", pwd->pw_name);
@@ -572,8 +612,10 @@ nfsuserdsrv(struct svc_req *rqstp, SVCXPRT *transp)
nid.nid_name = info.name;
}
nid.nid_namelen = strlen(nid.nid_name);
+ nid.nid_ngroup = 0;
+ nid.nid_grps = NULL;
nid.nid_flag = NFSID_ADDGROUPNAME;
- error = nfssvc(NFSSVC_IDNAME, &nid);
+ error = nfssvc(NFSSVC_IDNAME | NFSSVC_NEWSTRUCT, &nid);
if (error) {
info.retval = error;
syslog(LOG_ERR, "Can't add group %s\n",
@@ -679,5 +721,5 @@ usage(void)
{
errx(1,
- "usage: nfsuserd [-usermax cache_size] [-usertimeout minutes] [-verbose] [-domain domain_name] [n]");
+ "usage: nfsuserd [-usermax cache_size] [-usertimeout minutes] [-verbose] [-manage-gids] [-domain domain_name] [n]");
}