-rw-r--r--  share/examples/scsi_target/scsi_cmds.c      2
-rw-r--r--  sys/amd64/conf/GENERIC                      3
-rw-r--r--  sys/cam/ctl/README.ctl.txt                449
-rw-r--r--  sys/cam/ctl/ctl.c                       13082
-rw-r--r--  sys/cam/ctl/ctl.h                         216
-rw-r--r--  sys/cam/ctl/ctl_backend.c                 177
-rw-r--r--  sys/cam/ctl/ctl_backend.h                 288
-rw-r--r--  sys/cam/ctl/ctl_backend_block.c          2213
-rw-r--r--  sys/cam/ctl/ctl_backend_block.h            72
-rw-r--r--  sys/cam/ctl/ctl_backend_ramdisk.c         835
-rw-r--r--  sys/cam/ctl/ctl_cmd_table.c               984
-rw-r--r--  sys/cam/ctl/ctl_debug.h                    52
-rw-r--r--  sys/cam/ctl/ctl_error.c                   811
-rw-r--r--  sys/cam/ctl/ctl_error.h                    92
-rw-r--r--  sys/cam/ctl/ctl_frontend.c                187
-rw-r--r--  sys/cam/ctl/ctl_frontend.h                295
-rw-r--r--  sys/cam/ctl/ctl_frontend_cam_sim.c        866
-rw-r--r--  sys/cam/ctl/ctl_frontend_internal.c      1782
-rw-r--r--  sys/cam/ctl/ctl_frontend_internal.h       154
-rw-r--r--  sys/cam/ctl/ctl_ha.h                      270
-rw-r--r--  sys/cam/ctl/ctl_io.h                      474
-rw-r--r--  sys/cam/ctl/ctl_ioctl.h                   604
-rw-r--r--  sys/cam/ctl/ctl_mem_pool.c                192
-rw-r--r--  sys/cam/ctl/ctl_mem_pool.h                 83
-rw-r--r--  sys/cam/ctl/ctl_private.h                 493
-rw-r--r--  sys/cam/ctl/ctl_scsi_all.c                227
-rw-r--r--  sys/cam/ctl/ctl_scsi_all.h                 52
-rw-r--r--  sys/cam/ctl/ctl_ser_table.c                81
-rw-r--r--  sys/cam/ctl/ctl_util.c                    843
-rw-r--r--  sys/cam/ctl/ctl_util.h                    119
-rw-r--r--  sys/cam/ctl/scsi_ctl.c                   2049
-rw-r--r--  sys/cam/scsi/scsi_all.c                     9
-rw-r--r--  sys/cam/scsi/scsi_all.h                    52
-rw-r--r--  sys/cam/scsi/scsi_da.h                     50
-rw-r--r--  sys/cam/scsi/scsi_targ_bh.c                 2
-rw-r--r--  sys/conf/NOTES                              1
-rw-r--r--  sys/conf/files                             13
-rw-r--r--  sys/dev/ata/atapi-cam.c                     5
-rw-r--r--  sys/dev/ciss/ciss.c                         2
-rw-r--r--  sys/i386/conf/GENERIC                       3
-rw-r--r--  sys/i386/conf/PAE                           3
-rw-r--r--  sys/ia64/conf/GENERIC                       3
-rw-r--r--  sys/sparc64/conf/GENERIC                    1
-rw-r--r--  usr.bin/Makefile                            1
-rw-r--r--  usr.bin/ctlstat/Makefile                    8
-rw-r--r--  usr.bin/ctlstat/ctlstat.8                 122
-rw-r--r--  usr.bin/ctlstat/ctlstat.c                 730
-rw-r--r--  usr.sbin/Makefile                           1
-rw-r--r--  usr.sbin/ctladm/Makefile                   21
-rw-r--r--  usr.sbin/ctladm/ctladm.8                  963
-rw-r--r--  usr.sbin/ctladm/ctladm.c                 4005
-rw-r--r--  usr.sbin/ctladm/ctladm.h                   50
-rw-r--r--  usr.sbin/ctladm/util.c                    156
-rw-r--r--  usr.sbin/mlxcontrol/interface.c             2
54 files changed, 34231 insertions, 19 deletions
diff --git a/share/examples/scsi_target/scsi_cmds.c b/share/examples/scsi_target/scsi_cmds.c
index b0da04e..7a95606 100644
--- a/share/examples/scsi_target/scsi_cmds.c
+++ b/share/examples/scsi_target/scsi_cmds.c
@@ -328,7 +328,7 @@ tcmd_inquiry(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio)
bcopy(&inq_data, ctio->data_ptr, sizeof(inq_data));
ctio->dxfer_len = inq_data.additional_length + 4;
ctio->dxfer_len = min(ctio->dxfer_len,
- SCSI_CDB6_LEN(inq->length));
+ scsi_2btoul(inq->length));
ctio->ccb_h.flags |= CAM_DIR_IN | CAM_SEND_STATUS;
ctio->scsi_status = SCSI_STATUS_OK;
}
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index 059f35f..46b9094 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -131,7 +131,8 @@ device da # Direct Access (disks)
device sa # Sequential Access (tape etc)
device cd # CD
device pass # Passthrough device (direct ATA/SCSI access)
-device ses # SCSI Environmental Services (and SAF-TE)
+device ses # Enclosure Services (SES and SAF-TE)
+device ctl # CAM Target Layer
# RAID controllers interfaced to the SCSI subsystem
device amr # AMI MegaRAID
diff --git a/sys/cam/ctl/README.ctl.txt b/sys/cam/ctl/README.ctl.txt
new file mode 100644
index 0000000..4ccbadf
--- /dev/null
+++ b/sys/cam/ctl/README.ctl.txt
@@ -0,0 +1,449 @@
+/* $FreeBSD$ */
+
+CTL - CAM Target Layer Description
+
+Revision 1.4 (December 29th, 2011)
+Ken Merry <ken@FreeBSD.org>
+
+Table of Contents:
+=================
+
+Introduction
+Features
+Configuring and Running CTL
+Revision 1.N Changes
+To Do List
+Code Roadmap
+Userland Commands
+
+Introduction:
+============
+
+CTL is a disk and processor device emulation subsystem originally written
+for Copan Systems under Linux starting in 2003. It has been shipping in
+Copan (now SGI) products since 2005.
+
+It was ported to FreeBSD in 2008, and thanks to an agreement between SGI
+(who acquired Copan's assets in 2010) and Spectra Logic in 2010, CTL is
+available under a BSD-style license. The intent behind the agreement was
+that Spectra would work to get CTL into the FreeBSD tree.
+
+Features:
+========
+
+ - Disk and processor device emulation.
+ - Tagged queueing
+ - SCSI task attribute support (ordered, head of queue, simple tags)
+ - SCSI implicit command ordering support. (e.g. if a read follows a mode
+ select, the read will be blocked until the mode select completes.)
+ - Full task management support (abort, LUN reset, target reset, etc.)
+ - Support for multiple ports
+ - Support for multiple simultaneous initiators
+ - Support for multiple simultaneous backing stores
+ - Persistent reservation support
+ - Mode sense/select support
+ - Error injection support
+ - High Availability support (1)
+ - All I/O handled in-kernel, no userland context switch overhead.
+
+(1) HA Support is just an API stub, and needs much more to be fully
+ functional. See the to-do list below.
+
+Configuring and Running CTL:
+===========================
+
+ - After applying the CTL patchset to your tree, build world and install it
+ on your target system.
+
+ - Add 'device ctl' to your kernel configuration file.
+
+ - If you're running with an 8Gb or 4Gb Qlogic FC board, add
+ 'options ISP_TARGET_MODE' to your kernel config file. Keep in mind that
+ the isp(4) driver can run in target or initiator mode, but not both on
+ the same machine. 'device ispfw' or loading the ispfw module is also
+ recommended.
+
+ - Rebuild and install a new kernel.
+
+ - Reboot with the new kernel.
+
+ - To add a LUN with the RAM disk backend:
+
+ ctladm create -b ramdisk -s 10485760000000000000
+ ctladm port -o on
+
+ - You should now see the CTL disk LUN through camcontrol devlist:
+
+scbus6 on ctl2cam0 bus 0:
+<FREEBSD CTLDISK 0001> at scbus6 target 1 lun 0 (da24,pass32)
+<> at scbus6 target -1 lun -1 ()
+
+ This is visible through the CTL CAM SIM. This allows using CTL without
+ any physical hardware. You should be able to issue any normal SCSI
+ commands to the device via the pass(4)/da(4) devices.
+
+ If any target-capable HBAs are in the system (e.g. isp(4)), and have
+ target mode enabled, you should now also be able to see the CTL LUNs via
+ that target interface.
+
+ Note that all CTL LUNs are presented to all frontends. There is no
+   LUN masking or separate per-port configuration.
+
+ - Note that the ramdisk backend is a "fake" ramdisk. That is, it is
+ backed by a small amount of RAM that is used for all I/O requests. This
+ is useful for performance testing, but not for any data integrity tests.
+
+ - To add a LUN with the block/file backend:
+
+ truncate -s +1T myfile
+ ctladm create -b block -o file=myfile
+ ctladm port -o on
+
+ - You can also see a list of LUNs and their backends like this:
+
+# ctladm devlist
+LUN Backend       Size (Blocks)  BS  Serial Number    Device ID
+  0 block            2147483648  512 MYSERIAL   0     MYDEVID   0
+  1 block            2147483648  512 MYSERIAL   1     MYDEVID   1
+  2 block            2147483648  512 MYSERIAL   2     MYDEVID   2
+  3 block            2147483648  512 MYSERIAL   3     MYDEVID   3
+  4 block            2147483648  512 MYSERIAL   4     MYDEVID   4
+  5 block            2147483648  512 MYSERIAL   5     MYDEVID   5
+  6 block            2147483648  512 MYSERIAL   6     MYDEVID   6
+  7 block            2147483648  512 MYSERIAL   7     MYDEVID   7
+  8 block            2147483648  512 MYSERIAL   8     MYDEVID   8
+  9 block            2147483648  512 MYSERIAL   9     MYDEVID   9
+ 10 block            2147483648  512 MYSERIAL  10     MYDEVID  10
+ 11 block            2147483648  512 MYSERIAL  11     MYDEVID  11
+
+ - You can see the LUN type and backing store for block/file backend LUNs
+ like this:
+
+# ctladm devlist -v
+LUN Backend       Size (Blocks)  BS  Serial Number    Device ID
+  0 block            2147483648  512 MYSERIAL   0     MYDEVID   0
+      lun_type=0
+      num_threads=14
+      file=testdisk0
+  1 block            2147483648  512 MYSERIAL   1     MYDEVID   1
+      lun_type=0
+      num_threads=14
+      file=testdisk1
+  2 block            2147483648  512 MYSERIAL   2     MYDEVID   2
+      lun_type=0
+      num_threads=14
+      file=testdisk2
+  3 block            2147483648  512 MYSERIAL   3     MYDEVID   3
+      lun_type=0
+      num_threads=14
+      file=testdisk3
+  4 block            2147483648  512 MYSERIAL   4     MYDEVID   4
+      lun_type=0
+      num_threads=14
+      file=testdisk4
+  5 block            2147483648  512 MYSERIAL   5     MYDEVID   5
+      lun_type=0
+      num_threads=14
+      file=testdisk5
+  6 block            2147483648  512 MYSERIAL   6     MYDEVID   6
+      lun_type=0
+      num_threads=14
+      file=testdisk6
+  7 block            2147483648  512 MYSERIAL   7     MYDEVID   7
+      lun_type=0
+      num_threads=14
+      file=testdisk7
+  8 block            2147483648  512 MYSERIAL   8     MYDEVID   8
+      lun_type=0
+      num_threads=14
+      file=testdisk8
+  9 block            2147483648  512 MYSERIAL   9     MYDEVID   9
+      lun_type=0
+      num_threads=14
+      file=testdisk9
+ 10 ramdisk                   0    0 MYSERIAL   0     MYDEVID   0
+      lun_type=3
+ 11 ramdisk     204800000000000  512 MYSERIAL   1     MYDEVID   1
+      lun_type=0
+
+
+Revision 1.4 Changes
+====================
+ - Added in the second HA mode (where CTL does the data transfers instead
+ of having data transfers done below CTL), and abstracted out the Copan
+ HA API.
+
+ - Fixed the phantom device problem in the CTL CAM SIM and improved the
+ CAM SIM to automatically trigger a rescan when the port is enabled and
+ disabled.
+
+ - Made the number of threads in the block backend configurable via sysctl,
+ loader tunable and the ctladm command line. (You can now specify
+ -o num_threads=4 when creating a LUN with ctladm create.)
+
+ - Fixed some LUN selection issues in ctlstat(8) and allowed for selection
+ of LUN numbers up to 1023.
+
+ - General cleanup.
+
+ - This version intended for public release.
+
+Revision 1.3 Changes
+====================
+ - Added descriptor sense support to CTL. It can be enabled through the
+ control mode page (10), but is disabled by default.
+
+ - Improved error injection support. The number of errors that can be
+ injected with 'ctladm inject' has been increased, and any arbitrary
+ sense data may now be injected as well.
+
+ - The port infrastructure has been revamped. Individual ports and types
+ of ports may now be enabled and disabled from the command line. ctladm
+ now has the ability to set the WWNN and WWPN for each port.
+
+ - The block backend can now send multiple I/Os to backing files. Multiple
+ writes are only allowed for ZFS, but multiple readers are allowed for
+ any filesystem.
+
+ - The block and ramdisk backends now support setting the LUN blocksize.
+ There are some restrictions when the backing device is a block device,
+ but otherwise the blocksize may be set to anything.
+
+Revision 1.2 Changes
+====================
+
+ - CTL initialization process has been revamped. Instead of using an
+ ad-hoc method, it is now sequenced through SYSINIT() calls.
+
+ - A block/file backend has been added. This allows using arbitrary files
+ or block devices as a backing store.
+
+ - The userland LUN configuration interface has been completely rewritten.
+ Configuration is now done out of band.
+
+ - The ctladm(8) command line interface has been revamped, and is now
+ similar to camcontrol(8).
+
+To Do List:
+==========
+
+ - Make CTL buildable as a module. Work needs to be done on initialization,
+ and on freeing resources and LUNs when it is built as a module.
+
+ - Use devstat(9) for CTL's statistics collection. CTL uses a home-grown
+ statistics collection system that is similar to devstat(9). ctlstat
+ should be retired in favor of iostat, etc., once aggregation modes are
+ available in iostat to match the behavior of ctlstat -t and dump modes
+ are available to match the behavior of ctlstat -d/ctlstat -J.
+
+ - ZFS ARC backend for CTL. Since ZFS copies all I/O into the ARC
+ (Adaptive Replacement Cache), running the block/file backend on top of a
+ ZFS-backed zdev or file will involve an extra set of copies. The
+   optimal solution for backing CTL targets with ZFS would be to allocate
+   buffers out of the ARC and DMA to/from them directly. That would
+   eliminate an extra data buffer allocation and copy.
+
+ - Switch CTL over to using CAM CCBs instead of its own union ctl_io. This
+ will likely require a significant amount of work, but will eliminate
+ another data structure in the stack, more memory allocations, etc. This
+ will also require changes to the CAM CCB structure to support CTL.
+
+ - Full-featured High Availability support. The HA API that is in ctl_ha.h
+ is essentially a renamed version of Copan's HA API. There is no
+ substance to it, but it remains in CTL to show what needs to be done to
+ implement active/active HA from a CTL standpoint. The things that would
+ need to be done include:
+ - A kernel level software API for message passing as well as DMA
+ between at least two nodes.
+ - Hardware support and drivers for inter-node communication. This
+	  could be as simple as ethernet hardware and drivers.
+ - A "supervisor", or startup framework to control and coordinate
+ HA startup, failover (going from active/active to single mode),
+ and failback (going from single mode to active/active).
+ - HA support in other components of the stack. The goal behind HA
+ is that one node can fail and another node can seamlessly take
+ over handling I/O requests. This requires support from pretty
+ much every component in the storage stack, from top to bottom.
+ CTL is one piece of it, but you also need support in the RAID
+ stack/filesystem/backing store. You also need full configuration
+ mirroring, and all peer nodes need to be able to talk to the
+ underlying storage hardware.
+
+Code Roadmap:
+============
+
+CTL has the concept of pluggable frontend ports and backends. All
+frontends and backends can be active at the same time. You can have a
+ramdisk-backed LUN present alongside a file-backed LUN.
+
+ctl.c:
+-----
+
+This is the core of CTL, where all of the command handlers and a lot of
+other things live. Yes, it is large. It started off small and grew to its
+current size over time. Perhaps it can be split into more files at some
+point.
+
+Here is a roadmap of some of the primary functions in ctl.c. Starting here
+and following the various leaf functions will show the command flow.
+
+ctl_queue()             This is where commands from the frontend ports come
+                        in.
+
+ctl_queue_sense()       This is only used for non-packetized SCSI, i.e.
+                        parallel SCSI prior to U320 and perhaps U160.
+
+ctl_work_thread()       This is the primary work thread, and everything gets
+                        executed from there.
+
+ctl_scsiio_precheck()   This is where all of the initial checks are done,
+                        and I/O is either queued for execution or blocked.
+
+ctl_scsiio()            This is where the command handler is actually
+                        executed. (See ctl_cmd_table.c for the mapping of
+                        SCSI opcode to command handler function.)
+
+ctl_done()              This routine (or ctl_done_lock()) is called to
+                        initiate the command completion process.
+
+ctl_process_done()      This is where command completion actually happens.
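+
+A condensed sketch of the life of one command, using the functions above
+(error handling omitted; this is an illustration rather than the actual
+code, and the pool reference passed to ctl_alloc_io() is hypothetical):
+
+	/* A frontend port allocates and fills in an I/O, then queues it: */
+	io = ctl_alloc_io(port_pool_ref);
+	ctl_zero_io(io);
+	io->io_hdr.io_type = CTL_IO_SCSI;
+	/* ...fill in the nexus, tag number/type and CDB, then... */
+	ctl_queue(io);
+
+	/*
+	 * ctl_work_thread() picks it up and runs ctl_scsiio_precheck() and
+	 * then ctl_scsiio(), which dispatches to the command handler.  When
+	 * the backend has finished the I/O, it calls:
+	 */
+	ctl_done(io);		/* which leads to ctl_process_done() */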
+
+ctl.h:
+-----
+
+Basic function declarations and data structures.
+
+ctl_backend.c,
+ctl_backend.h:
+-------------
+
+These files define the basic CTL backend API. The comments in the header
+explain the API.
+
+ctl_backend_block.c
+ctl_backend_block.h:
+-------------------
+
+The block and file backend. This allows for using a disk or a file as the
+backing store for a LUN. Multiple threads are started to do I/O to the
+backing device, primarily because the VFS API requires that to get any
+concurrency.
+
+ctl_backend_ramdisk.c:
+---------------------
+
+A "fake" ramdisk backend. It only allocates a small amount of memory to
+act as a source and sink for reads and writes from an initiator. Therefore
+it cannot be used for any real data, but it can be used to test for
+throughput. It can also be used to test initiators' support for extremely
+large LUNs.
+
+ctl_cmd_table.c:
+---------------
+
+This is a table with all 256 possible SCSI opcodes, and command handler
+functions defined for supported opcodes. It is included in ctl.c.
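+
+Each entry pairs an opcode with its handler function, a serialization
+table index and a set of flags. A rough sketch of the shape (field names
+are illustrative; see the source for the real definition):
+
+	struct ctl_cmd_entry {
+		int (*execute)(struct ctl_scsiio *ctsio);  /* handler */
+		int seridx;	/* index into the serialization table */
+		uint32_t flags;	/* e.g. CTL_FLAG_DATA_IN/_OUT */
+	};
+
+	/* ctl_scsiio() then dispatches roughly like this: */
+	entry = &ctl_cmd_table[ctsio->cdb[0]];
+	retval = entry->execute(ctsio);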
+
+ctl_debug.h:
+-----------
+
+Simplistic debugging support.
+
+ctl_error.c,
+ctl_error.h:
+-----------
+
+CTL-specific wrappers around the CAM sense building functions.
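+
+A command handler typically uses these to report an error and complete
+the I/O along these lines (a hedged example; check ctl_error.h for the
+exact prototypes):
+
+	ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+			      /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
+	ctl_done((union ctl_io *)ctsio);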
+
+ctl_frontend.c,
+ctl_frontend.h:
+--------------
+
+These files define the basic CTL frontend port API. The comments in the
+header explain the API.
+
+ctl_frontend_cam_sim.c:
+----------------------
+
+This is a CTL frontend port that is also a CAM SIM. The idea is that this
+frontend allows for using CTL without any target-capable hardware. So any
+LUNs you create in CTL are visible via this port.
+
+
+ctl_frontend_internal.c
+ctl_frontend_internal.h:
+-----------------------
+
+This is a frontend port written for Copan to do some system-specific tasks
+that required sending commands into CTL from inside the kernel. This isn't
+entirely relevant to FreeBSD in general, but can perhaps be repurposed or
+removed later.
+
+ctl_ha.h:
+--------
+
+This is a stubbed-out High Availability API. See the comments in the
+header and the description of what is needed as far as HA support above.
+
+ctl_io.h:
+--------
+
+This defines most of the core CTL I/O structures. union ctl_io is
+conceptually very similar to CAM's union ccb.
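+
+Conceptually it looks like this (abridged; see the header for the full
+definition):
+
+	union ctl_io {
+		struct ctl_io_hdr io_hdr;  /* common fields, always valid */
+		struct ctl_scsiio scsiio;  /* CDB-carrying SCSI I/O */
+		struct ctl_taskio taskio;  /* task management I/O */
+		...
+	};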
+
+ctl_ioctl.h:
+-----------
+
+This defines all ioctls available through the CTL character device, and
+the data structures needed for those ioctls.
+
+ctl_mem_pool.c
+ctl_mem_pool.h:
+--------------
+
+Generic memory pool implementation. This is currently only used by the
+internal frontend. The internal frontend can probably be rewritten to use
+UMA zones and this can be removed.
+
+ctl_private.h:
+-------------
+
+Private data structures (e.g. CTL softc) and function prototypes. This also
+includes the SCSI vendor and product names used by CTL.
+
+ctl_scsi_all.c
+ctl_scsi_all.h:
+--------------
+
+CTL wrappers around CAM sense printing functions.
+
+ctl_ser_table.c:
+---------------
+
+Command serialization table. This defines what happens when one type of
+command is followed by another type of command. e.g., what do you do when
+you have a mode select followed by a write? You block the write until the
+mode select is complete. That is defined in this table.
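+
+Conceptually the table is a two-dimensional array, indexed by the type
+of the command already outstanding and the type of the incoming command,
+whose entries say what to do with the new command. A hedged sketch
+(index order and constant names are illustrative):
+
+	/*
+	 * A WRITE that arrives while a MODE SELECT is outstanding is
+	 * blocked until the MODE SELECT completes:
+	 */
+	ctl_serialize_table[CTL_SERIDX_MD_SEL][CTL_SERIDX_WRITE] = CTL_SER_BLOCK;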
+
+ctl_util.c
+ctl_util.h:
+----------
+
+CTL utility functions, primarily designed to be used from userland. See
+ctladm for the main consumer of these functions. These include CDB
+building functions.
+
+scsi_ctl.c:
+----------
+
+CAM target peripheral driver and CTL frontend port. This is the path into
+CTL for commands from target-capable hardware/SIMs.
+
+Userland Commands:
+=================
+
+ctladm(8) fills a role similar to camcontrol(8). It allows configuring LUNs,
+issuing commands, injecting errors, and performing various other control
+functions.
+
+ctlstat(8) fills a role similar to iostat(8). It reports I/O statistics
+for CTL.
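+
+For example, "ctlstat -t" displays aggregate statistics across all LUNs,
+while "ctlstat -d" dumps raw statistics; see ctlstat(8) for details.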
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
new file mode 100644
index 0000000..56bea63
--- /dev/null
+++ b/sys/cam/ctl/ctl.c
@@ -0,0 +1,13082 @@
+/*-
+ * Copyright (c) 2003-2009 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $
+ */
+/*
+ * CAM Target Layer, a SCSI device emulation subsystem.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#define _CTL_C
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/kthread.h>
+#include <sys/bio.h>
+#include <sys/fcntl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/queue.h>
+#include <sys/sbuf.h>
+#include <sys/endian.h>
+#include <sys/sysctl.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_error.h>
+
+struct ctl_softc *control_softc = NULL;
+
+/*
+ * The default is to run with CTL_DONE_THREAD turned on. Completed
+ * transactions are queued for processing by the CTL work thread. When
+ * CTL_DONE_THREAD is not defined, completed transactions are processed in
+ * the caller's context.
+ */
+#define CTL_DONE_THREAD
+
+/*
+ * Use the serial number and device ID provided by the backend, rather than
+ * making up our own.
+ */
+#define CTL_USE_BACKEND_SN
+
+/*
+ * Size and alignment macros needed for Copan-specific HA hardware. These
+ * can go away when the HA code is re-written, and uses busdma for any
+ * hardware.
+ */
+#define CTL_ALIGN_8B(target, source, type) \
+ if (((uint32_t)source & 0x7) != 0) \
+ target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
+ else \
+ target = (type)source;
+
+#define CTL_SIZE_8B(target, size) \
+ if ((size & 0x7) != 0) \
+ target = size + (0x8 - (size & 0x7)); \
+ else \
+ target = size;
+
+#define CTL_ALIGN_8B_MARGIN 16
+
+/*
+ * Template mode pages.
+ */
+
+/*
+ * Note that these are default values only. The actual values will be
+ * filled in when the user does a mode sense.
+ */
+static struct copan_power_subpage power_page_default = {
+ /*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
+ /*subpage*/ PWR_SUBPAGE_CODE,
+ /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
+ (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
+ /*page_version*/ PWR_VERSION,
+ /* total_luns */ 26,
+ /* max_active_luns*/ PWR_DFLT_MAX_LUNS,
+ /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0}
+};
+
+static struct copan_power_subpage power_page_changeable = {
+ /*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
+ /*subpage*/ PWR_SUBPAGE_CODE,
+ /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
+ (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
+ /*page_version*/ 0,
+ /* total_luns */ 0,
+ /* max_active_luns*/ 0,
+ /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0}
+};
+
+static struct copan_aps_subpage aps_page_default = {
+ APS_PAGE_CODE | SMPH_SPF, //page_code
+ APS_SUBPAGE_CODE, //subpage
+ {(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
+ (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
+ APS_VERSION, //page_version
+ 0, //lock_active
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0} //reserved
+};
+
+static struct copan_aps_subpage aps_page_changeable = {
+ APS_PAGE_CODE | SMPH_SPF, //page_code
+ APS_SUBPAGE_CODE, //subpage
+ {(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
+ (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
+ 0, //page_version
+ 0, //lock_active
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0} //reserved
+};
+
+static struct copan_debugconf_subpage debugconf_page_default = {
+ DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */
+ DBGCNF_SUBPAGE_CODE, /* subpage */
+ {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
+ (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
+ DBGCNF_VERSION, /* page_version */
+ {CTL_TIME_IO_DEFAULT_SECS>>8,
+ CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */
+};
+
+static struct copan_debugconf_subpage debugconf_page_changeable = {
+ DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */
+ DBGCNF_SUBPAGE_CODE, /* subpage */
+ {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
+ (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
+ 0, /* page_version */
+ {0xff,0xff}, /* ctl_time_io_secs */
+};
+
+static struct scsi_format_page format_page_default = {
+ /*page_code*/SMS_FORMAT_DEVICE_PAGE,
+ /*page_length*/sizeof(struct scsi_format_page) - 2,
+ /*tracks_per_zone*/ {0, 0},
+ /*alt_sectors_per_zone*/ {0, 0},
+ /*alt_tracks_per_zone*/ {0, 0},
+ /*alt_tracks_per_lun*/ {0, 0},
+ /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
+ CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
+ /*bytes_per_sector*/ {0, 0},
+ /*interleave*/ {0, 0},
+ /*track_skew*/ {0, 0},
+ /*cylinder_skew*/ {0, 0},
+ /*flags*/ SFP_HSEC,
+ /*reserved*/ {0, 0, 0}
+};
+
+static struct scsi_format_page format_page_changeable = {
+ /*page_code*/SMS_FORMAT_DEVICE_PAGE,
+ /*page_length*/sizeof(struct scsi_format_page) - 2,
+ /*tracks_per_zone*/ {0, 0},
+ /*alt_sectors_per_zone*/ {0, 0},
+ /*alt_tracks_per_zone*/ {0, 0},
+ /*alt_tracks_per_lun*/ {0, 0},
+ /*sectors_per_track*/ {0, 0},
+ /*bytes_per_sector*/ {0, 0},
+ /*interleave*/ {0, 0},
+ /*track_skew*/ {0, 0},
+ /*cylinder_skew*/ {0, 0},
+ /*flags*/ 0,
+ /*reserved*/ {0, 0, 0}
+};
+
+static struct scsi_rigid_disk_page rigid_disk_page_default = {
+ /*page_code*/SMS_RIGID_DISK_PAGE,
+ /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
+ /*cylinders*/ {0, 0, 0},
+ /*heads*/ CTL_DEFAULT_HEADS,
+ /*start_write_precomp*/ {0, 0, 0},
+ /*start_reduced_current*/ {0, 0, 0},
+ /*step_rate*/ {0, 0},
+ /*landing_zone_cylinder*/ {0, 0, 0},
+ /*rpl*/ SRDP_RPL_DISABLED,
+ /*rotational_offset*/ 0,
+ /*reserved1*/ 0,
+ /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
+ CTL_DEFAULT_ROTATION_RATE & 0xff},
+ /*reserved2*/ {0, 0}
+};
+
+static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
+ /*page_code*/SMS_RIGID_DISK_PAGE,
+ /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
+ /*cylinders*/ {0, 0, 0},
+ /*heads*/ 0,
+ /*start_write_precomp*/ {0, 0, 0},
+ /*start_reduced_current*/ {0, 0, 0},
+ /*step_rate*/ {0, 0},
+ /*landing_zone_cylinder*/ {0, 0, 0},
+ /*rpl*/ 0,
+ /*rotational_offset*/ 0,
+ /*reserved1*/ 0,
+ /*rotation_rate*/ {0, 0},
+ /*reserved2*/ {0, 0}
+};
+
+static struct scsi_caching_page caching_page_default = {
+ /*page_code*/SMS_CACHING_PAGE,
+ /*page_length*/sizeof(struct scsi_caching_page) - 2,
+ /*flags1*/ SCP_DISC | SCP_WCE,
+ /*ret_priority*/ 0,
+ /*disable_pf_transfer_len*/ {0xff, 0xff},
+ /*min_prefetch*/ {0, 0},
+ /*max_prefetch*/ {0xff, 0xff},
+ /*max_pf_ceiling*/ {0xff, 0xff},
+ /*flags2*/ 0,
+ /*cache_segments*/ 0,
+ /*cache_seg_size*/ {0, 0},
+ /*reserved*/ 0,
+ /*non_cache_seg_size*/ {0, 0, 0}
+};
+
+static struct scsi_caching_page caching_page_changeable = {
+ /*page_code*/SMS_CACHING_PAGE,
+ /*page_length*/sizeof(struct scsi_caching_page) - 2,
+ /*flags1*/ 0,
+ /*ret_priority*/ 0,
+ /*disable_pf_transfer_len*/ {0, 0},
+ /*min_prefetch*/ {0, 0},
+ /*max_prefetch*/ {0, 0},
+ /*max_pf_ceiling*/ {0, 0},
+ /*flags2*/ 0,
+ /*cache_segments*/ 0,
+ /*cache_seg_size*/ {0, 0},
+ /*reserved*/ 0,
+ /*non_cache_seg_size*/ {0, 0, 0}
+};
+
+static struct scsi_control_page control_page_default = {
+ /*page_code*/SMS_CONTROL_MODE_PAGE,
+ /*page_length*/sizeof(struct scsi_control_page) - 2,
+ /*rlec*/0,
+ /*queue_flags*/0,
+ /*eca_and_aen*/0,
+ /*reserved*/0,
+ /*aen_holdoff_period*/{0, 0}
+};
+
+static struct scsi_control_page control_page_changeable = {
+ /*page_code*/SMS_CONTROL_MODE_PAGE,
+ /*page_length*/sizeof(struct scsi_control_page) - 2,
+ /*rlec*/SCP_DSENSE,
+ /*queue_flags*/0,
+ /*eca_and_aen*/0,
+ /*reserved*/0,
+ /*aen_holdoff_period*/{0, 0}
+};
+
+SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
+
+/*
+ * XXX KDM move these into the softc.
+ */
+static int rcv_sync_msg;
+static int persis_offset;
+static uint8_t ctl_pause_rtr;
+static int ctl_is_single;
+static int index_to_aps_page;
+
+
+/*
+ * Serial number (0x80), device id (0x83), and supported pages (0x00)
+ */
+#define SCSI_EVPD_NUM_SUPPORTED_PAGES 3
+
+static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
+ int param);
+static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
+static void ctl_init(void);
+void ctl_shutdown(void);
+static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
+static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
+static void ctl_ioctl_online(void *arg);
+static void ctl_ioctl_offline(void *arg);
+static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id);
+static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id);
+static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
+static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
+static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
+static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock);
+static int ctl_ioctl_submit_wait(union ctl_io *io);
+static void ctl_ioctl_datamove(union ctl_io *io);
+static void ctl_ioctl_done(union ctl_io *io);
+static void ctl_ioctl_hard_startstop_callback(void *arg,
+ struct cfi_metatask *metatask);
+static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
+static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
+ struct ctl_ooa *ooa_hdr);
+static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td);
+uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
+uint32_t ctl_port_idx(int port_num);
+#ifdef unused
+static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
+ uint32_t targ_target, uint32_t targ_lun,
+ int can_wait);
+static void ctl_kfree_io(union ctl_io *io);
+#endif /* unused */
+static void ctl_free_io_internal(union ctl_io *io, int have_lock);
+static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
+ struct ctl_be_lun *be_lun, struct ctl_id target_id);
+static int ctl_free_lun(struct ctl_lun *lun);
+static void ctl_create_lun(struct ctl_be_lun *be_lun);
+/**
+static void ctl_failover_change_pages(struct ctl_softc *softc,
+ struct ctl_scsiio *ctsio, int master);
+**/
+
+static int ctl_do_mode_select(union ctl_io *io);
+static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
+ uint64_t res_key, uint64_t sa_res_key,
+ uint8_t type, uint32_t residx,
+ struct ctl_scsiio *ctsio,
+ struct scsi_per_res_out *cdb,
+ struct scsi_per_res_out_parms* param);
+static void ctl_pro_preempt_other(struct ctl_lun *lun,
+ union ctl_ha_msg *msg);
+static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
+static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
+static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
+static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
+static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
+static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
+static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len);
+static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
+static ctl_action ctl_check_for_blockage(union ctl_io *pending_io,
+ union ctl_io *ooa_io);
+static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
+ union ctl_io *starting_io);
+static int ctl_check_blocked(struct ctl_lun *lun);
+static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
+ struct ctl_lun *lun,
+ struct ctl_cmd_entry *entry,
+ struct ctl_scsiio *ctsio);
+//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
+static void ctl_failover(void);
+static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
+ struct ctl_scsiio *ctsio);
+static int ctl_scsiio(struct ctl_scsiio *ctsio);
+
+static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
+static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
+ ctl_ua_type ua_type);
+static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
+ ctl_ua_type ua_type);
+static int ctl_abort_task(union ctl_io *io);
+static void ctl_run_task_queue(struct ctl_softc *ctl_softc);
+#ifdef CTL_IO_DELAY
+static void ctl_datamove_timer_wakeup(void *arg);
+static void ctl_done_timer_wakeup(void *arg);
+#endif /* CTL_IO_DELAY */
+
+static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
+static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
+static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
+static void ctl_datamove_remote_write(union ctl_io *io);
+static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
+static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
+static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
+static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
+ ctl_ha_dt_cb callback);
+static void ctl_datamove_remote_read(union ctl_io *io);
+static void ctl_datamove_remote(union ctl_io *io);
+static int ctl_process_done(union ctl_io *io, int have_lock);
+static void ctl_work_thread(void *arg);
+
+/*
+ * Load the serialization table. This isn't very pretty, but is probably
+ * the easiest way to do it.
+ */
+#include "ctl_ser_table.c"
+
+/*
+ * We only need to define open, close and ioctl routines for this driver.
+ */
+static struct cdevsw ctl_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = 0,
+ .d_open = ctl_open,
+ .d_close = ctl_close,
+ .d_ioctl = ctl_ioctl,
+ .d_name = "ctl",
+};
+
+
+MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
+
+/*
+ * If we have the CAM SIM, we may or may not have another SIM that will
+ * cause CTL to get initialized. If not, we need to initialize it.
+ */
+SYSINIT(ctl_init, SI_SUB_CONFIGURE, SI_ORDER_THIRD, ctl_init, NULL);
+
+static void
+ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
+ union ctl_ha_msg *msg_info)
+{
+ struct ctl_scsiio *ctsio;
+
+ if (msg_info->hdr.original_sc == NULL) {
+ printf("%s: original_sc == NULL!\n", __func__);
+ /* XXX KDM now what? */
+ return;
+ }
+
+ ctsio = &msg_info->hdr.original_sc->scsiio;
+ ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
+ ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
+ ctsio->io_hdr.status = msg_info->hdr.status;
+ ctsio->scsi_status = msg_info->scsi.scsi_status;
+ ctsio->sense_len = msg_info->scsi.sense_len;
+ ctsio->sense_residual = msg_info->scsi.sense_residual;
+ ctsio->residual = msg_info->scsi.residual;
+ memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
+ sizeof(ctsio->sense_data));
+ memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
+	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
+ STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
+ ctl_wakeup_thread();
+}
+
+static void
+ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
+ union ctl_ha_msg *msg_info)
+{
+ struct ctl_scsiio *ctsio;
+
+ if (msg_info->hdr.serializing_sc == NULL) {
+ printf("%s: serializing_sc == NULL!\n", __func__);
+ /* XXX KDM now what? */
+ return;
+ }
+
+ ctsio = &msg_info->hdr.serializing_sc->scsiio;
+#if 0
+ /*
+ * Attempt to catch the situation where an I/O has
+ * been freed, and we're using it again.
+ */
+ if (ctsio->io_hdr.io_type == 0xff) {
+ union ctl_io *tmp_io;
+ tmp_io = (union ctl_io *)ctsio;
+ printf("%s: %p use after free!\n", __func__,
+ ctsio);
+ printf("%s: type %d msg %d cdb %x iptl: "
+ "%d:%d:%d:%d tag 0x%04x "
+ "flag %#x status %x\n",
+ __func__,
+ tmp_io->io_hdr.io_type,
+ tmp_io->io_hdr.msg_type,
+ tmp_io->scsiio.cdb[0],
+ tmp_io->io_hdr.nexus.initid.id,
+ tmp_io->io_hdr.nexus.targ_port,
+ tmp_io->io_hdr.nexus.targ_target.id,
+ tmp_io->io_hdr.nexus.targ_lun,
+ (tmp_io->io_hdr.io_type ==
+ CTL_IO_TASK) ?
+ tmp_io->taskio.tag_num :
+ tmp_io->scsiio.tag_num,
+ tmp_io->io_hdr.flags,
+ tmp_io->io_hdr.status);
+ }
+#endif
+ ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
+ STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
+ ctl_wakeup_thread();
+}
+
+/*
+ * ISC (Inter Shelf Communication) event handler. Events from the HA
+ * subsystem come in here.
+ */
+static void
+ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
+{
+ struct ctl_softc *ctl_softc;
+ union ctl_io *io;
+ struct ctl_prio *presio;
+ ctl_ha_status isc_status;
+
+ ctl_softc = control_softc;
+ io = NULL;
+
+
+#if 0
+ printf("CTL: Isc Msg event %d\n", event);
+#endif
+ if (event == CTL_HA_EVT_MSG_RECV) {
+ union ctl_ha_msg msg_info;
+
+ isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info), /*wait*/ 0);
+#if 0
+ printf("CTL: msg_type %d\n", msg_info.msg_type);
+#endif
+ if (isc_status != 0) {
+ printf("Error receiving message, status = %d\n",
+ isc_status);
+ return;
+ }
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ switch (msg_info.hdr.msg_type) {
+ case CTL_MSG_SERIALIZE:
+#if 0
+ printf("Serialize\n");
+#endif
+ io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
+ if (io == NULL) {
+ printf("ctl_isc_event_handler: can't allocate "
+ "ctl_io!\n");
+ /* Bad Juju */
+ /* Need to set busy and send msg back */
+ mtx_unlock(&ctl_softc->ctl_lock);
+ msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
+ msg_info.hdr.status = CTL_SCSI_ERROR;
+ msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
+ msg_info.scsi.sense_len = 0;
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
+ }
+ goto bailout;
+ }
+ ctl_zero_io(io);
+ // populate ctsio from msg_info
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
+ io->io_hdr.original_sc = msg_info.hdr.original_sc;
+#if 0
+ printf("pOrig %x\n", (int)msg_info.original_sc);
+#endif
+ io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
+ CTL_FLAG_IO_ACTIVE;
+ /*
+ * If we're in serialization-only mode, we don't
+ * want to go through full done processing. Thus
+ * the COPY flag.
+ *
+ * XXX KDM add another flag that is more specific.
+ */
+ if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
+ io->io_hdr.flags |= CTL_FLAG_INT_COPY;
+ io->io_hdr.nexus = msg_info.hdr.nexus;
+#if 0
+ printf("targ %d, port %d, iid %d, lun %d\n",
+ io->io_hdr.nexus.targ_target.id,
+ io->io_hdr.nexus.targ_port,
+ io->io_hdr.nexus.initid.id,
+ io->io_hdr.nexus.targ_lun);
+#endif
+ io->scsiio.tag_num = msg_info.scsi.tag_num;
+ io->scsiio.tag_type = msg_info.scsi.tag_type;
+ memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
+ CTL_MAX_CDBLEN);
+ if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+ struct ctl_cmd_entry *entry;
+ uint8_t opcode;
+
+ opcode = io->scsiio.cdb[0];
+ entry = &ctl_cmd_table[opcode];
+ io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
+ io->io_hdr.flags |=
+ entry->flags & CTL_FLAG_DATA_MASK;
+ }
+ STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
+ &io->io_hdr, links);
+ ctl_wakeup_thread();
+ break;
+
+ /* Performed on the Originating SC, XFER mode only */
+ case CTL_MSG_DATAMOVE: {
+ struct ctl_sg_entry *sgl;
+ int i, j;
+
+ io = msg_info.hdr.original_sc;
+ if (io == NULL) {
+ printf("%s: original_sc == NULL!\n", __func__);
+ /* XXX KDM do something here */
+ break;
+ }
+ io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
+ io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
+ /*
+ * Keep track of this, we need to send it back over
+ * when the datamove is complete.
+ */
+ io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
+
+ if (msg_info.dt.sg_sequence == 0) {
+ /*
+ * XXX KDM we use the preallocated S/G list
+ * here, but we'll need to change this to
+ * dynamic allocation if we need larger S/G
+ * lists.
+ */
+ if (msg_info.dt.kern_sg_entries >
+ sizeof(io->io_hdr.remote_sglist) /
+ sizeof(io->io_hdr.remote_sglist[0])) {
+ printf("%s: number of S/G entries "
+ "needed %u > allocated num %zd\n",
+ __func__,
+ msg_info.dt.kern_sg_entries,
+ sizeof(io->io_hdr.remote_sglist)/
+ sizeof(io->io_hdr.remote_sglist[0]));
+
+ /*
+ * XXX KDM send a message back to
+ * the other side to shut down the
+ * DMA. The error will come back
+ * through via the normal channel.
+ */
+ break;
+ }
+ sgl = io->io_hdr.remote_sglist;
+ memset(sgl, 0,
+ sizeof(io->io_hdr.remote_sglist));
+
+ io->scsiio.kern_data_ptr = (uint8_t *)sgl;
+
+ io->scsiio.kern_sg_entries =
+ msg_info.dt.kern_sg_entries;
+ io->scsiio.rem_sg_entries =
+ msg_info.dt.kern_sg_entries;
+ io->scsiio.kern_data_len =
+ msg_info.dt.kern_data_len;
+ io->scsiio.kern_total_len =
+ msg_info.dt.kern_total_len;
+ io->scsiio.kern_data_resid =
+ msg_info.dt.kern_data_resid;
+ io->scsiio.kern_rel_offset =
+ msg_info.dt.kern_rel_offset;
+ /*
+ * Clear out per-DMA flags.
+ */
+ io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
+ /*
+ * Add per-DMA flags that are set for this
+ * particular DMA request.
+ */
+ io->io_hdr.flags |= msg_info.dt.flags &
+ CTL_FLAG_RDMA_MASK;
+ } else
+ sgl = (struct ctl_sg_entry *)
+ io->scsiio.kern_data_ptr;
+
+ for (i = msg_info.dt.sent_sg_entries, j = 0;
+ i < (msg_info.dt.sent_sg_entries +
+ msg_info.dt.cur_sg_entries); i++, j++) {
+ sgl[i].addr = msg_info.dt.sg_list[j].addr;
+ sgl[i].len = msg_info.dt.sg_list[j].len;
+
+#if 0
+ printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
+ __func__,
+ msg_info.dt.sg_list[j].addr,
+ msg_info.dt.sg_list[j].len,
+ sgl[i].addr, sgl[i].len, j, i);
+#endif
+ }
+#if 0
+ memcpy(&sgl[msg_info.dt.sent_sg_entries],
+ msg_info.dt.sg_list,
+ sizeof(*sgl) * msg_info.dt.cur_sg_entries);
+#endif
+
+ /*
+ * If this is the last piece of the I/O, we've got
+ * the full S/G list. Queue processing in the thread.
+ * Otherwise wait for the next piece.
+ */
+ if (msg_info.dt.sg_last != 0) {
+ STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
+ &io->io_hdr, links);
+ ctl_wakeup_thread();
+ }
+ break;
+ }
+ /* Performed on the Serializing (primary) SC, XFER mode only */
+ case CTL_MSG_DATAMOVE_DONE: {
+ if (msg_info.hdr.serializing_sc == NULL) {
+ printf("%s: serializing_sc == NULL!\n",
+ __func__);
+ /* XXX KDM now what? */
+ break;
+ }
+ /*
+ * We grab the sense information here in case
+ * there was a failure, so we can return status
+ * back to the initiator.
+ */
+ io = msg_info.hdr.serializing_sc;
+ io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
+ io->io_hdr.status = msg_info.hdr.status;
+ io->scsiio.scsi_status = msg_info.scsi.scsi_status;
+ io->scsiio.sense_len = msg_info.scsi.sense_len;
+ io->scsiio.sense_residual =msg_info.scsi.sense_residual;
+ io->io_hdr.port_status = msg_info.scsi.fetd_status;
+ io->scsiio.residual = msg_info.scsi.residual;
+ memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
+ sizeof(io->scsiio.sense_data));
+
+ STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
+ &io->io_hdr, links);
+ ctl_wakeup_thread();
+ break;
+ }
+
+		/* Performed on the Originating SC, SER_ONLY mode */
+ case CTL_MSG_R2R:
+ io = msg_info.hdr.original_sc;
+ if (io == NULL) {
+ printf("%s: Major Bummer\n", __func__);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return;
+ } else {
+#if 0
+ printf("pOrig %x\n",(int) ctsio);
+#endif
+ }
+ io->io_hdr.msg_type = CTL_MSG_R2R;
+ io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
+ STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
+ &io->io_hdr, links);
+ ctl_wakeup_thread();
+ break;
+
+ /*
+		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
+		 * mode.
+		 * Performed on the Originating (i.e. secondary) SC in XFER
+		 * mode.
+ */
+ case CTL_MSG_FINISH_IO:
+ if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
+ ctl_isc_handler_finish_xfer(ctl_softc,
+ &msg_info);
+ else
+ ctl_isc_handler_finish_ser_only(ctl_softc,
+ &msg_info);
+ break;
+
+		/* Performed on the Originating SC */
+ case CTL_MSG_BAD_JUJU:
+ io = msg_info.hdr.original_sc;
+ if (io == NULL) {
+ printf("%s: Bad JUJU!, original_sc is NULL!\n",
+ __func__);
+ break;
+ }
+ ctl_copy_sense_data(&msg_info, io);
+ /*
+ * IO should have already been cleaned up on other
+ * SC so clear this flag so we won't send a message
+ * back to finish the IO there.
+ */
+ io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+ io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
+
+ /* io = msg_info.hdr.serializing_sc; */
+ io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
+ STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
+ &io->io_hdr, links);
+ ctl_wakeup_thread();
+ break;
+
+ /* Handle resets sent from the other side */
+ case CTL_MSG_MANAGE_TASKS: {
+ struct ctl_taskio *taskio;
+ taskio = (struct ctl_taskio *)ctl_alloc_io(
+ (void *)ctl_softc->othersc_pool);
+ if (taskio == NULL) {
+ printf("ctl_isc_event_handler: can't allocate "
+ "ctl_io!\n");
+ /* Bad Juju */
+ /* should I just call the proper reset func
+ here??? */
+ mtx_unlock(&ctl_softc->ctl_lock);
+ goto bailout;
+ }
+ ctl_zero_io((union ctl_io *)taskio);
+ taskio->io_hdr.io_type = CTL_IO_TASK;
+ taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
+ taskio->io_hdr.nexus = msg_info.hdr.nexus;
+ taskio->task_action = msg_info.task.task_action;
+ taskio->tag_num = msg_info.task.tag_num;
+ taskio->tag_type = msg_info.task.tag_type;
+#ifdef CTL_TIME_IO
+ taskio->io_hdr.start_time = time_uptime;
+ getbintime(&taskio->io_hdr.start_bt);
+#if 0
+ cs_prof_gettime(&taskio->io_hdr.start_ticks);
+#endif
+#endif /* CTL_TIME_IO */
+ STAILQ_INSERT_TAIL(&ctl_softc->task_queue,
+ &taskio->io_hdr, links);
+ ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
+ ctl_wakeup_thread();
+ break;
+ }
+ /* Persistent Reserve action which needs attention */
+ case CTL_MSG_PERS_ACTION:
+ presio = (struct ctl_prio *)ctl_alloc_io(
+ (void *)ctl_softc->othersc_pool);
+ if (presio == NULL) {
+ printf("ctl_isc_event_handler: can't allocate "
+ "ctl_io!\n");
+ /* Bad Juju */
+ /* Need to set busy and send msg back */
+ mtx_unlock(&ctl_softc->ctl_lock);
+ goto bailout;
+ }
+ ctl_zero_io((union ctl_io *)presio);
+ presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
+ presio->pr_msg = msg_info.pr;
+ STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
+ &presio->io_hdr, links);
+ ctl_wakeup_thread();
+ break;
+ case CTL_MSG_SYNC_FE:
+ rcv_sync_msg = 1;
+ break;
+ case CTL_MSG_APS_LOCK: {
+			// It's quicker to execute this than to
+			// queue it.
+ struct ctl_lun *lun;
+ struct ctl_page_index *page_index;
+ struct copan_aps_subpage *current_sp;
+
+ lun = ctl_softc->ctl_luns[msg_info.hdr.nexus.targ_lun];
+ page_index = &lun->mode_pages.index[index_to_aps_page];
+ current_sp = (struct copan_aps_subpage *)
+ (page_index->page_data +
+ (page_index->page_len * CTL_PAGE_CURRENT));
+
+ current_sp->lock_active = msg_info.aps.lock_flag;
+ break;
+ }
+ default:
+ printf("How did I get here?\n");
+ }
+ mtx_unlock(&ctl_softc->ctl_lock);
+ } else if (event == CTL_HA_EVT_MSG_SENT) {
+ if (param != CTL_HA_STATUS_SUCCESS) {
+ printf("Bad status from ctl_ha_msg_send status %d\n",
+ param);
+ }
+ return;
+ } else if (event == CTL_HA_EVT_DISCONNECT) {
+ printf("CTL: Got a disconnect from Isc\n");
+ return;
+ } else {
+ printf("ctl_isc_event_handler: Unknown event %d\n", event);
+ return;
+ }
+
+bailout:
+ return;
+}
+
+static void
+ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
+{
+ struct scsi_sense_data *sense;
+
+ sense = &dest->scsiio.sense_data;
+ bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
+ dest->scsiio.scsi_status = src->scsi.scsi_status;
+ dest->scsiio.sense_len = src->scsi.sense_len;
+ dest->io_hdr.status = src->hdr.status;
+}
+
+static void
+ctl_init(void)
+{
+ struct ctl_softc *softc;
+ struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
+ struct ctl_frontend *fe;
+ struct ctl_lun *lun;
+	uint8_t sc_id = 0;
+#if 0
+ int i;
+#endif
+ int retval;
+ //int isc_retval;
+
+ retval = 0;
+ ctl_pause_rtr = 0;
+ rcv_sync_msg = 0;
+
+ control_softc = malloc(sizeof(*control_softc), M_DEVBUF, M_WAITOK);
+ softc = control_softc;
+
+ memset(softc, 0, sizeof(*softc));
+
+ softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
+ "cam/ctl");
+
+ softc->dev->si_drv1 = softc;
+
+ mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
+ softc->open_count = 0;
+
+ /*
+ * Default to actually sending a SYNCHRONIZE CACHE command down to
+ * the drive.
+ */
+ softc->flags = CTL_FLAG_REAL_SYNC;
+
+ /*
+ * In Copan's HA scheme, the "master" and "slave" roles are
+ * figured out through the slot the controller is in. Although it
+ * is an active/active system, someone has to be in charge.
+ */
+#ifdef NEEDTOPORT
+ scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
+#endif
+
+ if (sc_id == 0) {
+ softc->flags |= CTL_FLAG_MASTER_SHELF;
+ persis_offset = 0;
+ } else
+ persis_offset = CTL_MAX_INITIATORS;
+
+ /*
+ * XXX KDM need to figure out where we want to get our target ID
+ * and WWID. Is it different on each port?
+ */
+ softc->target.id = 0;
+ softc->target.wwid[0] = 0x12345678;
+ softc->target.wwid[1] = 0x87654321;
+ STAILQ_INIT(&softc->lun_list);
+ STAILQ_INIT(&softc->pending_lun_queue);
+ STAILQ_INIT(&softc->task_queue);
+ STAILQ_INIT(&softc->incoming_queue);
+ STAILQ_INIT(&softc->rtr_queue);
+ STAILQ_INIT(&softc->done_queue);
+ STAILQ_INIT(&softc->isc_queue);
+ STAILQ_INIT(&softc->fe_list);
+ STAILQ_INIT(&softc->be_list);
+ STAILQ_INIT(&softc->io_pools);
+
+ lun = &softc->lun;
+
+ /*
+ * We don't bother calling these with ctl_lock held here, because,
+ * in theory, no one else can try to do anything while we're in our
+ * module init routine.
+ */
+ if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
+ &internal_pool)!= 0){
+ printf("ctl: can't allocate %d entry internal pool, "
+ "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
+ return;
+ }
+
+ if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
+ CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
+ printf("ctl: can't allocate %d entry emergency pool, "
+ "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
+ ctl_pool_free(softc, internal_pool);
+ return;
+ }
+
+ if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
+ &other_pool) != 0)
+ {
+ printf("ctl: can't allocate %d entry other SC pool, "
+ "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
+ ctl_pool_free(softc, internal_pool);
+ ctl_pool_free(softc, emergency_pool);
+ return;
+ }
+
+ softc->internal_pool = internal_pool;
+ softc->emergency_pool = emergency_pool;
+ softc->othersc_pool = other_pool;
+
+ ctl_pool_acquire(internal_pool);
+ ctl_pool_acquire(emergency_pool);
+ ctl_pool_acquire(other_pool);
+
+ /*
+ * We used to allocate a processor LUN here. The new scheme is to
+ * just let the user allocate LUNs as he sees fit.
+ */
+#if 0
+ mtx_lock(&softc->ctl_lock);
+ ctl_alloc_lun(softc, lun, /*be_lun*/NULL, /*target*/softc->target);
+ mtx_unlock(&softc->ctl_lock);
+#endif
+
+ if (kproc_create(ctl_work_thread, softc, &softc->work_thread, 0, 0,
+ "ctl_thrd") != 0) {
+ printf("error creating CTL work thread!\n");
+ ctl_free_lun(lun);
+ ctl_pool_free(softc, internal_pool);
+ ctl_pool_free(softc, emergency_pool);
+ ctl_pool_free(softc, other_pool);
+ return;
+ }
+ printf("ctl: CAM Target Layer loaded\n");
+
+ /*
+ * Initialize the initiator and portname mappings
+ */
+ memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid));
+
+ /*
+ * Initialize the ioctl front end.
+ */
+ fe = &softc->ioctl_info.fe;
+ sprintf(softc->ioctl_info.port_name, "CTL ioctl");
+ fe->port_type = CTL_PORT_IOCTL;
+ fe->num_requested_ctl_io = 100;
+ fe->port_name = softc->ioctl_info.port_name;
+ fe->port_online = ctl_ioctl_online;
+ fe->port_offline = ctl_ioctl_offline;
+ fe->onoff_arg = &softc->ioctl_info;
+ fe->targ_enable = ctl_ioctl_targ_enable;
+ fe->targ_disable = ctl_ioctl_targ_disable;
+ fe->lun_enable = ctl_ioctl_lun_enable;
+ fe->lun_disable = ctl_ioctl_lun_disable;
+ fe->targ_lun_arg = &softc->ioctl_info;
+ fe->fe_datamove = ctl_ioctl_datamove;
+ fe->fe_done = ctl_ioctl_done;
+ fe->max_targets = 15;
+ fe->max_target_id = 15;
+
+ if (ctl_frontend_register(&softc->ioctl_info.fe,
+ (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
+ printf("ctl: ioctl front end registration failed, will "
+ "continue anyway\n");
+ }
+
+#ifdef CTL_IO_DELAY
+ if (sizeof(struct callout) > CTL_TIMER_BYTES) {
+ printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
+ sizeof(struct callout), CTL_TIMER_BYTES);
+ return;
+ }
+#endif /* CTL_IO_DELAY */
+
+}
+
+void
+ctl_shutdown(void)
+{
+ struct ctl_softc *softc;
+ struct ctl_lun *lun, *next_lun;
+ struct ctl_io_pool *pool, *next_pool;
+
+ softc = (struct ctl_softc *)control_softc;
+
+ if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0)
+ printf("ctl: ioctl front end deregistration failed\n");
+
+ mtx_lock(&softc->ctl_lock);
+
+ /*
+ * Free up each LUN.
+ */
+ for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
+ next_lun = STAILQ_NEXT(lun, links);
+ ctl_free_lun(lun);
+ }
+
+ /*
+ * This will rip the rug out from under any FETDs or anyone else
+ * that has a pool allocated. Since we increment our module
+ * refcount any time someone outside the main CTL module allocates
+ * a pool, we shouldn't have any problems here. The user won't be
+ * able to unload the CTL module until client modules have
+ * successfully unloaded.
+ */
+ for (pool = STAILQ_FIRST(&softc->io_pools); pool != NULL;
+ pool = next_pool) {
+ next_pool = STAILQ_NEXT(pool, links);
+ ctl_pool_free(softc, pool);
+ }
+
+ mtx_unlock(&softc->ctl_lock);
+
+#if 0
+ ctl_shutdown_thread(softc->work_thread);
+#endif
+
+ mtx_destroy(&softc->ctl_lock);
+
+ destroy_dev(softc->dev);
+
+ printf("ctl: CAM Target Layer unloaded\n");
+}
+
+/*
+ * XXX KDM should we do some access checks here? Bump a reference count to
+ * prevent a CTL module from being unloaded while someone has it open?
+ */
+static int
+ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+ return (0);
+}
+
+static int
+ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+ return (0);
+}
+
+int
+ctl_port_enable(ctl_port_type port_type)
+{
+ struct ctl_softc *softc;
+ struct ctl_frontend *fe;
+
+ if (ctl_is_single == 0) {
+ union ctl_ha_msg msg_info;
+ int isc_retval;
+
+#if 0
+ printf("%s: HA mode, synchronizing frontend enable\n",
+ __func__);
+#endif
+ msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
+ if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
+ printf("Sync msg send error retval %d\n", isc_retval);
+ }
+ if (!rcv_sync_msg) {
+ isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info), 1);
+ }
+#if 0
+ printf("CTL:Frontend Enable\n");
+ } else {
+ printf("%s: single mode, skipping frontend synchronization\n",
+ __func__);
+#endif
+ }
+
+ softc = control_softc;
+
+ STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ if (port_type & fe->port_type)
+ {
+#if 0
+ printf("port %d\n", fe->targ_port);
+#endif
+ ctl_frontend_online(fe);
+ }
+ }
+
+ return (0);
+}
+
+int
+ctl_port_disable(ctl_port_type port_type)
+{
+ struct ctl_softc *softc;
+ struct ctl_frontend *fe;
+
+ softc = control_softc;
+
+ STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ if (port_type & fe->port_type)
+ ctl_frontend_offline(fe);
+ }
+
+ return (0);
+}
+
+/*
+ * Returns 0 for success, 1 for failure.
+ * Currently the only failure mode is if there aren't enough entries
+ * allocated. So, in case of a failure, look at num_entries_dropped,
+ * reallocate and try again.
+ */
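+/*
+ * A hedged caller-side sketch of that retry protocol (the local variable
+ * names are illustrative, and CTL_PORT_ALL is assumed here):
+ *
+ *	nalloc = 32;
+ * retry:
+ *	entries = malloc(nalloc * sizeof(*entries), M_TEMP, M_WAITOK);
+ *	if (ctl_port_list(entries, nalloc, &filled, &dropped,
+ *	    CTL_PORT_ALL, 0) != 0) {
+ *		free(entries, M_TEMP);
+ *		nalloc = filled + dropped;
+ *		goto retry;
+ *	}
+ */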
+int
+ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
+ int *num_entries_filled, int *num_entries_dropped,
+ ctl_port_type port_type, int no_virtual)
+{
+ struct ctl_softc *softc;
+ struct ctl_frontend *fe;
+ int entries_dropped, entries_filled;
+ int retval;
+ int i;
+
+ softc = control_softc;
+
+ retval = 0;
+ entries_filled = 0;
+ entries_dropped = 0;
+
+ i = 0;
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ struct ctl_port_entry *entry;
+
+ if ((fe->port_type & port_type) == 0)
+ continue;
+
+ if ((no_virtual != 0)
+ && (fe->virtual_port != 0))
+ continue;
+
+ if (entries_filled >= num_entries_alloced) {
+ entries_dropped++;
+ continue;
+ }
+ entry = &entries[i];
+
+ entry->port_type = fe->port_type;
+ strlcpy(entry->port_name, fe->port_name,
+ sizeof(entry->port_name));
+ entry->physical_port = fe->physical_port;
+ entry->virtual_port = fe->virtual_port;
+ entry->wwnn = fe->wwnn;
+ entry->wwpn = fe->wwpn;
+
+ i++;
+ entries_filled++;
+ }
+
+ mtx_unlock(&softc->ctl_lock);
+
+ if (entries_dropped > 0)
+ retval = 1;
+
+ *num_entries_dropped = entries_dropped;
+ *num_entries_filled = entries_filled;
+
+ return (retval);
+}
+
+static void
+ctl_ioctl_online(void *arg)
+{
+ struct ctl_ioctl_info *ioctl_info;
+
+ ioctl_info = (struct ctl_ioctl_info *)arg;
+
+ ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
+}
+
+static void
+ctl_ioctl_offline(void *arg)
+{
+ struct ctl_ioctl_info *ioctl_info;
+
+ ioctl_info = (struct ctl_ioctl_info *)arg;
+
+ ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
+}
+
+/*
+ * Remove an initiator by port number and initiator ID.
+ * Returns 0 for success, 1 for failure.
+ * Assumes the caller does NOT hold the CTL lock.
+ */
+int
+ctl_remove_initiator(int32_t targ_port, uint32_t iid)
+{
+ struct ctl_softc *softc;
+
+ softc = control_softc;
+
+	if ((targ_port < 0)
+	 || (targ_port >= CTL_MAX_PORTS)) {
+		printf("%s: invalid port number %d\n", __func__, targ_port);
+		return (1);
+	}
+	if (iid >= CTL_MAX_INIT_PER_PORT) {
+		printf("%s: initiator ID %u >= maximum %u!\n",
+		       __func__, iid, CTL_MAX_INIT_PER_PORT);
+		return (1);
+	}
+
+ mtx_lock(&softc->ctl_lock);
+
+ softc->wwpn_iid[targ_port][iid].in_use = 0;
+
+ mtx_unlock(&softc->ctl_lock);
+
+ return (0);
+}
+
+/*
+ * Add an initiator to the initiator map.
+ * Returns 0 for success, 1 for failure.
+ * Assumes the caller does NOT hold the CTL lock.
+ */
+int
+ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
+{
+ struct ctl_softc *softc;
+ int retval;
+
+ softc = control_softc;
+
+ retval = 0;
+
+	if ((targ_port < 0)
+	 || (targ_port >= CTL_MAX_PORTS)) {
+		printf("%s: invalid port number %d\n", __func__, targ_port);
+		return (1);
+	}
+	if (iid >= CTL_MAX_INIT_PER_PORT) {
+		printf("%s: WWPN %#jx initiator ID %u >= maximum %u!\n",
+		       __func__, (uintmax_t)wwpn, iid, CTL_MAX_INIT_PER_PORT);
+		return (1);
+	}
+
+ mtx_lock(&softc->ctl_lock);
+
+ if (softc->wwpn_iid[targ_port][iid].in_use != 0) {
+ /*
+ * We don't treat this as an error.
+ */
+ if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) {
+ printf("%s: port %d iid %u WWPN %#jx arrived again?\n",
+ __func__, targ_port, iid, (uintmax_t)wwpn);
+ goto bailout;
+ }
+
+ /*
+ * This is an error, but what do we do about it? The
+ * driver is telling us we have a new WWPN for this
+ * initiator ID, so we pretty much need to use it.
+ */
+ printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is "
+ "still at that address\n", __func__, targ_port, iid,
+ (uintmax_t)wwpn,
+ (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn);
+
+ /*
+ * XXX KDM clear have_ca and ua_pending on each LUN for
+ * this initiator.
+ */
+ }
+ softc->wwpn_iid[targ_port][iid].in_use = 1;
+ softc->wwpn_iid[targ_port][iid].iid = iid;
+ softc->wwpn_iid[targ_port][iid].wwpn = wwpn;
+ softc->wwpn_iid[targ_port][iid].port = targ_port;
+
+bailout:
+
+ mtx_unlock(&softc->ctl_lock);
+
+ return (retval);
+}
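+
+/*
+ * A FETD would typically call the two functions above from its login and
+ * logout paths; e.g. (a sketch, where "fe", "wwpn" and "iid" come from the
+ * frontend driver):
+ *
+ *	ctl_add_initiator(wwpn, fe->targ_port, iid);
+ *	...
+ *	ctl_remove_initiator(fe->targ_port, iid);
+ */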
+
+/*
+ * XXX KDM should we pretend to do something in the target/lun
+ * enable/disable functions?
+ */
+static int
+ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id)
+{
+ return (0);
+}
+
+static int
+ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id)
+{
+ return (0);
+}
+
+static int
+ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
+{
+ return (0);
+}
+
+static int
+ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
+{
+ return (0);
+}
+
+/*
+ * Data movement routine for the CTL ioctl frontend port.
+ */
+static int
+ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
+{
+ struct ctl_sg_entry *ext_sglist, *kern_sglist;
+ struct ctl_sg_entry ext_entry, kern_entry;
+ int ext_sglen, ext_sg_entries, kern_sg_entries;
+ int ext_sg_start, ext_offset;
+ int len_to_copy, len_copied;
+ int kern_watermark, ext_watermark;
+ int ext_sglist_malloced;
+ int i, j;
+
+ ext_sglist_malloced = 0;
+ ext_sg_start = 0;
+ ext_offset = 0;
+
+ CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
+
+ /*
+ * If this flag is set, fake the data transfer.
+ */
+ if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
+ ctsio->ext_data_filled = ctsio->ext_data_len;
+ goto bailout;
+ }
+
+	/*
+	 * To simplify things here, if we have a single buffer, stick it in
+	 * an S/G entry and just make it a single entry S/G list.
+	 */
+ if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
+ int len_seen;
+
+ ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
+
+ ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
+ M_WAITOK);
+ if (ext_sglist == NULL) {
+ ctl_set_internal_failure(ctsio,
+ /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ ext_sglist_malloced = 1;
+ if (copyin(ctsio->ext_data_ptr, ext_sglist,
+ ext_sglen) != 0) {
+ ctl_set_internal_failure(ctsio,
+ /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+ goto bailout;
+ }
+ ext_sg_entries = ctsio->ext_sg_entries;
+ len_seen = 0;
+ for (i = 0; i < ext_sg_entries; i++) {
+ if ((len_seen + ext_sglist[i].len) >=
+ ctsio->ext_data_filled) {
+ ext_sg_start = i;
+ ext_offset = ctsio->ext_data_filled - len_seen;
+ break;
+ }
+ len_seen += ext_sglist[i].len;
+ }
+ } else {
+ ext_sglist = &ext_entry;
+ ext_sglist->addr = ctsio->ext_data_ptr;
+ ext_sglist->len = ctsio->ext_data_len;
+ ext_sg_entries = 1;
+ ext_sg_start = 0;
+ ext_offset = ctsio->ext_data_filled;
+ }
+
+ if (ctsio->kern_sg_entries > 0) {
+ kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
+ kern_sg_entries = ctsio->kern_sg_entries;
+ } else {
+ kern_sglist = &kern_entry;
+ kern_sglist->addr = ctsio->kern_data_ptr;
+ kern_sglist->len = ctsio->kern_data_len;
+ kern_sg_entries = 1;
+ }
+
+ kern_watermark = 0;
+ ext_watermark = ext_offset;
+ len_copied = 0;
+ for (i = ext_sg_start, j = 0;
+ i < ext_sg_entries && j < kern_sg_entries;) {
+ uint8_t *ext_ptr, *kern_ptr;
+
+ len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
+ kern_sglist[j].len - kern_watermark);
+
+ ext_ptr = (uint8_t *)ext_sglist[i].addr;
+ ext_ptr = ext_ptr + ext_watermark;
+ if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
+ /*
+ * XXX KDM fix this!
+ */
+ panic("need to implement bus address support");
+#if 0
+ kern_ptr = bus_to_virt(kern_sglist[j].addr);
+#endif
+ } else
+ kern_ptr = (uint8_t *)kern_sglist[j].addr;
+ kern_ptr = kern_ptr + kern_watermark;
+
+ kern_watermark += len_to_copy;
+ ext_watermark += len_to_copy;
+
+ if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_IN) {
+ CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
+ "bytes to user\n", len_to_copy));
+ CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
+ "to %p\n", kern_ptr, ext_ptr));
+ if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
+ ctl_set_internal_failure(ctsio,
+ /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+ goto bailout;
+ }
+ } else {
+ CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
+ "bytes from user\n", len_to_copy));
+ CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
+ "to %p\n", ext_ptr, kern_ptr));
+			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
+				ctl_set_internal_failure(ctsio,
+							 /*sks_valid*/ 0,
+							 /*retry_count*/ 0);
+ goto bailout;
+ }
+ }
+
+ len_copied += len_to_copy;
+
+ if (ext_sglist[i].len == ext_watermark) {
+ i++;
+ ext_watermark = 0;
+ }
+
+ if (kern_sglist[j].len == kern_watermark) {
+ j++;
+ kern_watermark = 0;
+ }
+ }
+
+ ctsio->ext_data_filled += len_copied;
+
+ CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
+ "kern_sg_entries: %d\n", ext_sg_entries,
+ kern_sg_entries));
+ CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
+ "kern_data_len = %d\n", ctsio->ext_data_len,
+ ctsio->kern_data_len));
+
+ /* XXX KDM set residual?? */
+bailout:
+
+ if (ext_sglist_malloced != 0)
+ free(ext_sglist, M_CTL);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+/*
+ * Serialize a command that went down the "wrong" side, and so was sent to
+ * this controller for execution. The logic is a little different than the
+ * standard case in ctl_scsiio_precheck(). Errors in this case need to get
+ * sent back to the other side, but in the success case, we execute the
+ * command on this side (XFER mode) or tell the other side to execute it
+ * (SER_ONLY mode).
+ */
+static int
+ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
+{
+ struct ctl_softc *ctl_softc;
+ union ctl_ha_msg msg_info;
+ struct ctl_lun *lun;
+ int retval = 0;
+
+ ctl_softc = control_softc;
+ if (have_lock == 0)
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
+	lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
+	if (lun == NULL) {
+		/*
+		 * Why isn't the LUN defined?  The other side wouldn't
+		 * send a command if the LUN were undefined.
+		 */
+		printf("%s: Bad JUJU!  LUN is NULL!\n", __func__);
+
+ /* "Logical unit not supported" */
+ ctl_set_sense_data(&msg_info.scsi.sense_data,
+ lun,
+ /*sense_format*/SSD_TYPE_NONE,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x25,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+
+ msg_info.scsi.sense_len = SSD_FULL_SIZE;
+ msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
+ msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
+ msg_info.hdr.serializing_sc = NULL;
+ msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
+		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
+		}
+		if (have_lock == 0)
+			mtx_unlock(&ctl_softc->ctl_lock);
+		return (1);
+	}
+
+ TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+
+ switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
+ (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
+ ooa_links))) {
+ case CTL_ACTION_BLOCK:
+ ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
+ TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
+ blocked_links);
+ break;
+ case CTL_ACTION_PASS:
+ case CTL_ACTION_SKIP:
+ if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+ ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+ STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
+ &ctsio->io_hdr, links);
+		} else {
+			/* send msg back to other side */
+ msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
+ msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
+ msg_info.hdr.msg_type = CTL_MSG_R2R;
+#if 0
+ printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
+#endif
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+			    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
+ }
+ }
+ break;
+ case CTL_ACTION_OVERLAP:
+ /* OVERLAPPED COMMANDS ATTEMPTED */
+ ctl_set_sense_data(&msg_info.scsi.sense_data,
+ lun,
+ /*sense_format*/SSD_TYPE_NONE,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x4E,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+
+ msg_info.scsi.sense_len = SSD_FULL_SIZE;
+ msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
+ msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
+ msg_info.hdr.serializing_sc = NULL;
+ msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
+#if 0
+ printf("BAD JUJU:Major Bummer Overlap\n");
+#endif
+ TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ retval = 1;
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
+ }
+ break;
+ case CTL_ACTION_OVERLAP_TAG:
+ /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
+ ctl_set_sense_data(&msg_info.scsi.sense_data,
+ lun,
+ /*sense_format*/SSD_TYPE_NONE,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x4D,
+ /*ascq*/ ctsio->tag_num & 0xff,
+ SSD_ELEM_NONE);
+
+ msg_info.scsi.sense_len = SSD_FULL_SIZE;
+ msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
+ msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
+ msg_info.hdr.serializing_sc = NULL;
+ msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
+#if 0
+ printf("BAD JUJU:Major Bummer Overlap Tag\n");
+#endif
+ TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ retval = 1;
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
+ }
+ break;
+ case CTL_ACTION_ERROR:
+ default:
+ /* "Internal target failure" */
+ ctl_set_sense_data(&msg_info.scsi.sense_data,
+ lun,
+ /*sense_format*/SSD_TYPE_NONE,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
+ /*asc*/ 0x44,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+
+ msg_info.scsi.sense_len = SSD_FULL_SIZE;
+ msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
+ msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
+ msg_info.hdr.serializing_sc = NULL;
+ msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
+#if 0
+ printf("BAD JUJU:Major Bummer HW Error\n");
+#endif
+ TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ retval = 1;
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
+ }
+ break;
+ }
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (retval);
+}
+
+static int
+ctl_ioctl_submit_wait(union ctl_io *io)
+{
+ struct ctl_fe_ioctl_params params;
+ ctl_fe_ioctl_state last_state;
+ int done, retval;
+
+ retval = 0;
+
+ bzero(&params, sizeof(params));
+
+ mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
+ cv_init(&params.sem, "ctlioccv");
+ params.state = CTL_IOCTL_INPROG;
+ last_state = params.state;
+
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;
+
+ CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));
+
+ /* This shouldn't happen */
+ if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
+ return (retval);
+
+ done = 0;
+
+ do {
+ mtx_lock(&params.ioctl_mtx);
+ /*
+ * Check the state here, and don't sleep if the state has
+		 * already changed (i.e. wakeup has already occurred, but we
+ * weren't waiting yet).
+ */
+ if (params.state == last_state) {
+ /* XXX KDM cv_wait_sig instead? */
+ cv_wait(&params.sem, &params.ioctl_mtx);
+ }
+ last_state = params.state;
+
+ switch (params.state) {
+ case CTL_IOCTL_INPROG:
+ /* Why did we wake up? */
+ /* XXX KDM error here? */
+ mtx_unlock(&params.ioctl_mtx);
+ break;
+ case CTL_IOCTL_DATAMOVE:
+ CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));
+
+			/*
+			 * Change last_state back to INPROG to avoid a
+			 * deadlock on subsequent data moves.
+			 */
+ params.state = last_state = CTL_IOCTL_INPROG;
+
+ mtx_unlock(&params.ioctl_mtx);
+ ctl_ioctl_do_datamove(&io->scsiio);
+ /*
+ * Note that in some cases, most notably writes,
+ * this will queue the I/O and call us back later.
+ * In other cases, generally reads, this routine
+ * will immediately call back and wake us up,
+ * probably using our own context.
+ */
+ io->scsiio.be_move_done(io);
+ break;
+ case CTL_IOCTL_DONE:
+ mtx_unlock(&params.ioctl_mtx);
+ CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
+ done = 1;
+ break;
+ default:
+ mtx_unlock(&params.ioctl_mtx);
+ /* XXX KDM error here? */
+ break;
+ }
+ } while (done == 0);
+
+ mtx_destroy(&params.ioctl_mtx);
+ cv_destroy(&params.sem);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+static void
+ctl_ioctl_datamove(union ctl_io *io)
+{
+ struct ctl_fe_ioctl_params *params;
+
+ params = (struct ctl_fe_ioctl_params *)
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+
+ mtx_lock(&params->ioctl_mtx);
+ params->state = CTL_IOCTL_DATAMOVE;
+ cv_broadcast(&params->sem);
+ mtx_unlock(&params->ioctl_mtx);
+}
+
+static void
+ctl_ioctl_done(union ctl_io *io)
+{
+ struct ctl_fe_ioctl_params *params;
+
+ params = (struct ctl_fe_ioctl_params *)
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+
+ mtx_lock(&params->ioctl_mtx);
+ params->state = CTL_IOCTL_DONE;
+ cv_broadcast(&params->sem);
+ mtx_unlock(&params->ioctl_mtx);
+}
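+
+/*
+ * Together, ctl_ioctl_submit_wait() and the two callbacks above implement
+ * a small handshake: submit_wait queues the I/O and sleeps on params.sem,
+ * while ctl_ioctl_datamove() and ctl_ioctl_done() flip params.state under
+ * the mutex and cv_broadcast() the waiter, which then either services the
+ * data move or returns to the ioctl caller.
+ */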
+
+static void
+ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
+{
+ struct ctl_fe_ioctl_startstop_info *sd_info;
+
+ sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;
+
+ sd_info->hs_info.status = metatask->status;
+ sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
+ sd_info->hs_info.luns_complete =
+ metatask->taskinfo.startstop.luns_complete;
+ sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;
+
+ cv_broadcast(&sd_info->sem);
+}
+
+static void
+ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
+{
+ struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;
+
+ fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;
+
+ mtx_lock(fe_bbr_info->lock);
+ fe_bbr_info->bbr_info->status = metatask->status;
+ fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
+ fe_bbr_info->wakeup_done = 1;
+ mtx_unlock(fe_bbr_info->lock);
+
+ cv_broadcast(&fe_bbr_info->sem);
+}
+
+/*
+ * Must be called with the ctl_lock held.
+ * Returns 0 for success, errno for failure.
+ */
+static int
+ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
+ struct ctl_ooa *ooa_hdr)
+{
+ union ctl_io *io;
+ int retval;
+
+ retval = 0;
+
+ for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
+ (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
+ ooa_links)) {
+ struct ctl_ooa_entry *cur_entry, entry;
+
+ /*
+ * If we've got more than we can fit, just count the
+ * remaining entries.
+ */
+ if (*cur_fill_num >= ooa_hdr->alloc_num)
+ continue;
+
+ cur_entry = &ooa_hdr->entries[*cur_fill_num];
+
+ bzero(&entry, sizeof(entry));
+
+ entry.tag_num = io->scsiio.tag_num;
+ entry.lun_num = lun->lun;
+#ifdef CTL_TIME_IO
+ entry.start_bt = io->io_hdr.start_bt;
+#endif
+ bcopy(io->scsiio.cdb, entry.cdb, io->scsiio.cdb_len);
+ entry.cdb_len = io->scsiio.cdb_len;
+ if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
+ entry.cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
+
+ if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
+ entry.cmd_flags |= CTL_OOACMD_FLAG_DMA;
+
+ if (io->io_hdr.flags & CTL_FLAG_ABORT)
+ entry.cmd_flags |= CTL_OOACMD_FLAG_ABORT;
+
+ if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
+ entry.cmd_flags |= CTL_OOACMD_FLAG_RTR;
+
+ if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
+ entry.cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
+
+ retval = copyout(&entry, cur_entry, sizeof(entry));
+
+ if (retval != 0)
+ break;
+ }
+
+ return (retval);
+}
+
+static void *
+ctl_copyin_alloc(void *user_addr, int len, char *error_str,
+ size_t error_str_len)
+{
+ void *kptr;
+
+ kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);
+ if (kptr == NULL) {
+ snprintf(error_str, error_str_len, "Cannot allocate %d bytes",
+ len);
+ return (NULL);
+ }
+
+ if (copyin(user_addr, kptr, len) != 0) {
+ snprintf(error_str, error_str_len, "Error copying %d bytes "
+ "from user address %p to kernel address %p", len,
+ user_addr, kptr);
+ free(kptr, M_CTL);
+ return (NULL);
+ }
+
+ return (kptr);
+}
+
+static void
+ctl_free_args(int num_be_args, struct ctl_be_arg *be_args)
+{
+ int i;
+
+ if (be_args == NULL)
+ return;
+
+ for (i = 0; i < num_be_args; i++) {
+ free(be_args[i].kname, M_CTL);
+ free(be_args[i].kvalue, M_CTL);
+ }
+
+ free(be_args, M_CTL);
+}
+
+static struct ctl_be_arg *
+ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args,
+ char *error_str, size_t error_str_len)
+{
+ struct ctl_be_arg *args;
+ int i;
+
+ args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args),
+ error_str, error_str_len);
+
+ if (args == NULL)
+ goto bailout;
+
+ for (i = 0; i < num_be_args; i++) {
+ uint8_t *tmpptr;
+
+ args[i].kname = ctl_copyin_alloc(args[i].name,
+ args[i].namelen, error_str, error_str_len);
+ if (args[i].kname == NULL)
+ goto bailout;
+
+ if (args[i].kname[args[i].namelen - 1] != '\0') {
+ snprintf(error_str, error_str_len, "Argument %d "
+ "name is not NUL-terminated", i);
+ goto bailout;
+ }
+
+ args[i].kvalue = NULL;
+
+ tmpptr = ctl_copyin_alloc(args[i].value,
+ args[i].vallen, error_str, error_str_len);
+ if (tmpptr == NULL)
+ goto bailout;
+
+ args[i].kvalue = tmpptr;
+
+ if ((args[i].flags & CTL_BEARG_ASCII)
+ && (tmpptr[args[i].vallen - 1] != '\0')) {
+ snprintf(error_str, error_str_len, "Argument %d "
+ "value is not NUL-terminated", i);
+ goto bailout;
+ }
+ }
+
+ return (args);
+bailout:
+
+ ctl_free_args(num_be_args, args);
+
+ return (NULL);
+}
+
+/*
+ * Escape characters that are illegal or not recommended in XML.
+ */
+int
+ctl_sbuf_printf_esc(struct sbuf *sb, char *str)
+{
+ int retval;
+
+ retval = 0;
+
+ for (; *str; str++) {
+ switch (*str) {
+ case '&':
+ retval = sbuf_printf(sb, "&amp;");
+ break;
+ case '>':
+ retval = sbuf_printf(sb, "&gt;");
+ break;
+ case '<':
+ retval = sbuf_printf(sb, "&lt;");
+ break;
+ default:
+ retval = sbuf_putc(sb, *str);
+ break;
+ }
+
+ if (retval != 0)
+ break;
+	}
+
+ return (retval);
+}
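+
+/*
+ * For example, escaping a hypothetical serial number:
+ *
+ *	ctl_sbuf_printf_esc(sb, "ACME<1>&co");
+ *
+ * appends "ACME&lt;1&gt;&amp;co" to the sbuf.
+ */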
+
+static int
+ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td)
+{
+ struct ctl_softc *softc;
+ int retval;
+
+ softc = control_softc;
+
+ retval = 0;
+
+ switch (cmd) {
+ case CTL_IO: {
+ union ctl_io *io;
+ void *pool_tmp;
+
+ /*
+ * If we haven't been "enabled", don't allow any SCSI I/O
+ * to this FETD.
+ */
+ if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
+			retval = EPERM;
+ break;
+ }
+
+ io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref);
+ if (io == NULL) {
+ printf("ctl_ioctl: can't allocate ctl_io!\n");
+			retval = ENOSPC;
+ break;
+ }
+
+		/*
+		 * Need to save the pool reference so it doesn't get
+		 * overwritten by the user's ctl_io copy.
+		 */
+ pool_tmp = io->io_hdr.pool;
+
+ memcpy(io, (void *)addr, sizeof(*io));
+
+ io->io_hdr.pool = pool_tmp;
+ /*
+ * No status yet, so make sure the status is set properly.
+ */
+ io->io_hdr.status = CTL_STATUS_NONE;
+
+ /*
+ * The user sets the initiator ID, target and LUN IDs.
+ */
+ io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port;
+ io->io_hdr.flags |= CTL_FLAG_USER_REQ;
+ if ((io->io_hdr.io_type == CTL_IO_SCSI)
+ && (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
+ io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++;
+
+ retval = ctl_ioctl_submit_wait(io);
+
+ if (retval != 0) {
+ ctl_free_io(io);
+ break;
+ }
+
+ memcpy((void *)addr, io, sizeof(*io));
+
+ /* return this to our pool */
+ ctl_free_io(io);
+
+ break;
+ }
+ case CTL_ENABLE_PORT:
+ case CTL_DISABLE_PORT:
+ case CTL_SET_PORT_WWNS: {
+ struct ctl_frontend *fe;
+ struct ctl_port_entry *entry;
+
+ entry = (struct ctl_port_entry *)addr;
+
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ int action, done;
+
+ action = 0;
+ done = 0;
+
+ if ((entry->port_type == CTL_PORT_NONE)
+ && (entry->targ_port == fe->targ_port)) {
+ /*
+ * If the user only wants to enable or
+ * disable or set WWNs on a specific port,
+ * do the operation and we're done.
+ */
+ action = 1;
+ done = 1;
+ } else if (entry->port_type & fe->port_type) {
+ /*
+ * Compare the user's type mask with the
+ * particular frontend type to see if we
+ * have a match.
+ */
+ action = 1;
+ done = 0;
+
+ /*
+ * Make sure the user isn't trying to set
+ * WWNs on multiple ports at the same time.
+ */
+ if (cmd == CTL_SET_PORT_WWNS) {
+ printf("%s: Can't set WWNs on "
+ "multiple ports\n", __func__);
+ retval = EINVAL;
+ break;
+ }
+ }
+ if (action != 0) {
+ /*
+ * XXX KDM we have to drop the lock here,
+ * because the online/offline operations
+ * can potentially block. We need to
+ * reference count the frontends so they
+				 * can't go away.
+ */
+ mtx_unlock(&softc->ctl_lock);
+
+ if (cmd == CTL_ENABLE_PORT)
+ ctl_frontend_online(fe);
+ else if (cmd == CTL_DISABLE_PORT)
+ ctl_frontend_offline(fe);
+
+ mtx_lock(&softc->ctl_lock);
+
+ if (cmd == CTL_SET_PORT_WWNS)
+ ctl_frontend_set_wwns(fe,
+ (entry->flags & CTL_PORT_WWNN_VALID) ?
+ 1 : 0, entry->wwnn,
+ (entry->flags & CTL_PORT_WWPN_VALID) ?
+ 1 : 0, entry->wwpn);
+ }
+ if (done != 0)
+ break;
+ }
+ mtx_unlock(&softc->ctl_lock);
+ break;
+ }
+ case CTL_GET_PORT_LIST: {
+ struct ctl_frontend *fe;
+ struct ctl_port_list *list;
+ int i;
+
+ list = (struct ctl_port_list *)addr;
+
+ if (list->alloc_len != (list->alloc_num *
+ sizeof(struct ctl_port_entry))) {
+ printf("%s: CTL_GET_PORT_LIST: alloc_len %u != "
+ "alloc_num %u * sizeof(struct ctl_port_entry) "
+ "%zu\n", __func__, list->alloc_len,
+ list->alloc_num, sizeof(struct ctl_port_entry));
+ retval = EINVAL;
+ break;
+ }
+ list->fill_len = 0;
+ list->fill_num = 0;
+ list->dropped_num = 0;
+ i = 0;
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ struct ctl_port_entry entry, *list_entry;
+
+ if (list->fill_num >= list->alloc_num) {
+ list->dropped_num++;
+ continue;
+ }
+
+ entry.port_type = fe->port_type;
+ strlcpy(entry.port_name, fe->port_name,
+ sizeof(entry.port_name));
+ entry.targ_port = fe->targ_port;
+ entry.physical_port = fe->physical_port;
+ entry.virtual_port = fe->virtual_port;
+ entry.wwnn = fe->wwnn;
+ entry.wwpn = fe->wwpn;
+ if (fe->status & CTL_PORT_STATUS_ONLINE)
+ entry.online = 1;
+ else
+ entry.online = 0;
+
+ list_entry = &list->entries[i];
+
+ retval = copyout(&entry, list_entry, sizeof(entry));
+ if (retval != 0) {
+ printf("%s: CTL_GET_PORT_LIST: copyout "
+ "returned %d\n", __func__, retval);
+ break;
+ }
+ i++;
+ list->fill_num++;
+ list->fill_len += sizeof(entry);
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ /*
+ * If this is non-zero, we had a copyout fault, so there's
+ * probably no point in attempting to set the status inside
+ * the structure.
+ */
+ if (retval != 0)
+ break;
+
+ if (list->dropped_num > 0)
+ list->status = CTL_PORT_LIST_NEED_MORE_SPACE;
+ else
+ list->status = CTL_PORT_LIST_OK;
+ break;
+ }
+ case CTL_DUMP_OOA: {
+ struct ctl_lun *lun;
+ union ctl_io *io;
+ char printbuf[128];
+ struct sbuf sb;
+
+ mtx_lock(&softc->ctl_lock);
+ printf("Dumping OOA queues:\n");
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ for (io = (union ctl_io *)TAILQ_FIRST(
+ &lun->ooa_queue); io != NULL;
+ io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
+ ooa_links)) {
+ sbuf_new(&sb, printbuf, sizeof(printbuf),
+ SBUF_FIXEDLEN);
+ sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ",
+ (intmax_t)lun->lun,
+ io->scsiio.tag_num,
+ (io->io_hdr.flags &
+					  CTL_FLAG_BLOCKED) ? " BLOCKED" : "",
+ (io->io_hdr.flags &
+ CTL_FLAG_DMA_INPROG) ? " DMA" : "",
+ (io->io_hdr.flags &
+ CTL_FLAG_ABORT) ? " ABORT" : "",
+ (io->io_hdr.flags &
+ CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : "");
+ ctl_scsi_command_string(&io->scsiio, NULL, &sb);
+ sbuf_finish(&sb);
+ printf("%s\n", sbuf_data(&sb));
+ }
+ }
+ printf("OOA queues dump done\n");
+ mtx_unlock(&softc->ctl_lock);
+ break;
+ }
+ case CTL_GET_OOA: {
+ struct ctl_lun *lun;
+ struct ctl_ooa *ooa_hdr;
+ uint32_t cur_fill_num;
+
+ ooa_hdr = (struct ctl_ooa *)addr;
+
+ if ((ooa_hdr->alloc_len == 0)
+ || (ooa_hdr->alloc_num == 0)) {
+ printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
+ "must be non-zero\n", __func__,
+ ooa_hdr->alloc_len, ooa_hdr->alloc_num);
+ retval = EINVAL;
+ break;
+ }
+
+ if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
+ sizeof(struct ctl_ooa_entry))) {
+			printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
+			       "num %u * sizeof(struct ctl_ooa_entry) %zu\n",
+			       __func__, ooa_hdr->alloc_len, ooa_hdr->alloc_num,
+			       sizeof(struct ctl_ooa_entry));
+ retval = EINVAL;
+ break;
+ }
+
+ mtx_lock(&softc->ctl_lock);
+ if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
+		 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS)
+ || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
+ mtx_unlock(&softc->ctl_lock);
+ printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
+ __func__, (uintmax_t)ooa_hdr->lun_num);
+ retval = EINVAL;
+ break;
+ }
+
+ cur_fill_num = 0;
+
+ if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,
+ ooa_hdr);
+ if (retval != 0)
+ break;
+ }
+ if (retval != 0) {
+ mtx_unlock(&softc->ctl_lock);
+ break;
+ }
+ } else {
+ lun = softc->ctl_luns[ooa_hdr->lun_num];
+
+			retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,
+			    ooa_hdr);
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
+ ooa_hdr->fill_len = ooa_hdr->fill_num *
+ sizeof(struct ctl_ooa_entry);
+
+ getbintime(&ooa_hdr->cur_bt);
+
+ if (cur_fill_num > ooa_hdr->alloc_num) {
+			ooa_hdr->dropped_num = cur_fill_num -
+			    ooa_hdr->alloc_num;
+ ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
+ } else {
+ ooa_hdr->dropped_num = 0;
+ ooa_hdr->status = CTL_OOA_OK;
+ }
+ break;
+ }
+ case CTL_CHECK_OOA: {
+ union ctl_io *io;
+ struct ctl_lun *lun;
+ struct ctl_ooa_info *ooa_info;
+
+ ooa_info = (struct ctl_ooa_info *)addr;
+
+ if (ooa_info->lun_id >= CTL_MAX_LUNS) {
+ ooa_info->status = CTL_OOA_INVALID_LUN;
+ break;
+ }
+ mtx_lock(&softc->ctl_lock);
+ lun = softc->ctl_luns[ooa_info->lun_id];
+ if (lun == NULL) {
+ mtx_unlock(&softc->ctl_lock);
+ ooa_info->status = CTL_OOA_INVALID_LUN;
+ break;
+ }
+
+ ooa_info->num_entries = 0;
+ for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
+ io != NULL; io = (union ctl_io *)TAILQ_NEXT(
+ &io->io_hdr, ooa_links)) {
+ ooa_info->num_entries++;
+ }
+
+ mtx_unlock(&softc->ctl_lock);
+ ooa_info->status = CTL_OOA_SUCCESS;
+
+ break;
+ }
+ case CTL_HARD_START:
+ case CTL_HARD_STOP: {
+ struct ctl_fe_ioctl_startstop_info ss_info;
+ struct cfi_metatask *metatask;
+ struct mtx hs_mtx;
+
+ mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF);
+
+		cv_init(&ss_info.sem, "hard start/stop cv");
+
+ metatask = cfi_alloc_metatask(/*can_wait*/ 1);
+		if (metatask == NULL) {
+			retval = ENOMEM;
+			mtx_destroy(&hs_mtx);
+			cv_destroy(&ss_info.sem);
+			break;
+		}
+
+ if (cmd == CTL_HARD_START)
+ metatask->tasktype = CFI_TASK_STARTUP;
+ else
+ metatask->tasktype = CFI_TASK_SHUTDOWN;
+
+ metatask->callback = ctl_ioctl_hard_startstop_callback;
+ metatask->callback_arg = &ss_info;
+
+ cfi_action(metatask);
+
+ /* Wait for the callback */
+ mtx_lock(&hs_mtx);
+ cv_wait_sig(&ss_info.sem, &hs_mtx);
+ mtx_unlock(&hs_mtx);
+
+ /*
+ * All information has been copied from the metatask by the
+ * time cv_broadcast() is called, so we free the metatask here.
+ */
+ cfi_free_metatask(metatask);
+
+ memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info));
+
+		cv_destroy(&ss_info.sem);
+		mtx_destroy(&hs_mtx);
+ break;
+ }
+ case CTL_BBRREAD: {
+ struct ctl_bbrread_info *bbr_info;
+ struct ctl_fe_ioctl_bbrread_info fe_bbr_info;
+ struct mtx bbr_mtx;
+ struct cfi_metatask *metatask;
+
+ bbr_info = (struct ctl_bbrread_info *)addr;
+
+ bzero(&fe_bbr_info, sizeof(fe_bbr_info));
+
+ bzero(&bbr_mtx, sizeof(bbr_mtx));
+ mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF);
+
+ fe_bbr_info.bbr_info = bbr_info;
+ fe_bbr_info.lock = &bbr_mtx;
+
+ cv_init(&fe_bbr_info.sem, "BBR read cv");
+ metatask = cfi_alloc_metatask(/*can_wait*/ 1);
+
+ if (metatask == NULL) {
+ mtx_destroy(&bbr_mtx);
+ cv_destroy(&fe_bbr_info.sem);
+ retval = ENOMEM;
+ break;
+ }
+ metatask->tasktype = CFI_TASK_BBRREAD;
+ metatask->callback = ctl_ioctl_bbrread_callback;
+ metatask->callback_arg = &fe_bbr_info;
+ metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num;
+ metatask->taskinfo.bbrread.lba = bbr_info->lba;
+ metatask->taskinfo.bbrread.len = bbr_info->len;
+
+ cfi_action(metatask);
+
+ mtx_lock(&bbr_mtx);
+ while (fe_bbr_info.wakeup_done == 0)
+ cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx);
+ mtx_unlock(&bbr_mtx);
+
+ bbr_info->status = metatask->status;
+ bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
+ bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status;
+ memcpy(&bbr_info->sense_data,
+ &metatask->taskinfo.bbrread.sense_data,
+ ctl_min(sizeof(bbr_info->sense_data),
+ sizeof(metatask->taskinfo.bbrread.sense_data)));
+
+ cfi_free_metatask(metatask);
+
+ mtx_destroy(&bbr_mtx);
+ cv_destroy(&fe_bbr_info.sem);
+
+ break;
+ }
+ case CTL_DELAY_IO: {
+ struct ctl_io_delay_info *delay_info;
+#ifdef CTL_IO_DELAY
+ struct ctl_lun *lun;
+#endif /* CTL_IO_DELAY */
+
+ delay_info = (struct ctl_io_delay_info *)addr;
+
+#ifdef CTL_IO_DELAY
+ mtx_lock(&softc->ctl_lock);
+
+		if ((delay_info->lun_id >= CTL_MAX_LUNS)
+ || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
+ delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
+ } else {
+ lun = softc->ctl_luns[delay_info->lun_id];
+
+ delay_info->status = CTL_DELAY_STATUS_OK;
+
+ switch (delay_info->delay_type) {
+ case CTL_DELAY_TYPE_CONT:
+ break;
+ case CTL_DELAY_TYPE_ONESHOT:
+ break;
+ default:
+ delay_info->status =
+ CTL_DELAY_STATUS_INVALID_TYPE;
+ break;
+ }
+
+ switch (delay_info->delay_loc) {
+ case CTL_DELAY_LOC_DATAMOVE:
+ lun->delay_info.datamove_type =
+ delay_info->delay_type;
+ lun->delay_info.datamove_delay =
+ delay_info->delay_secs;
+ break;
+ case CTL_DELAY_LOC_DONE:
+ lun->delay_info.done_type =
+ delay_info->delay_type;
+ lun->delay_info.done_delay =
+ delay_info->delay_secs;
+ break;
+ default:
+ delay_info->status =
+ CTL_DELAY_STATUS_INVALID_LOC;
+ break;
+ }
+ }
+
+ mtx_unlock(&softc->ctl_lock);
+#else
+ delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
+#endif /* CTL_IO_DELAY */
+ break;
+ }
+ case CTL_REALSYNC_SET: {
+ int *syncstate;
+
+ syncstate = (int *)addr;
+
+ mtx_lock(&softc->ctl_lock);
+ switch (*syncstate) {
+ case 0:
+ softc->flags &= ~CTL_FLAG_REAL_SYNC;
+ break;
+ case 1:
+ softc->flags |= CTL_FLAG_REAL_SYNC;
+ break;
+ default:
+			retval = EINVAL;
+ break;
+ }
+ mtx_unlock(&softc->ctl_lock);
+ break;
+ }
+ case CTL_REALSYNC_GET: {
+ int *syncstate;
+
+		syncstate = (int *)addr;
+
+ mtx_lock(&softc->ctl_lock);
+ if (softc->flags & CTL_FLAG_REAL_SYNC)
+ *syncstate = 1;
+ else
+ *syncstate = 0;
+ mtx_unlock(&softc->ctl_lock);
+
+ break;
+ }
+ case CTL_SETSYNC:
+ case CTL_GETSYNC: {
+ struct ctl_sync_info *sync_info;
+ struct ctl_lun *lun;
+
+ sync_info = (struct ctl_sync_info *)addr;
+
+		if (sync_info->lun_id >= CTL_MAX_LUNS) {
+			sync_info->status = CTL_GS_SYNC_NO_LUN;
+			break;
+		}
+		mtx_lock(&softc->ctl_lock);
+		lun = softc->ctl_luns[sync_info->lun_id];
+		if (lun == NULL) {
+			mtx_unlock(&softc->ctl_lock);
+			sync_info->status = CTL_GS_SYNC_NO_LUN;
+			break;
+		}
+ /*
+ * Get or set the sync interval. We're not bounds checking
+		 * in the set case; hopefully the user won't do something
+ * silly.
+ */
+ if (cmd == CTL_GETSYNC)
+ sync_info->sync_interval = lun->sync_interval;
+ else
+ lun->sync_interval = sync_info->sync_interval;
+
+ mtx_unlock(&softc->ctl_lock);
+
+ sync_info->status = CTL_GS_SYNC_OK;
+
+ break;
+ }
+ case CTL_GETSTATS: {
+ struct ctl_stats *stats;
+ struct ctl_lun *lun;
+ int i;
+
+ stats = (struct ctl_stats *)addr;
+
+ if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
+ stats->alloc_len) {
+ stats->status = CTL_SS_NEED_MORE_SPACE;
+ stats->num_luns = softc->num_luns;
+ break;
+ }
+ /*
+ * XXX KDM no locking here. If the LUN list changes,
+ * things can blow up.
+ */
+ for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL;
+ i++, lun = STAILQ_NEXT(lun, links)) {
+ retval = copyout(&lun->stats, &stats->lun_stats[i],
+ sizeof(lun->stats));
+ if (retval != 0)
+ break;
+ }
+ stats->num_luns = softc->num_luns;
+ stats->fill_len = sizeof(struct ctl_lun_io_stats) *
+ softc->num_luns;
+ stats->status = CTL_SS_OK;
+#ifdef CTL_TIME_IO
+ stats->flags = CTL_STATS_FLAG_TIME_VALID;
+#else
+ stats->flags = CTL_STATS_FLAG_NONE;
+#endif
+ getnanouptime(&stats->timestamp);
+ break;
+ }
+ case CTL_ERROR_INJECT: {
+ struct ctl_error_desc *err_desc, *new_err_desc;
+ struct ctl_lun *lun;
+
+ err_desc = (struct ctl_error_desc *)addr;
+
+ new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
+ M_WAITOK | M_ZERO);
+ if (new_err_desc == NULL) {
+ printf("%s: CTL_ERROR_INJECT: error allocating %zu "
+ "bytes\n", __func__, sizeof(*new_err_desc));
+ retval = ENOMEM;
+ break;
+ }
+ bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
+
+ mtx_lock(&softc->ctl_lock);
+ lun = softc->ctl_luns[err_desc->lun_id];
+ if (lun == NULL) {
+ mtx_unlock(&softc->ctl_lock);
+ printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
+ __func__, (uintmax_t)err_desc->lun_id);
+ retval = EINVAL;
+ break;
+ }
+
+ /*
+ * We could do some checking here to verify the validity
+ * of the request, but given the complexity of error
+ * injection requests, the checking logic would be fairly
+ * complex.
+ *
+ * For now, if the request is invalid, it just won't get
+ * executed and might get deleted.
+ */
+ STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);
+
+ /*
+ * XXX KDM check to make sure the serial number is unique,
+ * in case we somehow manage to wrap. That shouldn't
+ * happen for a very long time, but it's the right thing to
+ * do.
+ */
+ new_err_desc->serial = lun->error_serial;
+ err_desc->serial = lun->error_serial;
+ lun->error_serial++;
+
+ mtx_unlock(&softc->ctl_lock);
+ break;
+ }
+ case CTL_ERROR_INJECT_DELETE: {
+ struct ctl_error_desc *delete_desc, *desc, *desc2;
+ struct ctl_lun *lun;
+ int delete_done;
+
+ delete_desc = (struct ctl_error_desc *)addr;
+ delete_done = 0;
+
+ mtx_lock(&softc->ctl_lock);
+ lun = softc->ctl_luns[delete_desc->lun_id];
+ if (lun == NULL) {
+ mtx_unlock(&softc->ctl_lock);
+ printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
+ __func__, (uintmax_t)delete_desc->lun_id);
+ retval = EINVAL;
+ break;
+ }
+ STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
+ if (desc->serial != delete_desc->serial)
+ continue;
+
+ STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
+ links);
+ free(desc, M_CTL);
+ delete_done = 1;
+ }
+ mtx_unlock(&softc->ctl_lock);
+ if (delete_done == 0) {
+ printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
+ "error serial %ju on LUN %u\n", __func__,
+			       (uintmax_t)delete_desc->serial,
+			       delete_desc->lun_id);
+ retval = EINVAL;
+ break;
+ }
+ break;
+ }
+ case CTL_DUMP_STRUCTS: {
+ int i, j, k;
+ struct ctl_frontend *fe;
+
+ printf("CTL IID to WWPN map start:\n");
+ for (i = 0; i < CTL_MAX_PORTS; i++) {
+ for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
+ if (softc->wwpn_iid[i][j].in_use == 0)
+ continue;
+
+ printf("port %d iid %u WWPN %#jx\n",
+ softc->wwpn_iid[i][j].port,
+ softc->wwpn_iid[i][j].iid,
+ (uintmax_t)softc->wwpn_iid[i][j].wwpn);
+ }
+ }
+ printf("CTL IID to WWPN map end\n");
+ printf("CTL Persistent Reservation information start:\n");
+ for (i = 0; i < CTL_MAX_LUNS; i++) {
+ struct ctl_lun *lun;
+
+ lun = softc->ctl_luns[i];
+
+ if ((lun == NULL)
+ || ((lun->flags & CTL_LUN_DISABLED) != 0))
+ continue;
+
+			for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
+				for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++) {
+					int idx;
+
+					idx = (j * CTL_MAX_INIT_PER_PORT) + k;
+					if (lun->per_res[idx].registered == 0)
+						continue;
+
+					printf("LUN %d port %d iid %d key "
+					       "%#jx\n", i, j, k,
+					       (uintmax_t)scsi_8btou64(
+					       lun->per_res[idx].res_key.key));
+				}
+			}
+ }
+ printf("CTL Persistent Reservation information end\n");
+ printf("CTL Frontends:\n");
+ /*
+ * XXX KDM calling this without a lock. We'd likely want
+ * to drop the lock before calling the frontend's dump
+ * routine anyway.
+ */
+ STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ printf("Frontend %s Type %u pport %d vport %d WWNN "
+ "%#jx WWPN %#jx\n", fe->port_name, fe->port_type,
+ fe->physical_port, fe->virtual_port,
+ (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn);
+
+ /*
+ * Frontends are not required to support the dump
+ * routine.
+ */
+ if (fe->fe_dump == NULL)
+ continue;
+
+ fe->fe_dump();
+ }
+ printf("CTL Frontend information end\n");
+ break;
+ }
+ case CTL_LUN_REQ: {
+ struct ctl_lun_req *lun_req;
+ struct ctl_backend_driver *backend;
+
+ lun_req = (struct ctl_lun_req *)addr;
+
+ backend = ctl_backend_find(lun_req->backend);
+ if (backend == NULL) {
+ lun_req->status = CTL_LUN_ERROR;
+ snprintf(lun_req->error_str,
+ sizeof(lun_req->error_str),
+ "Backend \"%s\" not found.",
+ lun_req->backend);
+ break;
+ }
+ if (lun_req->num_be_args > 0) {
+ lun_req->kern_be_args = ctl_copyin_args(
+ lun_req->num_be_args,
+ lun_req->be_args,
+ lun_req->error_str,
+ sizeof(lun_req->error_str));
+ if (lun_req->kern_be_args == NULL) {
+ lun_req->status = CTL_LUN_ERROR;
+ break;
+ }
+ }
+
+ retval = backend->ioctl(dev, cmd, addr, flag, td);
+
+ if (lun_req->num_be_args > 0) {
+ ctl_free_args(lun_req->num_be_args,
+ lun_req->kern_be_args);
+ }
+ break;
+ }
+ case CTL_LUN_LIST: {
+ struct sbuf *sb;
+ struct ctl_lun *lun;
+ struct ctl_lun_list *list;
+
+ list = (struct ctl_lun_list *)addr;
+
+ /*
+ * Allocate a fixed length sbuf here, based on the length
+ * of the user's buffer. We could allocate an auto-extending
+ * buffer, and then tell the user how much larger our
+ * amount of data is than his buffer, but that presents
+ * some problems:
+ *
+ * 1. The sbuf(9) routines use a blocking malloc, and so
+ * we can't hold a lock while calling them with an
+ * auto-extending buffer.
+ *
+ * 2. There is not currently a LUN reference counting
+ * mechanism, outside of outstanding transactions on
+ * the LUN's OOA queue. So a LUN could go away on us
+ * while we're getting the LUN number, backend-specific
+ * information, etc. Thus, given the way things
+ * currently work, we need to hold the CTL lock while
+ * grabbing LUN information.
+ *
+ * So, from the user's standpoint, the best thing to do is
+ * allocate what he thinks is a reasonable buffer length,
+ * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
+ * double the buffer length and try again. (And repeat
+ * that until he succeeds.)
+ */
+ sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
+ if (sb == NULL) {
+ list->status = CTL_LUN_LIST_ERROR;
+ snprintf(list->error_str, sizeof(list->error_str),
+ "Unable to allocate %d bytes for LUN list",
+ list->alloc_len);
+ break;
+ }
+
+ sbuf_printf(sb, "<ctllunlist>\n");
+
+ mtx_lock(&softc->ctl_lock);
+
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
+ (uintmax_t)lun->lun);
+
+ /*
+ * Bail out as soon as we see that we've overfilled
+ * the buffer.
+ */
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "<backend_type>%s"
+ "</backend_type>\n",
+ (lun->backend == NULL) ? "none" :
+ lun->backend->name);
+
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "<lun_type>%d</lun_type>\n",
+ lun->be_lun->lun_type);
+
+ if (retval != 0)
+ break;
+
+ if (lun->backend == NULL) {
+ retval = sbuf_printf(sb, "</lun>\n");
+ if (retval != 0)
+ break;
+ continue;
+ }
+
+ retval = sbuf_printf(sb, "<size>%ju</size>\n",
+ (lun->be_lun->maxlba > 0) ?
+ lun->be_lun->maxlba + 1 : 0);
+
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "<blocksize>%u</blocksize>\n",
+ lun->be_lun->blocksize);
+
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "<serial_number>");
+
+ if (retval != 0)
+ break;
+
+ retval = ctl_sbuf_printf_esc(sb,
+ lun->be_lun->serial_num);
+
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "</serial_number>\n");
+
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "<device_id>");
+
+ if (retval != 0)
+ break;
+
+			retval = ctl_sbuf_printf_esc(sb,
+			    lun->be_lun->device_id);
+
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "</device_id>\n");
+
+ if (retval != 0)
+ break;
+
+ if (lun->backend->lun_info == NULL) {
+ retval = sbuf_printf(sb, "</lun>\n");
+ if (retval != 0)
+ break;
+ continue;
+ }
+
+			retval = lun->backend->lun_info(lun->be_lun->be_lun,
+			    sb);
+
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "</lun>\n");
+
+ if (retval != 0)
+ break;
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ if ((retval != 0)
+ || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
+ retval = 0;
+ sbuf_delete(sb);
+ list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
+ snprintf(list->error_str, sizeof(list->error_str),
+ "Out of space, %d bytes is too small",
+ list->alloc_len);
+ break;
+ }
+
+ sbuf_finish(sb);
+
+ retval = copyout(sbuf_data(sb), list->lun_xml,
+ sbuf_len(sb) + 1);
+
+ list->fill_len = sbuf_len(sb) + 1;
+ list->status = CTL_LUN_LIST_OK;
+ sbuf_delete(sb);
+ break;
+ }
+ default: {
+ /* XXX KDM should we fix this? */
+#if 0
+ struct ctl_backend_driver *backend;
+ unsigned int type;
+ int found;
+
+ found = 0;
+
+ /*
+ * We encode the backend type as the ioctl type for backend
+ * ioctls. So parse it out here, and then search for a
+ * backend of this type.
+ */
+ type = _IOC_TYPE(cmd);
+
+ STAILQ_FOREACH(backend, &softc->be_list, links) {
+ if (backend->type == type) {
+ found = 1;
+ break;
+ }
+ }
+ if (found == 0) {
+ printf("ctl: unknown ioctl command %#lx or backend "
+ "%d\n", cmd, type);
+ retval = -EINVAL;
+ break;
+ }
+ retval = backend->ioctl(dev, cmd, addr, flag, td);
+#endif
+ retval = ENOTTY;
+ break;
+ }
+ }
+ return (retval);
+}
+
+uint32_t
+ctl_get_initindex(struct ctl_nexus *nexus)
+{
+ if (nexus->targ_port < CTL_MAX_PORTS)
+ return (nexus->initid.id +
+ (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
+ else
+ return (nexus->initid.id +
+ ((nexus->targ_port - CTL_MAX_PORTS) *
+ CTL_MAX_INIT_PER_PORT));
+}
+
+uint32_t
+ctl_get_resindex(struct ctl_nexus *nexus)
+{
+ return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
+}
+
+uint32_t
+ctl_port_idx(int port_num)
+{
+	if (port_num < CTL_MAX_PORTS)
+		return (port_num);
+	else
+		return (port_num - CTL_MAX_PORTS);
+}
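+
+/*
+ * Worked example for the three helpers above, using illustrative (not
+ * necessarily real) values of CTL_MAX_PORTS = 32 and
+ * CTL_MAX_INIT_PER_PORT = 2048: initiator 5 on port 33 gets initindex
+ * (33 - 32) * 2048 + 5 = 2053, the same slot as initiator 5 on port 1,
+ * while ctl_get_resindex() keeps the two port banks distinct and
+ * ctl_port_idx(33) folds the port itself back to 1.
+ */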
+
+/*
+ * Note: This only works for bitmask sizes that are at most 32 bits, or
+ * that are a multiple of 32 bits.
+ */
+int
+ctl_ffz(uint32_t *mask, uint32_t size)
+{
+ uint32_t num_chunks, num_pieces;
+ int i, j;
+
+ num_chunks = (size >> 5);
+ if (num_chunks == 0)
+ num_chunks++;
+ num_pieces = ctl_min((sizeof(uint32_t) * 8), size);
+
+ for (i = 0; i < num_chunks; i++) {
+ for (j = 0; j < num_pieces; j++) {
+ if ((mask[i] & (1 << j)) == 0)
+ return ((i << 5) + j);
+ }
+ }
+
+ return (-1);
+}
+
+int
+ctl_set_mask(uint32_t *mask, uint32_t bit)
+{
+ uint32_t chunk, piece;
+
+ chunk = bit >> 5;
+ piece = bit % (sizeof(uint32_t) * 8);
+
+ if ((mask[chunk] & (1 << piece)) != 0)
+ return (-1);
+ else
+ mask[chunk] |= (1 << piece);
+
+ return (0);
+}
+
+int
+ctl_clear_mask(uint32_t *mask, uint32_t bit)
+{
+ uint32_t chunk, piece;
+
+ chunk = bit >> 5;
+ piece = bit % (sizeof(uint32_t) * 8);
+
+ if ((mask[chunk] & (1 << piece)) == 0)
+ return (-1);
+ else
+ mask[chunk] &= ~(1 << piece);
+
+ return (0);
+}
+
+int
+ctl_is_set(uint32_t *mask, uint32_t bit)
+{
+ uint32_t chunk, piece;
+
+ chunk = bit >> 5;
+ piece = bit % (sizeof(uint32_t) * 8);
+
+ if ((mask[chunk] & (1 << piece)) == 0)
+ return (0);
+ else
+ return (1);
+}
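+
+/*
+ * Typical use of the mask helpers above (a sketch; "mask" and "num_bits"
+ * stand in for any of the driver's allocation bitmaps):
+ *
+ *	int slot;
+ *
+ *	slot = ctl_ffz(mask, num_bits);
+ *	if (slot != -1)
+ *		ctl_set_mask(mask, slot);
+ *	...
+ *	ctl_clear_mask(mask, slot);
+ */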
+
+#ifdef unused
+/*
+ * The bus, target and lun are optional, they can be filled in later.
+ * can_wait is used to determine whether we can wait on the malloc or not.
+ */
+union ctl_io*
+ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target,
+ uint32_t targ_lun, int can_wait)
+{
+ union ctl_io *io;
+
+ if (can_wait)
+ io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK);
+ else
+ io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
+
+ if (io != NULL) {
+ io->io_hdr.io_type = io_type;
+ io->io_hdr.targ_port = targ_port;
+ /*
+ * XXX KDM this needs to change/go away. We need to move
+ * to a preallocated pool of ctl_scsiio structures.
+ */
+ io->io_hdr.nexus.targ_target.id = targ_target;
+ io->io_hdr.nexus.targ_lun = targ_lun;
+ }
+
+ return (io);
+}
+
+void
+ctl_kfree_io(union ctl_io *io)
+{
+ free(io, M_CTL);
+}
+#endif /* unused */
+
+/*
+ * ctl_softc, pool_type, total_ctl_io are passed in.
+ * npool is passed out.
+ */
+int
+ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
+ uint32_t total_ctl_io, struct ctl_io_pool **npool)
+{
+ uint32_t i;
+ union ctl_io *cur_io, *next_io;
+ struct ctl_io_pool *pool;
+ int retval;
+
+ retval = 0;
+
+ pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, M_NOWAIT);
+ if (pool == NULL) {
+		retval = ENOMEM;
+ goto bailout;
+ }
+
+ memset(pool, 0, sizeof(*pool));
+
+ pool->type = pool_type;
+ pool->ctl_softc = ctl_softc;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ pool->id = ctl_softc->cur_pool_id++;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ pool->flags = CTL_POOL_FLAG_NONE;
+ STAILQ_INIT(&pool->free_queue);
+
+ /*
+ * XXX KDM other options here:
+ * - allocate a page at a time
+ * - allocate one big chunk of memory.
+ * Page allocation might work well, but would take a little more
+ * tracking.
+ */
+ for (i = 0; i < total_ctl_io; i++) {
+ cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTL,
+ M_NOWAIT);
+ if (cur_io == NULL) {
+ retval = ENOMEM;
+ break;
+ }
+ cur_io->io_hdr.pool = pool;
+ STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links);
+ pool->total_ctl_io++;
+ pool->free_ctl_io++;
+ }
+
+ if (retval != 0) {
+ for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
+ cur_io != NULL; cur_io = next_io) {
+ next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
+ links);
+ STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
+ ctl_io_hdr, links);
+ free(cur_io, M_CTL);
+ }
+
+ free(pool, M_CTL);
+ goto bailout;
+ }
+ mtx_lock(&ctl_softc->ctl_lock);
+ ctl_softc->num_pools++;
+ STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
+ /*
+ * Increment our usage count if this is an external consumer, so we
+ * can't get unloaded until the external consumer (most likely a
+ * FETD) unloads and frees his pool.
+ *
+ * XXX KDM will this increment the caller's module use count, or
+ * mine?
+ */
+#if 0
+ if ((pool_type != CTL_POOL_EMERGENCY)
+ && (pool_type != CTL_POOL_INTERNAL)
+ && (pool_type != CTL_POOL_IOCTL)
+ && (pool_type != CTL_POOL_4OTHERSC))
+ MOD_INC_USE_COUNT;
+#endif
+
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ *npool = pool;
+
+bailout:
+
+ return (retval);
+}
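+
+/*
+ * Typical consumer usage (a sketch; error handling elided, and the pool
+ * type and size are the consumer's choice):
+ *
+ *	struct ctl_io_pool *pool;
+ *	union ctl_io *io;
+ *
+ *	if (ctl_pool_create(ctl_softc, CTL_POOL_FETD, 1024, &pool) != 0)
+ *		return (ENOMEM);
+ *	io = ctl_alloc_io(pool);
+ *	...
+ *	ctl_free_io(io);
+ */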
+
+/*
+ * Caller must hold ctl_softc->ctl_lock.
+ */
+int
+ctl_pool_acquire(struct ctl_io_pool *pool)
+{
+ if (pool == NULL)
+		return (EINVAL);
+
+ if (pool->flags & CTL_POOL_FLAG_INVALID)
+		return (EINVAL);
+
+ pool->refcount++;
+
+ return (0);
+}
+
+/*
+ * Caller must hold ctl_softc->ctl_lock.
+ */
+int
+ctl_pool_invalidate(struct ctl_io_pool *pool)
+{
+ if (pool == NULL)
+		return (EINVAL);
+
+ pool->flags |= CTL_POOL_FLAG_INVALID;
+
+ return (0);
+}
+
+/*
+ * Caller must hold ctl_softc->ctl_lock.
+ */
+int
+ctl_pool_release(struct ctl_io_pool *pool)
+{
+ if (pool == NULL)
+		return (EINVAL);
+
+ if ((--pool->refcount == 0)
+ && (pool->flags & CTL_POOL_FLAG_INVALID)) {
+ ctl_pool_free(pool->ctl_softc, pool);
+ }
+
+ return (0);
+}
+
+/*
+ * Must be called with ctl_softc->ctl_lock held.
+ */
+void
+ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool)
+{
+ union ctl_io *cur_io, *next_io;
+
+ for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
+ cur_io != NULL; cur_io = next_io) {
+ next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
+ links);
+ STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, ctl_io_hdr,
+ links);
+ free(cur_io, M_CTL);
+ }
+
+ STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
+ ctl_softc->num_pools--;
+
+ /*
+ * XXX KDM will this decrement the caller's usage count or mine?
+ */
+#if 0
+ if ((pool->type != CTL_POOL_EMERGENCY)
+ && (pool->type != CTL_POOL_INTERNAL)
+ && (pool->type != CTL_POOL_IOCTL))
+ MOD_DEC_USE_COUNT;
+#endif
+
+ free(pool, M_CTL);
+}
+
+/*
+ * This routine does not block (except for acquiring the ctl_lock mutex).
+ * It tries to allocate a ctl_io union from the caller's pool as quickly as
+ * possible.
+ */
+union ctl_io *
+ctl_alloc_io(void *pool_ref)
+{
+ union ctl_io *io;
+ struct ctl_softc *ctl_softc;
+ struct ctl_io_pool *pool, *npool;
+ struct ctl_io_pool *emergency_pool;
+
+ pool = (struct ctl_io_pool *)pool_ref;
+
+ if (pool == NULL) {
+ printf("%s: pool is NULL\n", __func__);
+ return (NULL);
+ }
+
+ emergency_pool = NULL;
+
+ ctl_softc = pool->ctl_softc;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ /*
+ * First, try to get the io structure from the user's pool.
+ */
+ if (ctl_pool_acquire(pool) == 0) {
+ io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
+ if (io != NULL) {
+ STAILQ_REMOVE_HEAD(&pool->free_queue, links);
+ pool->total_allocated++;
+ pool->free_ctl_io--;
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (io);
+ } else
+ ctl_pool_release(pool);
+ }
+ /*
+ * If he doesn't have any io structures left, search for an
+ * emergency pool and grab one from there.
+ */
+ STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) {
+ if (npool->type != CTL_POOL_EMERGENCY)
+ continue;
+
+ if (ctl_pool_acquire(npool) != 0)
+ continue;
+
+ emergency_pool = npool;
+
+ io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue);
+ if (io != NULL) {
+ STAILQ_REMOVE_HEAD(&npool->free_queue, links);
+ npool->total_allocated++;
+ npool->free_ctl_io--;
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (io);
+ } else
+ ctl_pool_release(npool);
+ }
+
+	/* Drop the lock before we malloc */
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ /*
+ * The emergency pool (if it exists) didn't have one, so try an
+ * atomic (i.e. nonblocking) malloc and see if we get lucky.
+ */
+ io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
+ if (io != NULL) {
+ /*
+ * If the emergency pool exists but is empty, add this
+ * ctl_io to its list when it gets freed.
+ */
+ if (emergency_pool != NULL) {
+ mtx_lock(&ctl_softc->ctl_lock);
+ if (ctl_pool_acquire(emergency_pool) == 0) {
+ io->io_hdr.pool = emergency_pool;
+ emergency_pool->total_ctl_io++;
+ /*
+ * Need to bump this, otherwise
+ * total_allocated and total_freed won't
+ * match when we no longer have anything
+ * outstanding.
+ */
+ emergency_pool->total_allocated++;
+ }
+ mtx_unlock(&ctl_softc->ctl_lock);
+ } else
+ io->io_hdr.pool = NULL;
+ }
+
+ return (io);
+}
+
+static void
+ctl_free_io_internal(union ctl_io *io, int have_lock)
+{
+ if (io == NULL)
+ return;
+
+ /*
+ * If this ctl_io has a pool, return it to that pool.
+ */
+ if (io->io_hdr.pool != NULL) {
+ struct ctl_io_pool *pool;
+#if 0
+ struct ctl_softc *ctl_softc;
+ union ctl_io *tmp_io;
+ unsigned long xflags;
+ int i;
+
+ ctl_softc = control_softc;
+#endif
+
+ pool = (struct ctl_io_pool *)io->io_hdr.pool;
+
+ if (have_lock == 0)
+ mtx_lock(&pool->ctl_softc->ctl_lock);
+#if 0
+ save_flags(xflags);
+
+ for (i = 0, tmp_io = (union ctl_io *)STAILQ_FIRST(
+ &ctl_softc->task_queue); tmp_io != NULL; i++,
+ tmp_io = (union ctl_io *)STAILQ_NEXT(&tmp_io->io_hdr,
+ links)) {
+ if (tmp_io == io) {
+ printf("%s: %p is still on the task queue!\n",
+ __func__, tmp_io);
+ printf("%s: (%d): type %d "
+ "msg %d cdb %x iptl: "
+ "%d:%d:%d:%d tag 0x%04x "
+ "flg %#lx\n",
+ __func__, i,
+ tmp_io->io_hdr.io_type,
+ tmp_io->io_hdr.msg_type,
+ tmp_io->scsiio.cdb[0],
+ tmp_io->io_hdr.nexus.initid.id,
+ tmp_io->io_hdr.nexus.targ_port,
+ tmp_io->io_hdr.nexus.targ_target.id,
+ tmp_io->io_hdr.nexus.targ_lun,
+ (tmp_io->io_hdr.io_type ==
+ CTL_IO_TASK) ?
+ tmp_io->taskio.tag_num :
+ tmp_io->scsiio.tag_num,
+ xflags);
+ panic("I/O still on the task queue!");
+ }
+ }
+#endif
+ io->io_hdr.io_type = 0xff;
+ STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
+ pool->total_freed++;
+ pool->free_ctl_io++;
+ ctl_pool_release(pool);
+ if (have_lock == 0)
+ mtx_unlock(&pool->ctl_softc->ctl_lock);
+ } else {
+ /*
+ * Otherwise, just free it. We probably malloced it and
+ * the emergency pool wasn't available.
+ */
+ free(io, M_CTL);
+ }
+}
+
+void
+ctl_free_io(union ctl_io *io)
+{
+ ctl_free_io_internal(io, /*have_lock*/ 0);
+}
+
+void
+ctl_zero_io(union ctl_io *io)
+{
+ void *pool_ref;
+
+ if (io == NULL)
+ return;
+
+ /*
+ * May need to preserve linked list pointers at some point too.
+ */
+ pool_ref = io->io_hdr.pool;
+
+ memset(io, 0, sizeof(*io));
+
+ io->io_hdr.pool = pool_ref;
+}
+
+/*
+ * This routine is currently used for internal copies of ctl_ios that need
+ * to persist for some reason after we've already returned status to the
+ * FETD. (Thus the flag set.)
+ *
+ * XXX XXX
+ * Note that this makes a blind copy of all fields in the ctl_io, except
+ * for the pool reference. This includes any memory that has been
+ * allocated! That memory will no longer be valid after done has been
+ * called, so this would be VERY DANGEROUS for a command that actually does
+ * any reads or writes. Right now (11/7/2005), this is only used for immediate
+ * start and stop commands, which don't transfer any data, so this is not a
+ * problem. If it is used for anything else, the caller would also need to
+ * allocate data buffer space and this routine would need to be modified to
+ * copy the data buffer(s) as well.
+ */
+void
+ctl_copy_io(union ctl_io *src, union ctl_io *dest)
+{
+ void *pool_ref;
+
+ if ((src == NULL)
+ || (dest == NULL))
+ return;
+
+ /*
+ * May need to preserve linked list pointers at some point too.
+ */
+ pool_ref = dest->io_hdr.pool;
+
+ memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest)));
+
+ dest->io_hdr.pool = pool_ref;
+ /*
+ * We need to know that this is an internal copy, and doesn't need
+ * to get passed back to the FETD that allocated it.
+ */
+ dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
+}
+
+#ifdef NEEDTOPORT
+static void
+ctl_update_power_subpage(struct copan_power_subpage *page)
+{
+ int num_luns, num_partitions, config_type;
+ struct ctl_softc *softc;
+ cs_BOOL_t aor_present, shelf_50pct_power;
+ cs_raidset_personality_t rs_type;
+ int max_active_luns;
+
+ softc = control_softc;
+
+ /* subtract out the processor LUN */
+ num_luns = softc->num_luns - 1;
+ /*
+ * Default to 7 LUNs active, which was the only number we allowed
+ * in the past.
+ */
+ max_active_luns = 7;
+
+ num_partitions = config_GetRsPartitionInfo();
+ config_type = config_GetConfigType();
+ shelf_50pct_power = config_GetShelfPowerMode();
+ aor_present = config_IsAorRsPresent();
+
+ rs_type = ddb_GetRsRaidType(1);
+ if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5)
+ && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) {
+ EPRINT(0, "Unsupported RS type %d!", rs_type);
+ }
+
+ page->total_luns = num_luns;
+
+ switch (config_type) {
+ case 40:
+ /*
+ * In a 40 drive configuration, it doesn't matter what DC
+ * cards we have, whether we have AOR enabled or not,
+ * partitioning or not, or what type of RAIDset we have.
+ * In that scenario, we can power up every LUN we present
+ * to the user.
+ */
+ max_active_luns = num_luns;
+
+ break;
+ case 64:
+ if (shelf_50pct_power == CS_FALSE) {
+ /* 25% power */
+ if (aor_present == CS_TRUE) {
+ if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID5) {
+ max_active_luns = 7;
+ } else if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID1){
+ max_active_luns = 14;
+ } else {
+ /* XXX KDM now what?? */
+ }
+ } else {
+ if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID5) {
+ max_active_luns = 8;
+ } else if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID1){
+ max_active_luns = 16;
+ } else {
+ /* XXX KDM now what?? */
+ }
+ }
+ } else {
+ /* 50% power */
+ /*
+ * With 50% power in a 64 drive configuration, we
+ * can power all LUNs we present.
+ */
+ max_active_luns = num_luns;
+ }
+ break;
+ case 112:
+ if (shelf_50pct_power == CS_FALSE) {
+ /* 25% power */
+ if (aor_present == CS_TRUE) {
+ if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID5) {
+ max_active_luns = 7;
+ } else if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID1){
+ max_active_luns = 14;
+ } else {
+ /* XXX KDM now what?? */
+ }
+ } else {
+ if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID5) {
+ max_active_luns = 8;
+ } else if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID1){
+ max_active_luns = 16;
+ } else {
+ /* XXX KDM now what?? */
+ }
+ }
+ } else {
+ /* 50% power */
+ if (aor_present == CS_TRUE) {
+ if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID5) {
+ max_active_luns = 14;
+ } else if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID1){
+ /*
+ * We're assuming here that disk
+ * caching is enabled, and so we're
+ * able to power up half of each
+ * LUN, and cache all writes.
+ */
+ max_active_luns = num_luns;
+ } else {
+ /* XXX KDM now what?? */
+ }
+ } else {
+ if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID5) {
+ max_active_luns = 15;
+ } else if (rs_type ==
+ CS_RAIDSET_PERSONALITY_RAID1){
+ max_active_luns = 30;
+ } else {
+ /* XXX KDM now what?? */
+ }
+ }
+ }
+ break;
+ default:
+ /*
+ * In this case, we have an unknown configuration, so we
+ * just use the default from above.
+ */
+ break;
+ }
+
+ page->max_active_luns = max_active_luns;
+#if 0
+ printk("%s: total_luns = %d, max_active_luns = %d\n", __func__,
+ page->total_luns, page->max_active_luns);
+#endif
+}
+#endif /* NEEDTOPORT */
+
+/*
+ * This routine could be used in the future to load default and/or saved
+ * mode page parameters for a particular LUN.
+ */
+static int
+ctl_init_page_index(struct ctl_lun *lun)
+{
+ int i;
+ struct ctl_page_index *page_index;
+ struct ctl_softc *softc;
+
+ memcpy(&lun->mode_pages.index, page_index_template,
+ sizeof(page_index_template));
+
+ softc = lun->ctl_softc;
+
+ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+
+ page_index = &lun->mode_pages.index[i];
+ /*
+ * If this is a disk-only mode page, there's no point in
+ * setting it up. For some pages, we have to have some
+ * basic information about the disk in order to calculate the
+ * mode page data.
+ */
+ if ((lun->be_lun->lun_type != T_DIRECT)
+ && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
+ continue;
+
+ switch (page_index->page_code & SMPH_PC_MASK) {
+ case SMS_FORMAT_DEVICE_PAGE: {
+ struct scsi_format_page *format_page;
+
+ if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
+ panic("subpage is incorrect!");
+
+ /*
+ * Sectors per track are set above. Bytes per
+ * sector need to be set here on a per-LUN basis.
+ */
+ memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
+ &format_page_default,
+ sizeof(format_page_default));
+ memcpy(&lun->mode_pages.format_page[
+ CTL_PAGE_CHANGEABLE], &format_page_changeable,
+ sizeof(format_page_changeable));
+ memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
+ &format_page_default,
+ sizeof(format_page_default));
+ memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
+ &format_page_default,
+ sizeof(format_page_default));
+
+ format_page = &lun->mode_pages.format_page[
+ CTL_PAGE_CURRENT];
+ scsi_ulto2b(lun->be_lun->blocksize,
+ format_page->bytes_per_sector);
+
+ format_page = &lun->mode_pages.format_page[
+ CTL_PAGE_DEFAULT];
+ scsi_ulto2b(lun->be_lun->blocksize,
+ format_page->bytes_per_sector);
+
+ format_page = &lun->mode_pages.format_page[
+ CTL_PAGE_SAVED];
+ scsi_ulto2b(lun->be_lun->blocksize,
+ format_page->bytes_per_sector);
+
+ page_index->page_data =
+ (uint8_t *)lun->mode_pages.format_page;
+ break;
+ }
+ case SMS_RIGID_DISK_PAGE: {
+ struct scsi_rigid_disk_page *rigid_disk_page;
+ uint32_t sectors_per_cylinder;
+ uint64_t cylinders;
+#ifndef __XSCALE__
+ int shift;
+#endif /* !__XSCALE__ */
+
+ if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
+ panic("invalid subpage value %d",
+ page_index->subpage);
+
+ /*
+ * Rotation rate and sectors per track are set
+ * above. We calculate the cylinders here based on
+ * capacity. Due to the number of heads and
+ * sectors per track we're using, smaller arrays
+ * may turn out to have 0 cylinders. Linux and
+ * FreeBSD don't pay attention to these mode pages
+ * to figure out capacity, but Solaris does. It
+ * seems to deal with 0 cylinders just fine, and
+ * works out a fake geometry based on the capacity.
+ */
+ memcpy(&lun->mode_pages.rigid_disk_page[
+ CTL_PAGE_CURRENT], &rigid_disk_page_default,
+ sizeof(rigid_disk_page_default));
+ memcpy(&lun->mode_pages.rigid_disk_page[
+ CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
+ sizeof(rigid_disk_page_changeable));
+ memcpy(&lun->mode_pages.rigid_disk_page[
+ CTL_PAGE_DEFAULT], &rigid_disk_page_default,
+ sizeof(rigid_disk_page_default));
+ memcpy(&lun->mode_pages.rigid_disk_page[
+ CTL_PAGE_SAVED], &rigid_disk_page_default,
+ sizeof(rigid_disk_page_default));
+
+ sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
+ CTL_DEFAULT_HEADS;
+
+ /*
+ * The divide method here will be more accurate,
+			 * probably, but results in a 64-bit software
+			 * division (__udivdi3()) being used in the kernel
+			 * on i386.  On the
+ * XScale, though, __udivdi3() is implemented in
+ * software.
+ *
+ * The shift method for cylinder calculation is
+ * accurate if sectors_per_cylinder is a power of
+ * 2. Otherwise it might be slightly off -- you
+ * might have a bit of a truncation problem.
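+			 *
+			 * For example, if sectors_per_cylinder came out
+			 * to 960 (not a power of 2), the shift method
+			 * would divide by 512 instead, and report almost
+			 * twice as many cylinders as the divide method.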
+ */
+#ifdef __XSCALE__
+ cylinders = (lun->be_lun->maxlba + 1) /
+ sectors_per_cylinder;
+#else
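+			/*
+			 * Find the highest bit set in
+			 * sectors_per_cylinder; shifting by that bit
+			 * index divides by the largest power of 2 that
+			 * is <= sectors_per_cylinder.
+			 */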
+ for (shift = 31; shift > 0; shift--) {
+ if (sectors_per_cylinder & (1 << shift))
+ break;
+ }
+ cylinders = (lun->be_lun->maxlba + 1) >> shift;
+#endif
+
+ /*
+ * We've basically got 3 bytes, or 24 bits for the
+ * cylinder size in the mode page. If we're over,
+			 * just clamp it at 2^24 - 1.
+ */
+ if (cylinders > 0xffffff)
+ cylinders = 0xffffff;
+
+ rigid_disk_page = &lun->mode_pages.rigid_disk_page[
+ CTL_PAGE_CURRENT];
+ scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
+
+ rigid_disk_page = &lun->mode_pages.rigid_disk_page[
+ CTL_PAGE_DEFAULT];
+ scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
+
+ rigid_disk_page = &lun->mode_pages.rigid_disk_page[
+ CTL_PAGE_SAVED];
+ scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
+
+ page_index->page_data =
+ (uint8_t *)lun->mode_pages.rigid_disk_page;
+ break;
+ }
+ case SMS_CACHING_PAGE: {
+
+ if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
+ panic("invalid subpage value %d",
+ page_index->subpage);
+ /*
+ * Defaults should be okay here, no calculations
+ * needed.
+ */
+ memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
+ &caching_page_default,
+ sizeof(caching_page_default));
+ memcpy(&lun->mode_pages.caching_page[
+ CTL_PAGE_CHANGEABLE], &caching_page_changeable,
+ sizeof(caching_page_changeable));
+ memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
+ &caching_page_default,
+ sizeof(caching_page_default));
+ memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
+ &caching_page_default,
+ sizeof(caching_page_default));
+ page_index->page_data =
+ (uint8_t *)lun->mode_pages.caching_page;
+ break;
+ }
+ case SMS_CONTROL_MODE_PAGE: {
+
+ if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
+ panic("invalid subpage value %d",
+ page_index->subpage);
+
+ /*
+ * Defaults should be okay here, no calculations
+ * needed.
+ */
+ memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
+ &control_page_default,
+ sizeof(control_page_default));
+ memcpy(&lun->mode_pages.control_page[
+ CTL_PAGE_CHANGEABLE], &control_page_changeable,
+ sizeof(control_page_changeable));
+ memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
+ &control_page_default,
+ sizeof(control_page_default));
+ memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
+ &control_page_default,
+ sizeof(control_page_default));
+ page_index->page_data =
+ (uint8_t *)lun->mode_pages.control_page;
+ break;
+
+ }
+ case SMS_VENDOR_SPECIFIC_PAGE:{
+ switch (page_index->subpage) {
+ case PWR_SUBPAGE_CODE: {
+ struct copan_power_subpage *current_page,
+ *saved_page;
+
+ memcpy(&lun->mode_pages.power_subpage[
+ CTL_PAGE_CURRENT],
+ &power_page_default,
+ sizeof(power_page_default));
+ memcpy(&lun->mode_pages.power_subpage[
+ CTL_PAGE_CHANGEABLE],
+ &power_page_changeable,
+ sizeof(power_page_changeable));
+ memcpy(&lun->mode_pages.power_subpage[
+ CTL_PAGE_DEFAULT],
+ &power_page_default,
+ sizeof(power_page_default));
+ memcpy(&lun->mode_pages.power_subpage[
+ CTL_PAGE_SAVED],
+ &power_page_default,
+ sizeof(power_page_default));
+ page_index->page_data =
+ (uint8_t *)lun->mode_pages.power_subpage;
+
+ current_page = (struct copan_power_subpage *)
+ (page_index->page_data +
+ (page_index->page_len *
+ CTL_PAGE_CURRENT));
+ saved_page = (struct copan_power_subpage *)
+ (page_index->page_data +
+ (page_index->page_len *
+ CTL_PAGE_SAVED));
+ break;
+ }
+ case APS_SUBPAGE_CODE: {
+ struct copan_aps_subpage *current_page,
+ *saved_page;
+
+				/*
+				 * This gets set multiple times, but it
+				 * should always be the same.  It's only
+				 * done during init, so who cares.
+				 */
+ index_to_aps_page = i;
+
+ memcpy(&lun->mode_pages.aps_subpage[
+ CTL_PAGE_CURRENT],
+ &aps_page_default,
+ sizeof(aps_page_default));
+ memcpy(&lun->mode_pages.aps_subpage[
+ CTL_PAGE_CHANGEABLE],
+ &aps_page_changeable,
+ sizeof(aps_page_changeable));
+ memcpy(&lun->mode_pages.aps_subpage[
+ CTL_PAGE_DEFAULT],
+ &aps_page_default,
+ sizeof(aps_page_default));
+ memcpy(&lun->mode_pages.aps_subpage[
+ CTL_PAGE_SAVED],
+ &aps_page_default,
+ sizeof(aps_page_default));
+ page_index->page_data =
+ (uint8_t *)lun->mode_pages.aps_subpage;
+
+ current_page = (struct copan_aps_subpage *)
+ (page_index->page_data +
+ (page_index->page_len *
+ CTL_PAGE_CURRENT));
+ saved_page = (struct copan_aps_subpage *)
+ (page_index->page_data +
+ (page_index->page_len *
+ CTL_PAGE_SAVED));
+ break;
+ }
+ case DBGCNF_SUBPAGE_CODE: {
+ struct copan_debugconf_subpage *current_page,
+ *saved_page;
+
+ memcpy(&lun->mode_pages.debugconf_subpage[
+ CTL_PAGE_CURRENT],
+ &debugconf_page_default,
+ sizeof(debugconf_page_default));
+ memcpy(&lun->mode_pages.debugconf_subpage[
+ CTL_PAGE_CHANGEABLE],
+ &debugconf_page_changeable,
+ sizeof(debugconf_page_changeable));
+ memcpy(&lun->mode_pages.debugconf_subpage[
+ CTL_PAGE_DEFAULT],
+ &debugconf_page_default,
+ sizeof(debugconf_page_default));
+ memcpy(&lun->mode_pages.debugconf_subpage[
+ CTL_PAGE_SAVED],
+ &debugconf_page_default,
+ sizeof(debugconf_page_default));
+ page_index->page_data =
+ (uint8_t *)lun->mode_pages.debugconf_subpage;
+
+ current_page = (struct copan_debugconf_subpage *)
+ (page_index->page_data +
+ (page_index->page_len *
+ CTL_PAGE_CURRENT));
+ saved_page = (struct copan_debugconf_subpage *)
+ (page_index->page_data +
+ (page_index->page_len *
+ CTL_PAGE_SAVED));
+ break;
+ }
+ default:
+ panic("invalid subpage value %d",
+ page_index->subpage);
+ break;
+ }
+ break;
+ }
+ default:
+ panic("invalid page value %d",
+ page_index->page_code & SMPH_PC_MASK);
+ break;
+ }
+ }
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+/*
+ * LUN allocation.
+ *
+ * Requirements:
+ * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
+ * wants us to allocate the LUN and he can block.
+ * - ctl_softc is always set
+ * - be_lun is set if the LUN has a backend (needed for disk LUNs)
+ *
+ * Returns 0 for success, non-zero (errno) for failure.
+ */
+static int
+ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
+ struct ctl_be_lun *const be_lun, struct ctl_id target_id)
+{
+ struct ctl_lun *nlun, *lun;
+ struct ctl_frontend *fe;
+ int lun_number, i;
+
+ if (be_lun == NULL)
+ return (EINVAL);
+
+ /*
+ * We currently only support Direct Access or Processor LUN types.
+ */
+ switch (be_lun->lun_type) {
+ case T_DIRECT:
+ break;
+ case T_PROCESSOR:
+ break;
+ case T_SEQUENTIAL:
+ case T_CHANGER:
+ default:
+ be_lun->lun_config_status(be_lun->be_lun,
+ CTL_LUN_CONFIG_FAILURE);
+		return (EINVAL);
+ }
+	if (ctl_lun == NULL) {
+		lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
+		if (lun == NULL) {
+			be_lun->lun_config_status(be_lun->be_lun,
+						  CTL_LUN_CONFIG_FAILURE);
+			return (ENOMEM);
+		}
+		memset(lun, 0, sizeof(*lun));
+		lun->flags = CTL_LUN_MALLOCED;
+	} else {
+		lun = ctl_lun;
+		memset(lun, 0, sizeof(*lun));
+	}
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ /*
+ * See if the caller requested a particular LUN number. If so, see
+ * if it is available. Otherwise, allocate the first available LUN.
+ */
+ if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
+ if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
+ || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
+ printf("ctl: requested LUN ID %d is higher "
+ "than CTL_MAX_LUNS - 1 (%d)\n",
+ be_lun->req_lun_id, CTL_MAX_LUNS - 1);
+ } else {
+ /*
+ * XXX KDM return an error, or just assign
+ * another LUN ID in this case??
+ */
+ printf("ctl: requested LUN ID %d is already "
+ "in use\n", be_lun->req_lun_id);
+ }
+ if (lun->flags & CTL_LUN_MALLOCED)
+ free(lun, M_CTL);
+ be_lun->lun_config_status(be_lun->be_lun,
+ CTL_LUN_CONFIG_FAILURE);
+ return (ENOSPC);
+ }
+ lun_number = be_lun->req_lun_id;
+ } else {
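+		/*
+		 * No specific LUN number was requested, so take the first
+		 * free slot in the LUN bitmask.
+		 */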
+ lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS);
+ if (lun_number == -1) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ printf("ctl: can't allocate LUN on target %ju, out of "
+ "LUNs\n", (uintmax_t)target_id.id);
+ if (lun->flags & CTL_LUN_MALLOCED)
+ free(lun, M_CTL);
+ be_lun->lun_config_status(be_lun->be_lun,
+ CTL_LUN_CONFIG_FAILURE);
+ return (ENOSPC);
+ }
+ }
+ ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
+
+ lun->target = target_id;
+ lun->lun = lun_number;
+ lun->be_lun = be_lun;
+ /*
+ * The processor LUN is always enabled. Disk LUNs come on line
+ * disabled, and must be enabled by the backend.
+ */
+	lun->flags |= CTL_LUN_DISABLED;
+ lun->backend = be_lun->be;
+ be_lun->ctl_lun = lun;
+ be_lun->lun_id = lun_number;
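+	/*
+	 * Bump the backend's LUN count; ctl_free_lun() drops it again
+	 * when the LUN goes away.
+	 */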
+ atomic_add_int(&be_lun->be->num_luns, 1);
+ if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF)
+ lun->flags |= CTL_LUN_STOPPED;
+
+ if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE)
+ lun->flags |= CTL_LUN_INOPERABLE;
+
+ if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
+ lun->flags |= CTL_LUN_PRIMARY_SC;
+
+ lun->ctl_softc = ctl_softc;
+ TAILQ_INIT(&lun->ooa_queue);
+ TAILQ_INIT(&lun->blocked_queue);
+ STAILQ_INIT(&lun->error_list);
+
+ /*
+ * Initialize the mode page index.
+ */
+ ctl_init_page_index(lun);
+
+ /*
+ * Set the poweron UA for all initiators on this LUN only.
+ */
+ for (i = 0; i < CTL_MAX_INITIATORS; i++)
+ lun->pending_sense[i].ua_pending = CTL_UA_POWERON;
+
+ /*
+ * Now, before we insert this lun on the lun list, set the lun
+ * inventory changed UA for all other luns.
+ */
+ STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
+ }
+ }
+
+ STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
+
+ ctl_softc->ctl_luns[lun_number] = lun;
+
+ ctl_softc->num_luns++;
+
+ /* Setup statistics gathering */
+ lun->stats.device_type = be_lun->lun_type;
+ lun->stats.lun_number = lun_number;
+ if (lun->stats.device_type == T_DIRECT)
+ lun->stats.blocksize = be_lun->blocksize;
+ else
+ lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
+	for (i = 0; i < CTL_MAX_PORTS; i++)
+ lun->stats.ports[i].targ_port = i;
+
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
+
+ /*
+ * Run through each registered FETD and bring it online if it isn't
+ * already. Enable the target ID if it hasn't been enabled, and
+ * enable this particular LUN.
+ */
+ STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
+ int retval;
+
+ /*
+ * XXX KDM this only works for ONE TARGET ID. We'll need
+ * to do things differently if we go to a multiple target
+ * ID scheme.
+ */
+ if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) {
+
+ retval = fe->targ_enable(fe->targ_lun_arg, target_id);
+ if (retval != 0) {
+ printf("ctl_alloc_lun: FETD %s port %d "
+ "returned error %d for targ_enable on "
+ "target %ju\n", fe->port_name,
+ fe->targ_port, retval,
+ (uintmax_t)target_id.id);
+ } else
+ fe->status |= CTL_PORT_STATUS_TARG_ONLINE;
+ }
+
+ retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number);
+ if (retval != 0) {
+ printf("ctl_alloc_lun: FETD %s port %d returned error "
+ "%d for lun_enable on target %ju lun %d\n",
+ fe->port_name, fe->targ_port, retval,
+ (uintmax_t)target_id.id, lun_number);
+ } else
+ fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
+ }
+ return (0);
+}
+
+/*
+ * Delete a LUN.
+ * Assumptions:
+ * - caller holds ctl_softc->ctl_lock.
+ * - LUN has already been marked invalid and any pending I/O has been taken
+ * care of.
+ */
+static int
+ctl_free_lun(struct ctl_lun *lun)
+{
+ struct ctl_softc *softc;
+#if 0
+ struct ctl_frontend *fe;
+#endif
+ struct ctl_lun *nlun;
+ union ctl_io *io, *next_io;
+ int i;
+
+ softc = lun->ctl_softc;
+
+ STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
+
+ ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
+
+ softc->ctl_luns[lun->lun] = NULL;
+
+ if (TAILQ_FIRST(&lun->ooa_queue) != NULL) {
+ printf("ctl_free_lun: aieee!! freeing a LUN with "
+ "outstanding I/O!!\n");
+ }
+
+ /*
+ * If we have anything pending on the RtR queue, remove it.
+ */
+ for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL;
+ io = next_io) {
+ next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
+ if ((io->io_hdr.nexus.targ_target.id == lun->target.id)
+ && (io->io_hdr.nexus.targ_lun == lun->lun))
+ STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
+ ctl_io_hdr, links);
+ }
+
+ /*
+ * Then remove everything from the blocked queue.
+ */
+ for (io = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); io != NULL;
+ io = next_io) {
+ next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,blocked_links);
+ TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr, blocked_links);
+ io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
+ }
+
+ /*
+ * Now clear out the OOA queue, and free all the I/O.
+ * XXX KDM should we notify the FETD here? We probably need to
+ * quiesce the LUN before deleting it.
+ */
+ for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL;
+ io = next_io) {
+ next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links);
+ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
+ ctl_free_io_internal(io, /*have_lock*/ 1);
+ }
+
+ softc->num_luns--;
+
+ /*
+ * XXX KDM this scheme only works for a single target/multiple LUN
+ * setup. It needs to be revamped for a multiple target scheme.
+ *
+ * XXX KDM this results in fe->lun_disable() getting called twice,
+ * once when ctl_disable_lun() is called, and a second time here.
+ * We really need to re-think the LUN disable semantics. There
+ * should probably be several steps/levels to LUN removal:
+ * - disable
+ * - invalidate
+ * - free
+ *
+ * Right now we only have a disable method when communicating to
+ * the front end ports, at least for individual LUNs.
+ */
+#if 0
+ STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ int retval;
+
+ retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
+ lun->lun);
+ if (retval != 0) {
+ printf("ctl_free_lun: FETD %s port %d returned error "
+ "%d for lun_disable on target %ju lun %jd\n",
+ fe->port_name, fe->targ_port, retval,
+ (uintmax_t)lun->target.id, (intmax_t)lun->lun);
+ }
+
+ if (STAILQ_FIRST(&softc->lun_list) == NULL) {
+ fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE;
+
+ retval = fe->targ_disable(fe->targ_lun_arg,lun->target);
+ if (retval != 0) {
+ printf("ctl_free_lun: FETD %s port %d "
+ "returned error %d for targ_disable on "
+ "target %ju\n", fe->port_name,
+ fe->targ_port, retval,
+ (uintmax_t)lun->target.id);
+ } else
+ fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE;
+
+ if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
+ continue;
+
+#if 0
+ fe->port_offline(fe->onoff_arg);
+ fe->status &= ~CTL_PORT_STATUS_ONLINE;
+#endif
+ }
+ }
+#endif
+
+ /*
+ * Tell the backend to free resources, if this LUN has a backend.
+ */
+ atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
+ lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
+
+ if (lun->flags & CTL_LUN_MALLOCED)
+ free(lun, M_CTL);
+
+ STAILQ_FOREACH(nlun, &softc->lun_list, links) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
+ }
+ }
+
+ return (0);
+}
+
+static void
+ctl_create_lun(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+
+ ctl_softc = control_softc;
+
+ /*
+ * ctl_alloc_lun() should handle all potential failure cases.
+ */
+ ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target);
+}
+
+int
+ctl_add_lun(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+
+ ctl_softc = control_softc;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links);
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ ctl_wakeup_thread();
+
+ return (0);
+}
+
+int
+ctl_enable_lun(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_frontend *fe, *nfe;
+ struct ctl_lun *lun;
+ int retval;
+
+ ctl_softc = control_softc;
+
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ if ((lun->flags & CTL_LUN_DISABLED) == 0) {
+ /*
+ * eh? Why did we get called if the LUN is already
+ * enabled?
+ */
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (0);
+ }
+ lun->flags &= ~CTL_LUN_DISABLED;
+
+ for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) {
+ nfe = STAILQ_NEXT(fe, links);
+
+ /*
+ * Drop the lock while we call the FETD's enable routine.
+ * This can lead to a callback into CTL (at least in the
+		 * case of the internal initiator frontend).
+ */
+ mtx_unlock(&ctl_softc->ctl_lock);
+ retval = fe->lun_enable(fe->targ_lun_arg, lun->target,lun->lun);
+ mtx_lock(&ctl_softc->ctl_lock);
+ if (retval != 0) {
+ printf("%s: FETD %s port %d returned error "
+ "%d for lun_enable on target %ju lun %jd\n",
+ __func__, fe->port_name, fe->targ_port, retval,
+ (uintmax_t)lun->target.id, (intmax_t)lun->lun);
+ }
+#if 0
+ else {
+ /* NOTE: TODO: why does lun enable affect port status? */
+ fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
+ }
+#endif
+ }
+
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_disable_lun(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_frontend *fe;
+ struct ctl_lun *lun;
+ int retval;
+
+ ctl_softc = control_softc;
+
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ if (lun->flags & CTL_LUN_DISABLED) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (0);
+ }
+ lun->flags |= CTL_LUN_DISABLED;
+
+ STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ /*
+ * Drop the lock before we call the frontend's disable
+ * routine, to avoid lock order reversals.
+ *
+ * XXX KDM what happens if the frontend list changes while
+ * we're traversing it? It's unlikely, but should be handled.
+ */
+ retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
+ lun->lun);
+ mtx_lock(&ctl_softc->ctl_lock);
+ if (retval != 0) {
+			printf("ctl_disable_lun: FETD %s port %d returned error "
+ "%d for lun_disable on target %ju lun %jd\n",
+ fe->port_name, fe->targ_port, retval,
+ (uintmax_t)lun->target.id, (intmax_t)lun->lun);
+ }
+ }
+
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_start_lun(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+
+ ctl_softc = control_softc;
+
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ lun->flags &= ~CTL_LUN_STOPPED;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_stop_lun(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+
+ ctl_softc = control_softc;
+
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ lun->flags |= CTL_LUN_STOPPED;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_lun_offline(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+
+ ctl_softc = control_softc;
+
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ lun->flags |= CTL_LUN_OFFLINE;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_lun_online(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+
+ ctl_softc = control_softc;
+
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ lun->flags &= ~CTL_LUN_OFFLINE;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_invalidate_lun(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+
+ ctl_softc = control_softc;
+
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ /*
+ * The LUN needs to be disabled before it can be marked invalid.
+ */
+ if ((lun->flags & CTL_LUN_DISABLED) == 0) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (-1);
+ }
+ /*
+ * Mark the LUN invalid.
+ */
+ lun->flags |= CTL_LUN_INVALID;
+
+ /*
+ * If there is nothing in the OOA queue, go ahead and free the LUN.
+ * If we have something in the OOA queue, we'll free it when the
+ * last I/O completes.
+ */
+ if (TAILQ_FIRST(&lun->ooa_queue) == NULL)
+ ctl_free_lun(lun);
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_lun_inoperable(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+
+ ctl_softc = control_softc;
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ lun->flags |= CTL_LUN_INOPERABLE;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_lun_operable(struct ctl_be_lun *be_lun)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+
+ ctl_softc = control_softc;
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ lun->flags &= ~CTL_LUN_INOPERABLE;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
+ int lock)
+{
+ struct ctl_softc *softc;
+ struct ctl_lun *lun;
+ struct copan_aps_subpage *current_sp;
+ struct ctl_page_index *page_index;
+ int i;
+
+ softc = control_softc;
+
+ mtx_lock(&softc->ctl_lock);
+
+ lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ page_index = NULL;
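+	/*
+	 * Find the APS vendor-specific subpage in this LUN's mode page
+	 * index; its page_data holds the copy we need to update.
+	 */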
+ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+ if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
+ APS_PAGE_CODE)
+ continue;
+
+ if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE)
+ continue;
+ page_index = &lun->mode_pages.index[i];
+ }
+
+ if (page_index == NULL) {
+ mtx_unlock(&softc->ctl_lock);
+ printf("%s: APS subpage not found for lun %ju!\n", __func__,
+ (uintmax_t)lun->lun);
+ return (1);
+ }
+#if 0
+ if ((softc->aps_locked_lun != 0)
+ && (softc->aps_locked_lun != lun->lun)) {
+ printf("%s: attempt to lock LUN %llu when %llu is already "
+ "locked\n");
+ mtx_unlock(&softc->ctl_lock);
+ return (1);
+ }
+#endif
+
+ current_sp = (struct copan_aps_subpage *)(page_index->page_data +
+ (page_index->page_len * CTL_PAGE_CURRENT));
+
+ if (lock != 0) {
+ current_sp->lock_active = APS_LOCK_ACTIVE;
+ softc->aps_locked_lun = lun->lun;
+ } else {
+ current_sp->lock_active = 0;
+ softc->aps_locked_lun = 0;
+ }
+
+ /*
+ * If we're in HA mode, try to send the lock message to the other
+ * side.
+ */
+ if (ctl_is_single == 0) {
+ int isc_retval;
+ union ctl_ha_msg lock_msg;
+
+ lock_msg.hdr.nexus = *nexus;
+ lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK;
+ if (lock != 0)
+ lock_msg.aps.lock_flag = 1;
+ else
+ lock_msg.aps.lock_flag = 0;
+ isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg,
+ sizeof(lock_msg), 0);
+ if (isc_retval > CTL_HA_STATUS_SUCCESS) {
+ printf("%s: APS (lock=%d) error returned from "
+ "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
+ mtx_unlock(&softc->ctl_lock);
+ return (1);
+ }
+ }
+
+ mtx_unlock(&softc->ctl_lock);
+
+ return (0);
+}
+
+/*
+ * Backend "memory move is complete" callback for requests that never
+ * make it down to, say, RAIDCore's configuration code.
+ */
+int
+ctl_config_move_done(union ctl_io *io)
+{
+ int retval;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
+ /*
+ * XXX KDM this shouldn't happen, but what if it does?
+ */
+ if (io->io_hdr.io_type != CTL_IO_SCSI)
+ panic("I/O type isn't CTL_IO_SCSI!");
+
+ if ((io->io_hdr.port_status == 0)
+ && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
+ && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
+ io->io_hdr.status = CTL_SUCCESS;
+ else if ((io->io_hdr.port_status != 0)
+ && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
+ && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){
+ /*
+ * For hardware error sense keys, the sense key
+ * specific value is defined to be a retry count,
+ * but we use it to pass back an internal FETD
+ * error code. XXX KDM Hopefully the FETD is only
+ * using 16 bits for an error code, since that's
+ * all the space we have in the sks field.
+ */
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/
+ io->io_hdr.port_status);
+ free(io->scsiio.kern_data_ptr, M_CTL);
+ ctl_done(io);
+ goto bailout;
+ }
+
+ if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
+ || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+ || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
+ /*
+ * XXX KDM just assuming a single pointer here, and not a
+ * S/G list. If we start using S/G lists for config data,
+ * we'll need to know how to clean them up here as well.
+ */
+ free(io->scsiio.kern_data_ptr, M_CTL);
+ /* Hopefully the user has already set the status... */
+ ctl_done(io);
+ } else {
+ /*
+ * XXX KDM now we need to continue data movement. Some
+ * options:
+ * - call ctl_scsiio() again? We don't do this for data
+ * writes, because for those at least we know ahead of
+ * time where the write will go and how long it is. For
+ * config writes, though, that information is largely
+ * contained within the write itself, thus we need to
+ * parse out the data again.
+ *
+ * - Call some other function once the data is in?
+ */
+
+ /*
+ * XXX KDM call ctl_scsiio() again for now, and check flag
+ * bits to see whether we're allocated or not.
+ */
+ retval = ctl_scsiio(&io->scsiio);
+ }
+bailout:
+ return (retval);
+}
+
+/*
+ * This gets called by a backend driver when it is done with a
+ * configuration write.
+ */
+void
+ctl_config_write_done(union ctl_io *io)
+{
+ /*
+ * If the IO_CONT flag is set, we need to call the supplied
+ * function to continue processing the I/O, instead of completing
+ * the I/O just yet.
+ *
+ * If there is an error, though, we don't want to keep processing.
+ * Instead, just send status back to the initiator.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT)
+ && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)
+ || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) {
+ io->scsiio.io_cont(io);
+ return;
+ }
+ /*
+ * Since a configuration write can be done for commands that actually
+ * have data allocated, like write buffer, and commands that have
+ * no data, like start/stop unit, we need to check here.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
+ free(io->scsiio.kern_data_ptr, M_CTL);
+ ctl_done(io);
+}
+
+/*
+ * SCSI release command.
+ */
+int
+ctl_scsi_release(struct ctl_scsiio *ctsio)
+{
+	int length, longid, resv_id;
+	uint64_t thirdparty_id;
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+
+ length = 0;
+ resv_id = 0;
+
+ CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ ctl_softc = control_softc;
+
+ switch (ctsio->cdb[0]) {
+ case RELEASE: {
+ struct scsi_release *cdb;
+
+ cdb = (struct scsi_release *)ctsio->cdb;
+ if ((cdb->byte2 & 0x1f) != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ break;
+ }
+ case RELEASE_10: {
+ struct scsi_release_10 *cdb;
+
+ cdb = (struct scsi_release_10 *)ctsio->cdb;
+
+ if ((cdb->byte2 & SR10_EXTENT) != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+
+ }
+
+ if ((cdb->byte2 & SR10_3RDPTY) != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ if (cdb->byte2 & SR10_LONGID)
+ longid = 1;
+ else
+ thirdparty_id = cdb->thirdparty_id;
+
+ resv_id = cdb->resv_id;
+ length = scsi_2btoul(cdb->length);
+ break;
+ }
+ }
+
+ /*
+ * XXX KDM right now, we only support LUN reservation. We don't
+ * support 3rd party reservations, or extent reservations, which
+ * might actually need the parameter list. If we've gotten this
+ * far, we've got a LUN reservation. Anything else got kicked out
+ * above. So, according to SPC, ignore the length.
+ */
+ length = 0;
+
+ if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
+ && (length > 0)) {
+ ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+			ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ ctsio->kern_data_len = length;
+ ctsio->kern_total_len = length;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ if (length > 0)
+ thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
+
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ /*
+	 * According to SPC, it is not an error for an initiator to attempt
+ * to release a reservation on a LUN that isn't reserved, or that
+ * is reserved by another initiator. The reservation can only be
+ * released, though, by the initiator who made it or by one of
+ * several reset type events.
+ */
+ if (lun->flags & CTL_LUN_RESERVED) {
+ if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id)
+ && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port)
+ && (ctsio->io_hdr.nexus.targ_target.id ==
+ lun->rsv_nexus.targ_target.id)) {
+ lun->flags &= ~CTL_LUN_RESERVED;
+ }
+ }
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.status = CTL_SUCCESS;
+
+ if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
+ }
+
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_scsi_reserve(struct ctl_scsiio *ctsio)
+{
+ int extent, thirdparty, longid;
+ int resv_id, length;
+ uint64_t thirdparty_id;
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+
+ extent = 0;
+ thirdparty = 0;
+ longid = 0;
+ resv_id = 0;
+ length = 0;
+ thirdparty_id = 0;
+
+ CTL_DEBUG_PRINT(("ctl_reserve\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ ctl_softc = control_softc;
+
+ switch (ctsio->cdb[0]) {
+ case RESERVE: {
+ struct scsi_reserve *cdb;
+
+ cdb = (struct scsi_reserve *)ctsio->cdb;
+ if ((cdb->byte2 & 0x1f) != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ resv_id = cdb->resv_id;
+ length = scsi_2btoul(cdb->length);
+ break;
+ }
+ case RESERVE_10: {
+ struct scsi_reserve_10 *cdb;
+
+ cdb = (struct scsi_reserve_10 *)ctsio->cdb;
+
+ if ((cdb->byte2 & SR10_EXTENT) != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ if ((cdb->byte2 & SR10_3RDPTY) != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ if (cdb->byte2 & SR10_LONGID)
+ longid = 1;
+ else
+ thirdparty_id = cdb->thirdparty_id;
+
+ resv_id = cdb->resv_id;
+ length = scsi_2btoul(cdb->length);
+ break;
+ }
+ }
+
+ /*
+ * XXX KDM right now, we only support LUN reservation. We don't
+ * support 3rd party reservations, or extent reservations, which
+ * might actually need the parameter list. If we've gotten this
+ * far, we've got a LUN reservation. Anything else got kicked out
+ * above. So, according to SPC, ignore the length.
+ */
+ length = 0;
+
+ if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
+ && (length > 0)) {
+ ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+			ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ ctsio->kern_data_len = length;
+ ctsio->kern_total_len = length;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ if (length > 0)
+ thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ if (lun->flags & CTL_LUN_RESERVED) {
+ if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
+ || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
+ || (ctsio->io_hdr.nexus.targ_target.id !=
+ lun->rsv_nexus.targ_target.id)) {
+ ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ goto bailout;
+ }
+ }
+
+ lun->flags |= CTL_LUN_RESERVED;
+ lun->rsv_nexus = ctsio->io_hdr.nexus;
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.status = CTL_SUCCESS;
+
+bailout:
+ if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
+ }
+
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_start_stop(struct ctl_scsiio *ctsio)
+{
+ struct scsi_start_stop_unit *cdb;
+ struct ctl_lun *lun;
+ struct ctl_softc *ctl_softc;
+ int retval;
+
+ CTL_DEBUG_PRINT(("ctl_start_stop\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ ctl_softc = control_softc;
+ retval = 0;
+
+ cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
+
+ /*
+ * XXX KDM
+ * We don't support the immediate bit on a stop unit. In order to
+ * do that, we would need to code up a way to know that a stop is
+ * pending, and hold off any new commands until it completes, one
+ * way or another. Then we could accept or reject those commands
+ * depending on its status. We would almost need to do the reverse
+ * of what we do below for an immediate start -- return the copy of
+ * the ctl_io to the FETD with status to send to the host (and to
+ * free the copy!) and then free the original I/O once the stop
+ * actually completes. That way, the OOA queue mechanism can work
+ * to block commands that shouldn't proceed. Another alternative
+ * would be to put the copy in the queue in place of the original,
+ * and return the original back to the caller. That could be
+ * slightly safer..
+ */
+ if ((cdb->byte2 & SSS_IMMED)
+ && ((cdb->how & SSS_START) == 0)) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * We don't support the power conditions field. We need to check
+ * this prior to checking the load/eject and start/stop bits.
+ */
+ if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 4,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * Media isn't removable, so we can't load or eject it.
+ */
+ if ((cdb->how & SSS_LOEJ) != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 4,
+ /*bit_valid*/ 1,
+ /*bit*/ 1);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ if ((lun->flags & CTL_LUN_PR_RESERVED)
+	 && ((cdb->how & SSS_START) == 0)) {
+		uint32_t residx;
+
+		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+		if (!lun->per_res[residx].registered
+		 || (lun->pr_res_idx != residx && lun->res_type < 4)) {
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ }
+
+ /*
+ * If there is no backend on this device, we can't start or stop
+ * it. In theory we shouldn't get any start/stop commands in the
+ * first place at this level if the LUN doesn't have a backend.
+ * That should get stopped by the command decode code.
+ */
+ if (lun->backend == NULL) {
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * XXX KDM Copan-specific offline behavior.
+ * Figure out a reasonable way to port this?
+ */
+#ifdef NEEDTOPORT
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
+ && (lun->flags & CTL_LUN_OFFLINE)) {
+ /*
+ * If the LUN is offline, and the on/offline bit isn't set,
+ * reject the start or stop. Otherwise, let it through.
+ */
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_set_lun_not_ready(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ } else {
+ mtx_unlock(&ctl_softc->ctl_lock);
+#endif /* NEEDTOPORT */
+ /*
+ * This could be a start or a stop when we're online,
+ * or a stop/offline or start/online. A start or stop when
+ * we're offline is covered in the case above.
+ */
+ /*
+ * In the non-immediate case, we send the request to
+ * the backend and return status to the user when
+ * it is done.
+ *
+ * In the immediate case, we allocate a new ctl_io
+ * to hold a copy of the request, and send that to
+ * the backend. We then set good status on the
+ * user's request and return it immediately.
+ */
+ if (cdb->byte2 & SSS_IMMED) {
+ union ctl_io *new_io;
+
+ new_io = ctl_alloc_io(ctsio->io_hdr.pool);
+ if (new_io == NULL) {
+ ctl_set_busy(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ } else {
+ ctl_copy_io((union ctl_io *)ctsio,
+ new_io);
+ retval = lun->backend->config_write(new_io);
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ }
+ } else {
+ retval = lun->backend->config_write(
+ (union ctl_io *)ctsio);
+ }
+#ifdef NEEDTOPORT
+ }
+#endif
+ return (retval);
+}
+
+/*
+ * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
+ * we don't really do anything with the LBA and length fields if the user
+ * passes them in. Instead we'll just flush out the cache for the entire
+ * LUN.
+ */
+int
+ctl_sync_cache(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct ctl_softc *ctl_softc;
+ uint64_t starting_lba;
+ uint32_t block_count;
+ int reladr, immed;
+ int retval;
+
+ CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ ctl_softc = control_softc;
+ retval = 0;
+ reladr = 0;
+ immed = 0;
+
+ switch (ctsio->cdb[0]) {
+ case SYNCHRONIZE_CACHE: {
+ struct scsi_sync_cache *cdb;
+ cdb = (struct scsi_sync_cache *)ctsio->cdb;
+
+ if (cdb->byte2 & SSC_RELADR)
+ reladr = 1;
+
+ if (cdb->byte2 & SSC_IMMED)
+ immed = 1;
+
+ starting_lba = scsi_4btoul(cdb->begin_lba);
+ block_count = scsi_2btoul(cdb->lb_count);
+ break;
+ }
+ case SYNCHRONIZE_CACHE_16: {
+ struct scsi_sync_cache_16 *cdb;
+ cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
+
+ if (cdb->byte2 & SSC_RELADR)
+ reladr = 1;
+
+ if (cdb->byte2 & SSC_IMMED)
+ immed = 1;
+
+ starting_lba = scsi_8btou64(cdb->begin_lba);
+ block_count = scsi_4btoul(cdb->lb_count);
+ break;
+ }
+ default:
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ break; /* NOTREACHED */
+ }
+
+ if (immed) {
+ /*
+ * We don't support the immediate bit. Since it's in the
+ * same place for the 10 and 16 byte SYNCHRONIZE CACHE
+ * commands, we can just return the same error in either
+ * case.
+ */
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 1);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ }
+
+ if (reladr) {
+ /*
+ * We don't support the reladr bit either. It can only be
+ * used with linked commands, and we don't support linked
+ * commands. Since the bit is in the same place for the
+	 * 10 and 16 byte SYNCHRONIZE CACHE commands, we can
+ * just return the same error in either case.
+ */
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ }
+
+ /*
+ * We check the LBA and length, but don't do anything with them.
+	 * A SYNCHRONIZE CACHE will cause the entire cache for this LUN to
+ * get flushed. This check will just help satisfy anyone who wants
+ * to see an error for an out of range LBA.
+ */
+ if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
+ ctl_set_lba_out_of_range(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ }
+
+ /*
+ * If this LUN has no backend, we can't flush the cache anyway.
+ */
+ if (lun->backend == NULL) {
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ }
+
+ /*
+ * Check to see whether we're configured to send the SYNCHRONIZE
+ * CACHE command directly to the back end.
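+	 * Even then, only every sync_interval'th request is passed
+	 * through; the others complete immediately with good status.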
+ */
+ mtx_lock(&ctl_softc->ctl_lock);
+ if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC)
+ && (++(lun->sync_count) >= lun->sync_interval)) {
+ lun->sync_count = 0;
+ mtx_unlock(&ctl_softc->ctl_lock);
+ retval = lun->backend->config_write((union ctl_io *)ctsio);
+ } else {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ }
+
+bailout:
+
+ return (retval);
+}
+
+int
+ctl_format(struct ctl_scsiio *ctsio)
+{
+ struct scsi_format *cdb;
+ struct ctl_lun *lun;
+ struct ctl_softc *ctl_softc;
+ int length, defect_list_len;
+
+ CTL_DEBUG_PRINT(("ctl_format\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ ctl_softc = control_softc;
+
+ cdb = (struct scsi_format *)ctsio->cdb;
+
+ length = 0;
+ if (cdb->byte2 & SF_FMTDATA) {
+ if (cdb->byte2 & SF_LONGLIST)
+ length = sizeof(struct scsi_format_header_long);
+ else
+ length = sizeof(struct scsi_format_header_short);
+ }
+
+ if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
+ && (length > 0)) {
+ ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+			ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ ctsio->kern_data_len = length;
+ ctsio->kern_total_len = length;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ defect_list_len = 0;
+
+ if (cdb->byte2 & SF_FMTDATA) {
+ if (cdb->byte2 & SF_LONGLIST) {
+ struct scsi_format_header_long *header;
+
+ header = (struct scsi_format_header_long *)
+ ctsio->kern_data_ptr;
+
+ defect_list_len = scsi_4btoul(header->defect_list_len);
+ if (defect_list_len != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 0,
+ /*field*/ 2,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ goto bailout;
+ }
+ } else {
+ struct scsi_format_header_short *header;
+
+ header = (struct scsi_format_header_short *)
+ ctsio->kern_data_ptr;
+
+ defect_list_len = scsi_2btoul(header->defect_list_len);
+ if (defect_list_len != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 0,
+ /*field*/ 2,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ goto bailout;
+ }
+ }
+ }
+
+ /*
+ * The format command will clear out the "Medium format corrupted"
+ * status if set by the configuration code. That status is really
+ * just a way to notify the host that we have lost the media, and
+ * get them to issue a command that will basically make them think
+ * they're blowing away the media.
+ */
+ mtx_lock(&ctl_softc->ctl_lock);
+ lun->flags &= ~CTL_LUN_INOPERABLE;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.status = CTL_SUCCESS;
+bailout:
+
+ if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
+ }
+
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_write_buffer(struct ctl_scsiio *ctsio)
+{
+ struct scsi_write_buffer *cdb;
+ struct copan_page_header *header;
+ struct ctl_lun *lun;
+ struct ctl_softc *ctl_softc;
+ int buffer_offset, len;
+ int retval;
+
+ header = NULL;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ ctl_softc = control_softc;
+ cdb = (struct scsi_write_buffer *)ctsio->cdb;
+
+ if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ if (cdb->buffer_id != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ len = scsi_3btoul(cdb->length);
+ buffer_offset = scsi_3btoul(cdb->offset);
+
+ if (len > sizeof(lun->write_buffer)) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 6,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ if (buffer_offset != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 3,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+	 * If the data hasn't been transferred from the host yet, point
+	 * the request at the LUN's write buffer and start the transfer.
+ */
+ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+ ctsio->kern_data_ptr = lun->write_buffer;
+ ctsio->kern_data_len = len;
+ ctsio->kern_total_len = len;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ ctl_done((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+/*
+ * Note that this function currently doesn't actually do anything inside
+ * CTL to enforce things if the DQue bit is turned on.
+ *
+ * Also note that this function can't be used in the default case, because
+ * the DQue bit isn't set in the changeable mask for the control mode page
+ * anyway.  This is just here as an example of how to implement a page
+ * handler, and a placeholder in case we want to allow the user to turn
+ * tagged queueing on and off.
+ *
+ * The D_SENSE bit handling is functional, however, and will turn
+ * descriptor sense on and off for a given LUN.
+ */
+int
+ctl_control_page_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index, uint8_t *page_ptr)
+{
+ struct scsi_control_page *current_cp, *saved_cp, *user_cp;
+ struct ctl_lun *lun;
+ struct ctl_softc *softc;
+ int set_ua;
+ uint32_t initidx;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+ set_ua = 0;
+
+ user_cp = (struct scsi_control_page *)page_ptr;
+ current_cp = (struct scsi_control_page *)
+ (page_index->page_data + (page_index->page_len *
+ CTL_PAGE_CURRENT));
+ saved_cp = (struct scsi_control_page *)
+ (page_index->page_data + (page_index->page_len *
+ CTL_PAGE_SAVED));
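+	/*
+	 * page_index->page_data holds four consecutive copies of the
+	 * page (current, changeable, default and saved), each page_len
+	 * bytes long; the CTL_PAGE_* constants index them.
+	 */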
+
+ softc = control_softc;
+
+ mtx_lock(&softc->ctl_lock);
+ if (((current_cp->rlec & SCP_DSENSE) == 0)
+ && ((user_cp->rlec & SCP_DSENSE) != 0)) {
+ /*
+ * Descriptor sense is currently turned off and the user
+ * wants to turn it on.
+ */
+ current_cp->rlec |= SCP_DSENSE;
+ saved_cp->rlec |= SCP_DSENSE;
+ lun->flags |= CTL_LUN_SENSE_DESC;
+ set_ua = 1;
+ } else if (((current_cp->rlec & SCP_DSENSE) != 0)
+ && ((user_cp->rlec & SCP_DSENSE) == 0)) {
+ /*
+ * Descriptor sense is currently turned on, and the user
+ * wants to turn it off.
+ */
+ current_cp->rlec &= ~SCP_DSENSE;
+ saved_cp->rlec &= ~SCP_DSENSE;
+ lun->flags &= ~CTL_LUN_SENSE_DESC;
+ set_ua = 1;
+ }
+ if (current_cp->queue_flags & SCP_QUEUE_DQUE) {
+ if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
+#ifdef NEEDTOPORT
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_UNTAG_TO_UNTAG,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "Received untagged to untagged transition");
+#endif /* NEEDTOPORT */
+ } else {
+#ifdef NEEDTOPORT
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_UNTAG_TO_TAG,
+ csevent_LogType_ConfigChange,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "Received untagged to tagged "
+ "queueing transition");
+#endif /* NEEDTOPORT */
+
+ current_cp->queue_flags &= ~SCP_QUEUE_DQUE;
+ saved_cp->queue_flags &= ~SCP_QUEUE_DQUE;
+ set_ua = 1;
+ }
+ } else {
+ if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
+#ifdef NEEDTOPORT
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_TAG_TO_UNTAG,
+ csevent_LogType_ConfigChange,
+ csevent_Severity_Warning,
+ csevent_AlertLevel_Yellow,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "Received tagged queueing to untagged "
+ "transition");
+#endif /* NEEDTOPORT */
+
+ current_cp->queue_flags |= SCP_QUEUE_DQUE;
+ saved_cp->queue_flags |= SCP_QUEUE_DQUE;
+ set_ua = 1;
+ } else {
+#ifdef NEEDTOPORT
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_TAG_TO_TAG,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "Received tagged queueing to tagged "
+ "queueing transition");
+#endif /* NEEDTOPORT */
+ }
+ }
+ if (set_ua != 0) {
+ int i;
+ /*
+ * Let other initiators know that the mode
+ * parameters for this LUN have changed.
+ */
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ if (i == initidx)
+ continue;
+
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_MODE_CHANGE;
+ }
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_power_sp_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index, uint8_t *page_ptr)
+{
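+	/*
+	 * Nothing to do at select time; the current values for this
+	 * subpage are recomputed at sense time by
+	 * ctl_power_sp_sense_handler().
+	 */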
+ return (0);
+}
+
+int
+ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index, int pc)
+{
+ struct copan_power_subpage *page;
+
+	page = (struct copan_power_subpage *)(page_index->page_data +
+		(page_index->page_len * pc));
+
+ switch (pc) {
+ case SMS_PAGE_CTRL_CHANGEABLE >> 6:
+ /*
+		 * We don't update the changeable bits for this page.
+ */
+ break;
+ case SMS_PAGE_CTRL_CURRENT >> 6:
+ case SMS_PAGE_CTRL_DEFAULT >> 6:
+ case SMS_PAGE_CTRL_SAVED >> 6:
+#ifdef NEEDTOPORT
+ ctl_update_power_subpage(page);
+#endif
+ break;
+ default:
+#ifdef NEEDTOPORT
+ EPRINT(0, "Invalid PC %d!!", pc);
+#endif
+ break;
+ }
+ return (0);
+}
+
+int
+ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index, uint8_t *page_ptr)
+{
+ struct copan_aps_subpage *user_sp;
+ struct copan_aps_subpage *current_sp;
+ union ctl_modepage_info *modepage_info;
+ struct ctl_softc *softc;
+ struct ctl_lun *lun;
+ int retval;
+
+ retval = CTL_RETVAL_COMPLETE;
+ current_sp = (struct copan_aps_subpage *)(page_index->page_data +
+ (page_index->page_len * CTL_PAGE_CURRENT));
+ softc = control_softc;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ user_sp = (struct copan_aps_subpage *)page_ptr;
+
+ modepage_info = (union ctl_modepage_info *)
+ ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
+
+ modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK;
+ modepage_info->header.subpage = page_index->subpage;
+ modepage_info->aps.lock_active = user_sp->lock_active;
+
+ mtx_lock(&softc->ctl_lock);
+
+	/*
+	 * Lock requests: if another LUN is already locked, that is an
+	 * error.  If the requested LUN is already locked, ignore the
+	 * request.  If no LUN is locked, attempt to lock this one.
+	 *
+	 * Unlock requests: if this LUN is currently locked, attempt to
+	 * unlock it.  Otherwise (another LUN is locked, or no LUN is
+	 * locked), ignore the request.
+	 */
+ if (user_sp->lock_active & APS_LOCK_ACTIVE) {
+ if (softc->aps_locked_lun == lun->lun) {
+ /*
+ * This LUN is already locked, so we're done.
+ */
+ retval = CTL_RETVAL_COMPLETE;
+ } else if (softc->aps_locked_lun == 0) {
+ /*
+ * No one has the lock, pass the request to the
+ * backend.
+ */
+ retval = lun->backend->config_write(
+ (union ctl_io *)ctsio);
+ } else {
+ /*
+ * Someone else has the lock, throw out the request.
+ */
+ ctl_set_already_locked(ctsio);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_done((union ctl_io *)ctsio);
+
+ /*
+ * Set the return value so that ctl_do_mode_select()
+ * won't try to complete the command. We already
+ * completed it here.
+ */
+ retval = CTL_RETVAL_ERROR;
+ }
+ } else if (softc->aps_locked_lun == lun->lun) {
+ /*
+ * This LUN is locked, so pass the unlock request to the
+ * backend.
+ */
+ retval = lun->backend->config_write((union ctl_io *)ctsio);
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ return (retval);
+}
+
+int
+ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ uint8_t *page_ptr)
+{
+ uint8_t *c;
+ int i;
+
+ c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
+	ctl_time_io_secs = (c[0] << 8) | c[1];
+ CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
+ printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
+ printf("page data:");
+	for (i = 0; i < 8; i++)
+		printf(" %.2x", page_ptr[i]);
+ printf("\n");
+ return (0);
+}
+
+int
+ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ int pc)
+{
+ struct copan_debugconf_subpage *page;
+
+	page = (struct copan_debugconf_subpage *)(page_index->page_data +
+		(page_index->page_len * pc));
+
+ switch (pc) {
+ case SMS_PAGE_CTRL_CHANGEABLE >> 6:
+ case SMS_PAGE_CTRL_DEFAULT >> 6:
+ case SMS_PAGE_CTRL_SAVED >> 6:
+ /*
+		 * We don't update the changeable or default bits for this page.
+ */
+ break;
+ case SMS_PAGE_CTRL_CURRENT >> 6:
+ page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
+ page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
+ break;
+ default:
+#ifdef NEEDTOPORT
+ EPRINT(0, "Invalid PC %d!!", pc);
+#endif /* NEEDTOPORT */
+ break;
+ }
+ return (0);
+}
+
+static int
+ctl_do_mode_select(union ctl_io *io)
+{
+ struct scsi_mode_page_header *page_header;
+ struct ctl_page_index *page_index;
+ struct ctl_scsiio *ctsio;
+ int control_dev, page_len;
+ int page_len_offset, page_len_size;
+ union ctl_modepage_info *modepage_info;
+ struct ctl_lun *lun;
+ int *len_left, *len_used;
+ int retval, i;
+
+ ctsio = &io->scsiio;
+ page_index = NULL;
+ page_len = 0;
+ retval = CTL_RETVAL_COMPLETE;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ if (lun->be_lun->lun_type != T_DIRECT)
+ control_dev = 1;
+ else
+ control_dev = 0;
+
+ modepage_info = (union ctl_modepage_info *)
+ ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
+ len_left = &modepage_info->header.len_left;
+ len_used = &modepage_info->header.len_used;
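+
+	/*
+	 * The parameter list we are walking looks like:
+	 *
+	 *   [mode header][block descriptor(s)][page 0][page 1]...
+	 *
+	 * Our caller already accounted for the header and any block
+	 * descriptors, so len_used points at the first mode page and
+	 * len_left counts the page bytes remaining to be parsed.  Each
+	 * trip through do_next_page consumes one page.
+	 */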
+
+do_next_page:
+
+ page_header = (struct scsi_mode_page_header *)
+ (ctsio->kern_data_ptr + *len_used);
+
+ if (*len_left == 0) {
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ } else if (*len_left < sizeof(struct scsi_mode_page_header)) {
+
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_param_len_error(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+
+ } else if ((page_header->page_code & SMPH_SPF)
+ && (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
+
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_param_len_error(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * XXX KDM should we do something with the block descriptor?
+ */
+ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+
+ if ((control_dev != 0)
+ && (lun->mode_pages.index[i].page_flags &
+ CTL_PAGE_FLAG_DISK_ONLY))
+ continue;
+
+ if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
+ (page_header->page_code & SMPH_PC_MASK))
+ continue;
+
+ /*
+ * If neither page has a subpage code, then we've got a
+ * match.
+ */
+ if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0)
+ && ((page_header->page_code & SMPH_SPF) == 0)) {
+ page_index = &lun->mode_pages.index[i];
+ page_len = page_header->page_length;
+ break;
+ }
+
+ /*
+ * If both pages have subpages, then the subpage numbers
+ * have to match.
+ */
+ if ((lun->mode_pages.index[i].page_code & SMPH_SPF)
+ && (page_header->page_code & SMPH_SPF)) {
+ struct scsi_mode_page_header_sp *sph;
+
+ sph = (struct scsi_mode_page_header_sp *)page_header;
+
+ if (lun->mode_pages.index[i].subpage ==
+ sph->subpage) {
+ page_index = &lun->mode_pages.index[i];
+ page_len = scsi_2btoul(sph->page_length);
+ break;
+ }
+ }
+ }
+
+ /*
+ * If we couldn't find the page, or if we don't have a mode select
+ * handler for it, send back an error to the user.
+ */
+ if ((page_index == NULL)
+ || (page_index->select_handler == NULL)) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 0,
+ /*field*/ *len_used,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ if (page_index->page_code & SMPH_SPF) {
+ page_len_offset = 2;
+ page_len_size = 2;
+ } else {
+ page_len_size = 1;
+ page_len_offset = 1;
+ }
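+
+	/*
+	 * For reference, the two mode page header layouts (from SPC-3):
+	 *
+	 *   page format:          byte 0: page code
+	 *                         byte 1: page length (1 byte)
+	 *   subpage format (SPF): byte 0: page code
+	 *                         byte 1: subpage code
+	 *                         bytes 2-3: page length (2 bytes)
+	 *
+	 * The page length field counts only the bytes that follow it,
+	 * which is why the check below expects page_index->page_len
+	 * minus the offset and size of the length field.
+	 */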
+
+ /*
+ * If the length the initiator gives us isn't the one we specify in
+ * the mode page header, or if they didn't specify enough data in
+ * the CDB to avoid truncating this page, kick out the request.
+ */
+ if ((page_len != (page_index->page_len - page_len_offset -
+ page_len_size))
+ || (*len_left < page_index->page_len)) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 0,
+ /*field*/ *len_used + page_len_offset,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * Run through the mode page, checking to make sure that the bits
+ * the user changed are actually legal for him to change.
+ */
+ for (i = 0; i < page_index->page_len; i++) {
+ uint8_t *user_byte, *change_mask, *current_byte;
+ int bad_bit;
+ int j;
+
+ user_byte = (uint8_t *)page_header + i;
+ change_mask = page_index->page_data +
+ (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
+ current_byte = page_index->page_data +
+ (page_index->page_len * CTL_PAGE_CURRENT) + i;
+
+ /*
+ * Check to see whether the user set any bits in this byte
+ * that he is not allowed to set.
+ */
+ if ((*user_byte & ~(*change_mask)) ==
+ (*current_byte & ~(*change_mask)))
+ continue;
+
+ /*
+ * Go through bit by bit to determine which one is illegal.
+ */
+		bad_bit = 0;
+		for (j = 7; j >= 0; j--) {
+			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
+			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
+				bad_bit = j;
+				break;
+			}
+		}
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 0,
+ /*field*/ *len_used + i,
+ /*bit_valid*/ 1,
+ /*bit*/ bad_bit);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * Decrement these before we call the page handler, since we may
+ * end up getting called back one way or another before the handler
+ * returns to this context.
+ */
+ *len_left -= page_index->page_len;
+ *len_used += page_index->page_len;
+
+ retval = page_index->select_handler(ctsio, page_index,
+ (uint8_t *)page_header);
+
+ /*
+ * If the page handler returns CTL_RETVAL_QUEUED, then we need to
+ * wait until this queued command completes to finish processing
+ * the mode page. If it returns anything other than
+ * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
+ * already set the sense information, freed the data pointer, and
+ * completed the io for us.
+ */
+ if (retval != CTL_RETVAL_COMPLETE)
+ goto bailout_no_done;
+
+ /*
+ * If the initiator sent us more than one page, parse the next one.
+ */
+ if (*len_left > 0)
+ goto do_next_page;
+
+ ctl_set_success(ctsio);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_done((union ctl_io *)ctsio);
+
+bailout_no_done:
+
+ return (CTL_RETVAL_COMPLETE);
+
+}
+
+int
+ctl_mode_select(struct ctl_scsiio *ctsio)
+{
+ int param_len, pf, sp;
+ int header_size, bd_len;
+ int len_left, len_used;
+ struct ctl_page_index *page_index;
+ struct ctl_lun *lun;
+ int control_dev, page_len;
+ union ctl_modepage_info *modepage_info;
+ int retval;
+
+ pf = 0;
+ sp = 0;
+ page_len = 0;
+ len_used = 0;
+ len_left = 0;
+ retval = 0;
+ bd_len = 0;
+ page_index = NULL;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ if (lun->be_lun->lun_type != T_DIRECT)
+ control_dev = 1;
+ else
+ control_dev = 0;
+
+ switch (ctsio->cdb[0]) {
+ case MODE_SELECT_6: {
+ struct scsi_mode_select_6 *cdb;
+
+ cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
+
+ pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
+ sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
+
+ param_len = cdb->length;
+ header_size = sizeof(struct scsi_mode_header_6);
+ break;
+ }
+ case MODE_SELECT_10: {
+ struct scsi_mode_select_10 *cdb;
+
+ cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
+
+ pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
+ sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
+
+ param_len = scsi_2btoul(cdb->length);
+ header_size = sizeof(struct scsi_mode_header_10);
+ break;
+ }
+ default:
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ break; /* NOTREACHED */
+ }
+
+ /*
+ * From SPC-3:
+ * "A parameter list length of zero indicates that the Data-Out Buffer
+ * shall be empty. This condition shall not be considered as an error."
+ */
+ if (param_len == 0) {
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * Since we'll hit this the first time through, prior to
+ * allocation, we don't need to free a data buffer here.
+ */
+ if (param_len < header_size) {
+ ctl_set_param_len_error(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * Allocate the data buffer and grab the user's data. In theory,
+ * we shouldn't have to sanity check the parameter list length here
+ * because the maximum size is 64K. We should be able to malloc
+ * that much without too many problems.
+ */
+ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+ ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctl_set_busy(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ ctsio->kern_data_len = param_len;
+ ctsio->kern_total_len = param_len;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ switch (ctsio->cdb[0]) {
+ case MODE_SELECT_6: {
+ struct scsi_mode_header_6 *mh6;
+
+ mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
+ bd_len = mh6->blk_desc_len;
+ break;
+ }
+ case MODE_SELECT_10: {
+ struct scsi_mode_header_10 *mh10;
+
+ mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
+ bd_len = scsi_2btoul(mh10->blk_desc_len);
+ break;
+ }
+ default:
+ panic("Invalid CDB type %#x", ctsio->cdb[0]);
+ break;
+ }
+
+ if (param_len < (header_size + bd_len)) {
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_param_len_error(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * Set the IO_CONT flag, so that if this I/O gets passed to
+ * ctl_config_write_done(), it'll get passed back to
+ * ctl_do_mode_select() for further processing, or completion if
+ * we're all done.
+ */
+ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
+ ctsio->io_cont = ctl_do_mode_select;
+
+ modepage_info = (union ctl_modepage_info *)
+ ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
+
+ memset(modepage_info, 0, sizeof(*modepage_info));
+
+ len_left = param_len - header_size - bd_len;
+ len_used = header_size + bd_len;
+
+ modepage_info->header.len_left = len_left;
+ modepage_info->header.len_used = len_used;
+
+ return (ctl_do_mode_select((union ctl_io *)ctsio));
+}
+
+int
+ctl_mode_sense(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ int pc, page_code, dbd, llba, subpage;
+ int alloc_len, page_len, header_len, total_len;
+ struct scsi_mode_block_descr *block_desc;
+ struct ctl_page_index *page_index;
+ int control_dev;
+
+ dbd = 0;
+ llba = 0;
+ block_desc = NULL;
+ page_index = NULL;
+
+ CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ if (lun->be_lun->lun_type != T_DIRECT)
+ control_dev = 1;
+ else
+ control_dev = 0;
+
+ switch (ctsio->cdb[0]) {
+ case MODE_SENSE_6: {
+ struct scsi_mode_sense_6 *cdb;
+
+ cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
+
+ header_len = sizeof(struct scsi_mode_hdr_6);
+ if (cdb->byte2 & SMS_DBD)
+ dbd = 1;
+ else
+ header_len += sizeof(struct scsi_mode_block_descr);
+
+ pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
+ page_code = cdb->page & SMS_PAGE_CODE;
+ subpage = cdb->subpage;
+ alloc_len = cdb->length;
+ break;
+ }
+ case MODE_SENSE_10: {
+ struct scsi_mode_sense_10 *cdb;
+
+ cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;
+
+ header_len = sizeof(struct scsi_mode_hdr_10);
+
+ if (cdb->byte2 & SMS_DBD)
+ dbd = 1;
+ else
+ header_len += sizeof(struct scsi_mode_block_descr);
+ if (cdb->byte2 & SMS10_LLBAA)
+ llba = 1;
+ pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
+ page_code = cdb->page & SMS_PAGE_CODE;
+ subpage = cdb->subpage;
+ alloc_len = scsi_2btoul(cdb->length);
+ break;
+ }
+ default:
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ break; /* NOTREACHED */
+ }
+
+ /*
+ * We have to make a first pass through to calculate the size of
+ * the pages that match the user's query. Then we allocate enough
+ * memory to hold it, and actually copy the data into the buffer.
+ */
+ switch (page_code) {
+ case SMS_ALL_PAGES_PAGE: {
+ int i;
+
+ page_len = 0;
+
+ /*
+ * At the moment, values other than 0 and 0xff here are
+ * reserved according to SPC-3.
+ */
+ if ((subpage != SMS_SUBPAGE_PAGE_0)
+ && (subpage != SMS_SUBPAGE_ALL)) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 3,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+ if ((control_dev != 0)
+ && (lun->mode_pages.index[i].page_flags &
+ CTL_PAGE_FLAG_DISK_ONLY))
+ continue;
+
+ /*
+ * We don't use this subpage if the user didn't
+ * request all subpages.
+ */
+ if ((lun->mode_pages.index[i].subpage != 0)
+ && (subpage == SMS_SUBPAGE_PAGE_0))
+ continue;
+
+#if 0
+ printf("found page %#x len %d\n",
+ lun->mode_pages.index[i].page_code &
+ SMPH_PC_MASK,
+ lun->mode_pages.index[i].page_len);
+#endif
+ page_len += lun->mode_pages.index[i].page_len;
+ }
+ break;
+ }
+ default: {
+ int i;
+
+ page_len = 0;
+
+ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+ /* Look for the right page code */
+ if ((lun->mode_pages.index[i].page_code &
+ SMPH_PC_MASK) != page_code)
+ continue;
+
+			/* Look for the right subpage or the subpage wildcard */
+ if ((lun->mode_pages.index[i].subpage != subpage)
+ && (subpage != SMS_SUBPAGE_ALL))
+ continue;
+
+ /* Make sure the page is supported for this dev type */
+ if ((control_dev != 0)
+ && (lun->mode_pages.index[i].page_flags &
+ CTL_PAGE_FLAG_DISK_ONLY))
+ continue;
+
+#if 0
+ printf("found page %#x len %d\n",
+ lun->mode_pages.index[i].page_code &
+ SMPH_PC_MASK,
+ lun->mode_pages.index[i].page_len);
+#endif
+
+ page_len += lun->mode_pages.index[i].page_len;
+ }
+
+ if (page_len == 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 5);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ break;
+ }
+ }
+
+ total_len = header_len + page_len;
+#if 0
+ printf("header_len = %d, page_len = %d, total_len = %d\n",
+ header_len, page_len, total_len);
+#endif
+
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
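+
+	/*
+	 * Note that we transfer min(total_len, alloc_len) bytes: if the
+	 * initiator asked for more data than we have, the shortfall is
+	 * reported back as a residual; if it asked for less, we quietly
+	 * truncate the data to what was requested.
+	 */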
+ memset(ctsio->kern_data_ptr, 0, total_len);
+
+ switch (ctsio->cdb[0]) {
+ case MODE_SENSE_6: {
+ struct scsi_mode_hdr_6 *header;
+
+ header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
+
+ header->datalen = ctl_min(total_len - 1, 254);
+
+ if (dbd)
+ header->block_descr_len = 0;
+ else
+ header->block_descr_len =
+ sizeof(struct scsi_mode_block_descr);
+ block_desc = (struct scsi_mode_block_descr *)&header[1];
+ break;
+ }
+ case MODE_SENSE_10: {
+ struct scsi_mode_hdr_10 *header;
+ int datalen;
+
+ header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
+
+ datalen = ctl_min(total_len - 2, 65533);
+ scsi_ulto2b(datalen, header->datalen);
+ if (dbd)
+ scsi_ulto2b(0, header->block_descr_len);
+ else
+ scsi_ulto2b(sizeof(struct scsi_mode_block_descr),
+ header->block_descr_len);
+ block_desc = (struct scsi_mode_block_descr *)&header[1];
+ break;
+ }
+ default:
+ panic("invalid CDB type %#x", ctsio->cdb[0]);
+ break; /* NOTREACHED */
+ }
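+
+	/*
+	 * In both cases above, the mode data length field does not count
+	 * itself: it is one byte for MODE SENSE(6), so it holds
+	 * total_len - 1 (capped at 254 here), and two bytes for MODE
+	 * SENSE(10), where it holds total_len - 2.
+	 */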
+
+ /*
+ * If we've got a disk, use its blocksize in the block
+ * descriptor. Otherwise, just set it to 0.
+ */
+ if (dbd == 0) {
+		if (control_dev == 0)
+ scsi_ulto3b(lun->be_lun->blocksize,
+ block_desc->block_len);
+ else
+ scsi_ulto3b(0, block_desc->block_len);
+ }
+
+ switch (page_code) {
+ case SMS_ALL_PAGES_PAGE: {
+ int i, data_used;
+
+ data_used = header_len;
+ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+ struct ctl_page_index *page_index;
+
+ page_index = &lun->mode_pages.index[i];
+
+ if ((control_dev != 0)
+ && (page_index->page_flags &
+ CTL_PAGE_FLAG_DISK_ONLY))
+ continue;
+
+ /*
+ * We don't use this subpage if the user didn't
+ * request all subpages. We already checked (above)
+ * to make sure the user only specified a subpage
+ * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
+ */
+ if ((page_index->subpage != 0)
+ && (subpage == SMS_SUBPAGE_PAGE_0))
+ continue;
+
+ /*
+ * Call the handler, if it exists, to update the
+ * page to the latest values.
+ */
+ if (page_index->sense_handler != NULL)
+ page_index->sense_handler(ctsio, page_index,pc);
+
+ memcpy(ctsio->kern_data_ptr + data_used,
+ page_index->page_data +
+ (page_index->page_len * pc),
+ page_index->page_len);
+ data_used += page_index->page_len;
+ }
+ break;
+ }
+ default: {
+ int i, data_used;
+
+ data_used = header_len;
+
+ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+ struct ctl_page_index *page_index;
+
+ page_index = &lun->mode_pages.index[i];
+
+ /* Look for the right page code */
+ if ((page_index->page_code & SMPH_PC_MASK) != page_code)
+ continue;
+
+			/* Look for the right subpage or the subpage wildcard */
+ if ((page_index->subpage != subpage)
+ && (subpage != SMS_SUBPAGE_ALL))
+ continue;
+
+ /* Make sure the page is supported for this dev type */
+ if ((control_dev != 0)
+ && (page_index->page_flags &
+ CTL_PAGE_FLAG_DISK_ONLY))
+ continue;
+
+ /*
+ * Call the handler, if it exists, to update the
+ * page to the latest values.
+ */
+ if (page_index->sense_handler != NULL)
+ page_index->sense_handler(ctsio, page_index,pc);
+
+ memcpy(ctsio->kern_data_ptr + data_used,
+ page_index->page_data +
+ (page_index->page_len * pc),
+ page_index->page_len);
+ data_used += page_index->page_len;
+ }
+ break;
+ }
+ }
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_read_capacity(struct ctl_scsiio *ctsio)
+{
+ struct scsi_read_capacity *cdb;
+ struct scsi_read_capacity_data *data;
+ struct ctl_lun *lun;
+ uint32_t lba;
+
+ CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
+
+ cdb = (struct scsi_read_capacity *)ctsio->cdb;
+
+ lba = scsi_4btoul(cdb->addr);
+ if (((cdb->pmi & SRC_PMI) == 0)
+ && (lba != 0)) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
+ ctsio->residual = 0;
+ ctsio->kern_data_len = sizeof(*data);
+ ctsio->kern_total_len = sizeof(*data);
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+
+ memset(data, 0, sizeof(*data));
+
+ /*
+ * If the maximum LBA is greater than 0xfffffffe, the user must
+ * issue a SERVICE ACTION IN (16) command, with the read capacity
+	 * service action set.
+ */
+ if (lun->be_lun->maxlba > 0xfffffffe)
+ scsi_ulto4b(0xffffffff, data->addr);
+ else
+ scsi_ulto4b(lun->be_lun->maxlba, data->addr);
+
+ /*
+ * XXX KDM this may not be 512 bytes...
+ */
+ scsi_ulto4b(lun->be_lun->blocksize, data->length);
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_read_capacity_16(struct ctl_scsiio *ctsio)
+{
+ struct scsi_read_capacity_16 *cdb;
+ struct scsi_read_capacity_data_long *data;
+ struct ctl_lun *lun;
+ uint64_t lba;
+ uint32_t alloc_len;
+
+ CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
+
+ cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
+
+ alloc_len = scsi_4btoul(cdb->alloc_len);
+ lba = scsi_8btou64(cdb->addr);
+
+ if ((cdb->reladr & SRC16_PMI)
+ && (lba != 0)) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
+
+ if (sizeof(*data) < alloc_len) {
+ ctsio->residual = alloc_len - sizeof(*data);
+ ctsio->kern_data_len = sizeof(*data);
+ ctsio->kern_total_len = sizeof(*data);
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+
+ memset(data, 0, sizeof(*data));
+
+ scsi_u64to8b(lun->be_lun->maxlba, data->addr);
+ /* XXX KDM this may not be 512 bytes... */
+ scsi_ulto4b(lun->be_lun->blocksize, data->length);
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_service_action_in(struct ctl_scsiio *ctsio)
+{
+ struct scsi_service_action_in *cdb;
+ int retval;
+
+ CTL_DEBUG_PRINT(("ctl_service_action_in\n"));
+
+ cdb = (struct scsi_service_action_in *)ctsio->cdb;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ switch (cdb->service_action) {
+ case SRC16_SERVICE_ACTION:
+ retval = ctl_read_capacity_16(ctsio);
+ break;
+ default:
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_done((union ctl_io *)ctsio);
+ break;
+ }
+
+ return (retval);
+}
+
+int
+ctl_maintenance_in(struct ctl_scsiio *ctsio)
+{
+ struct scsi_maintenance_in *cdb;
+ int retval;
+ int alloc_len, total_len = 0;
+ int num_target_port_groups;
+ struct ctl_lun *lun;
+ struct ctl_softc *softc;
+ struct scsi_target_group_data *rtg_ptr;
+ struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2;
+ struct scsi_target_port_descriptor *tp_desc_ptr1_1, *tp_desc_ptr1_2,
+ *tp_desc_ptr2_1, *tp_desc_ptr2_2;
+
+ CTL_DEBUG_PRINT(("ctl_maintenance_in\n"));
+
+ cdb = (struct scsi_maintenance_in *)ctsio->cdb;
+ softc = control_softc;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ retval = CTL_RETVAL_COMPLETE;
+ mtx_lock(&softc->ctl_lock);
+
+ if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_done((union ctl_io *)ctsio);
+		return (retval);
+ }
+
+ if (ctl_is_single)
+ num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1;
+ else
+ num_target_port_groups = NUM_TARGET_PORT_GROUPS;
+
+ total_len = sizeof(struct scsi_target_group_data) +
+ sizeof(struct scsi_target_port_group_descriptor) *
+ num_target_port_groups +
+ sizeof(struct scsi_target_port_descriptor) *
+ NUM_PORTS_PER_GRP * num_target_port_groups;
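+
+	/*
+	 * This mirrors the REPORT TARGET PORT GROUPS data layout from
+	 * SPC-3: a 4-byte return data header, one 8-byte descriptor per
+	 * target port group, and one 4-byte descriptor for each port in
+	 * each group.
+	 */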
+
+ alloc_len = scsi_4btoul(cdb->length);
+
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ memset(ctsio->kern_data_ptr, 0, total_len);
+
+ ctsio->kern_sg_entries = 0;
+
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+
+ rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr;
+
+ tpg_desc_ptr1 = &rtg_ptr->groups[0];
+ tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0];
+ tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *)
+ &tp_desc_ptr1_1->desc_list[0];
+
+ if (ctl_is_single == 0) {
+ tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *)
+ &tp_desc_ptr1_2->desc_list[0];
+ tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0];
+ tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *)
+ &tp_desc_ptr2_1->desc_list[0];
+ } else {
+ tpg_desc_ptr2 = NULL;
+ tp_desc_ptr2_1 = NULL;
+ tp_desc_ptr2_2 = NULL;
+ }
+
+ scsi_ulto4b(total_len - 4, rtg_ptr->length);
+ if (ctl_is_single == 0) {
+ if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
+ if (lun->flags & CTL_LUN_PRIMARY_SC) {
+ tpg_desc_ptr1->pref_state = TPG_PRIMARY;
+ tpg_desc_ptr2->pref_state =
+ TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+ } else {
+ tpg_desc_ptr1->pref_state =
+ TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+ tpg_desc_ptr2->pref_state = TPG_PRIMARY;
+ }
+ } else {
+ if (lun->flags & CTL_LUN_PRIMARY_SC) {
+ tpg_desc_ptr1->pref_state =
+ TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+ tpg_desc_ptr2->pref_state = TPG_PRIMARY;
+ } else {
+ tpg_desc_ptr1->pref_state = TPG_PRIMARY;
+ tpg_desc_ptr2->pref_state =
+ TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+ }
+ }
+ } else {
+ tpg_desc_ptr1->pref_state = TPG_PRIMARY;
+ }
+ tpg_desc_ptr1->support = 0;
+ tpg_desc_ptr1->target_port_group[1] = 1;
+ tpg_desc_ptr1->status = TPG_IMPLICIT;
+	tpg_desc_ptr1->target_port_count = NUM_PORTS_PER_GRP;
+
+ if (ctl_is_single == 0) {
+ tpg_desc_ptr2->support = 0;
+ tpg_desc_ptr2->target_port_group[1] = 2;
+ tpg_desc_ptr2->status = TPG_IMPLICIT;
+ tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP;
+
+ tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
+ tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
+
+ tp_desc_ptr2_1->relative_target_port_identifier[1] = 9;
+ tp_desc_ptr2_2->relative_target_port_identifier[1] = 10;
+ } else {
+ if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
+ tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
+ tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
+ } else {
+ tp_desc_ptr1_1->relative_target_port_identifier[1] = 9;
+ tp_desc_ptr1_2->relative_target_port_identifier[1] = 10;
+ }
+ }
+
+ mtx_unlock(&softc->ctl_lock);
+
+ ctsio->be_move_done = ctl_config_move_done;
+
+ CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
+ ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
+ ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
+ ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
+ ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
+
+ ctl_datamove((union ctl_io *)ctsio);
+	return (retval);
+}
+
+int
+ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
+{
+ struct scsi_per_res_in *cdb;
+ int alloc_len, total_len = 0;
+ /* struct scsi_per_res_in_rsrv in_data; */
+ struct ctl_lun *lun;
+ struct ctl_softc *softc;
+
+ CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
+
+ softc = control_softc;
+
+ cdb = (struct scsi_per_res_in *)ctsio->cdb;
+
+ alloc_len = scsi_2btoul(cdb->length);
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+retry:
+ mtx_lock(&softc->ctl_lock);
+ switch (cdb->action) {
+ case SPRI_RK: /* read keys */
+ total_len = sizeof(struct scsi_per_res_in_keys) +
+ lun->pr_key_count *
+ sizeof(struct scsi_per_res_key);
+ break;
+ case SPRI_RR: /* read reservation */
+ if (lun->flags & CTL_LUN_PR_RESERVED)
+ total_len = sizeof(struct scsi_per_res_in_rsrv);
+ else
+ total_len = sizeof(struct scsi_per_res_in_header);
+ break;
+ case SPRI_RC: /* report capabilities */
+ total_len = sizeof(struct scsi_per_res_cap);
+ break;
+ case SPRI_RS: /* read full status */
+ default:
+ mtx_unlock(&softc->ctl_lock);
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ break; /* NOTREACHED */
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+
+ memset(ctsio->kern_data_ptr, 0, total_len);
+
+ mtx_lock(&softc->ctl_lock);
+ switch (cdb->action) {
+	case SPRI_RK: {		/* read keys */
+ struct scsi_per_res_in_keys *res_keys;
+ int i, key_count;
+
+ res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;
+
+ /*
+ * We had to drop the lock to allocate our buffer, which
+ * leaves time for someone to come in with another
+ * persistent reservation. (That is unlikely, though,
+ * since this should be the only persistent reservation
+ * command active right now.)
+ */
+ if (total_len != (sizeof(struct scsi_per_res_in_keys) +
+ (lun->pr_key_count *
+ sizeof(struct scsi_per_res_key)))){
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ printf("%s: reservation length changed, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ scsi_ulto4b(lun->PRGeneration, res_keys->header.generation);
+
+ scsi_ulto4b(sizeof(struct scsi_per_res_key) *
+ lun->pr_key_count, res_keys->header.length);
+
+ for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (!lun->per_res[i].registered)
+ continue;
+
+ /*
+ * We used lun->pr_key_count to calculate the
+ * size to allocate. If it turns out the number of
+ * initiators with the registered flag set is
+ * larger than that (i.e. they haven't been kept in
+ * sync), we've got a problem.
+ */
+ if (key_count >= lun->pr_key_count) {
+#ifdef NEEDTOPORT
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_PR_ERROR,
+ csevent_LogType_Fault,
+ csevent_AlertLevel_Yellow,
+ csevent_FRU_ShelfController,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "registered keys %d >= key "
+ "count %d", key_count,
+ lun->pr_key_count);
+#endif
+ key_count++;
+ continue;
+ }
+ memcpy(res_keys->keys[key_count].key,
+ lun->per_res[i].res_key.key,
+ ctl_min(sizeof(res_keys->keys[key_count].key),
+ sizeof(lun->per_res[i].res_key)));
+ key_count++;
+ }
+ break;
+ }
+	case SPRI_RR: {		/* read reservation */
+ struct scsi_per_res_in_rsrv *res;
+ int tmp_len, header_only;
+
+ res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
+
+ scsi_ulto4b(lun->PRGeneration, res->header.generation);
+
+		if (lun->flags & CTL_LUN_PR_RESERVED) {
+ tmp_len = sizeof(struct scsi_per_res_in_rsrv);
+ scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
+ res->header.length);
+ header_only = 0;
+ } else {
+ tmp_len = sizeof(struct scsi_per_res_in_header);
+ scsi_ulto4b(0, res->header.length);
+ header_only = 1;
+ }
+
+ /*
+ * We had to drop the lock to allocate our buffer, which
+ * leaves time for someone to come in with another
+ * persistent reservation. (That is unlikely, though,
+ * since this should be the only persistent reservation
+ * command active right now.)
+ */
+ if (tmp_len != total_len) {
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ printf("%s: reservation status changed, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ /*
+ * No reservation held, so we're done.
+ */
+ if (header_only != 0)
+ break;
+
+ /*
+ * If the registration is an All Registrants type, the key
+ * is 0, since it doesn't really matter.
+ */
+ if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
+ memcpy(res->data.reservation,
+ &lun->per_res[lun->pr_res_idx].res_key,
+ sizeof(struct scsi_per_res_key));
+ }
+ res->data.scopetype = lun->res_type;
+ break;
+ }
+	case SPRI_RC: {		/* report capabilities */
+ struct scsi_per_res_cap *res_cap;
+ uint16_t type_mask;
+
+ res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
+ scsi_ulto2b(sizeof(*res_cap), res_cap->length);
+ res_cap->flags2 |= SPRI_TMV;
+ type_mask = SPRI_TM_WR_EX_AR |
+ SPRI_TM_EX_AC_RO |
+ SPRI_TM_WR_EX_RO |
+ SPRI_TM_EX_AC |
+ SPRI_TM_WR_EX |
+ SPRI_TM_EX_AC_AR;
+ scsi_ulto2b(type_mask, res_cap->type_mask);
+ break;
+ }
+	case SPRI_RS:		/* read full status */
+ default:
+ /*
+ * This is a bug, because we just checked for this above,
+ * and should have returned an error.
+ */
+ panic("Invalid PR type %x", cdb->action);
+ break; /* NOTREACHED */
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ ctsio->be_move_done = ctl_config_move_done;
+
+ CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
+ ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
+ ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
+ ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
+ ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
+
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+/*
+ * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
+ * it should return.
+ */
+static int
+ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
+ uint64_t sa_res_key, uint8_t type, uint32_t residx,
+ struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
+ struct scsi_per_res_out_parms* param)
+{
+ union ctl_ha_msg persis_io;
+ int retval, i;
+ int isc_retval;
+
+ retval = 0;
+
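+	/*
+	 * There are three cases to handle here:
+	 *
+	 * 1) sa_res_key == 0: this is only valid against an "all
+	 *    registrants" reservation; every other nexus is unregistered
+	 *    and this one becomes the sole registrant (and the holder,
+	 *    for non-all-registrants types).
+	 *
+	 * 2) No reservation held, or an "all registrants" reservation
+	 *    with a nonzero sa_res_key: drop every registrant whose key
+	 *    matches sa_res_key.
+	 *
+	 * 3) A conventional reservation is held: either preempt the
+	 *    holder (if sa_res_key matches the holder's key) or just
+	 *    remove the matching registrants.
+	 */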
+ if (sa_res_key == 0) {
+ mtx_lock(&softc->ctl_lock);
+ if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
+ /* validate scope and type */
+ if ((cdb->scope_type & SPR_SCOPE_MASK) !=
+ SPR_LU_SCOPE) {
+ mtx_unlock(&softc->ctl_lock);
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_done((union ctl_io *)ctsio);
+ return (1);
+ }
+
+			if (type > 8 || type == 2 || type == 4 || type == 0) {
+ mtx_unlock(&softc->ctl_lock);
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (1);
+ }
+
+ /* temporarily unregister this nexus */
+ lun->per_res[residx].registered = 0;
+
+ /*
+ * Unregister everybody else and build UA for
+ * them
+ */
+ for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (lun->per_res[i].registered == 0)
+ continue;
+
+ if (!persis_offset
+				 && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ else if (persis_offset
+ && i >= persis_offset)
+ lun->pending_sense[i-persis_offset
+ ].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ lun->per_res[i].registered = 0;
+ memset(&lun->per_res[i].res_key, 0,
+ sizeof(struct scsi_per_res_key));
+ }
+ lun->per_res[residx].registered = 1;
+ lun->pr_key_count = 1;
+ lun->res_type = type;
+ if (lun->res_type != SPR_TYPE_WR_EX_AR
+ && lun->res_type != SPR_TYPE_EX_AC_AR)
+ lun->pr_res_idx = residx;
+
+ mtx_unlock(&softc->ctl_lock);
+ /* send msg to other side */
+ persis_io.hdr.nexus = ctsio->io_hdr.nexus;
+ persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
+ persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
+ persis_io.pr.pr_info.residx = lun->pr_res_idx;
+ persis_io.pr.pr_info.res_type = type;
+ memcpy(persis_io.pr.pr_info.sa_res_key,
+ param->serv_act_res_key,
+ sizeof(param->serv_act_res_key));
+ if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ &persis_io, sizeof(persis_io), 0)) >
+ CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Persis Out error returned "
+ "from ctl_ha_msg_send %d\n",
+ isc_retval);
+ }
+ } else {
+ /* not all registrants */
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 0,
+ /*field*/ 8,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (1);
+ }
+ } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
+ || !(lun->flags & CTL_LUN_PR_RESERVED)) {
+ int found = 0;
+
+ mtx_lock(&softc->ctl_lock);
+ if (res_key == sa_res_key) {
+			/*
+			 * Special case: the spec implies this is invalid,
+			 * but doesn't say how to handle it.  There are
+			 * two choices: return a reservation conflict, or
+			 * return a check condition with an illegal field
+			 * in the parameter data.  Since the latter is
+			 * what we do when the sa_res_key is zero, and
+			 * this error also involves the sa_res_key, take
+			 * that approach here.
+			 */
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 0,
+ /*field*/ 8,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (1);
+ }
+
+		for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
+			if (lun->per_res[i].registered == 0
+			 || memcmp(param->serv_act_res_key,
+			    lun->per_res[i].res_key.key,
+			    sizeof(struct scsi_per_res_key)) != 0)
+ continue;
+
+ found = 1;
+ lun->per_res[i].registered = 0;
+ memset(&lun->per_res[i].res_key, 0,
+ sizeof(struct scsi_per_res_key));
+ lun->pr_key_count--;
+
+ if (!persis_offset
+ && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ else if (persis_offset
+ && i >= persis_offset)
+				lun->pending_sense[i-persis_offset].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ }
+ mtx_unlock(&softc->ctl_lock);
+		if (!found) {
+			free(ctsio->kern_data_ptr, M_CTL);
+			ctl_set_reservation_conflict(ctsio);
+			ctl_done((union ctl_io *)ctsio);
+			/*
+			 * We've already completed the I/O here, so return
+			 * nonzero to tell our caller not to touch it again.
+			 */
+			return (1);
+		}
+ /* send msg to other side */
+ persis_io.hdr.nexus = ctsio->io_hdr.nexus;
+ persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
+ persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
+ persis_io.pr.pr_info.residx = lun->pr_res_idx;
+ persis_io.pr.pr_info.res_type = type;
+ memcpy(persis_io.pr.pr_info.sa_res_key,
+ param->serv_act_res_key,
+ sizeof(param->serv_act_res_key));
+ if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ &persis_io, sizeof(persis_io), 0)) >
+ CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Persis Out error returned from "
+ "ctl_ha_msg_send %d\n", isc_retval);
+ }
+ } else {
+ /* Reserved but not all registrants */
+ /* sa_res_key is res holder */
+ if (memcmp(param->serv_act_res_key,
+ lun->per_res[lun->pr_res_idx].res_key.key,
+ sizeof(struct scsi_per_res_key)) == 0) {
+ /* validate scope and type */
+ if ((cdb->scope_type & SPR_SCOPE_MASK) !=
+ SPR_LU_SCOPE) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_done((union ctl_io *)ctsio);
+ return (1);
+ }
+
+			if (type > 8 || type == 2 || type == 4 || type == 0) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (1);
+ }
+
+			/*
+			 * Do the following:
+			 *
+			 * If sa_res_key != res_key, remove all
+			 * registrants with sa_res_key and generate a
+			 * UA (Registrations Preempted) for them.  If
+			 * the reservation was not an exclusive type and
+			 * the type has changed, generate a UA
+			 * (Reservations Preempted) for all other
+			 * registered nexuses.  Then establish the new
+			 * reservation and holder.
+			 *
+			 * If res_key and sa_res_key are the same, do
+			 * the above, except don't unregister the
+			 * reservation holder.
+			 */
+
+ /*
+ * Temporarily unregister so it won't get
+ * removed or UA generated
+ */
+ lun->per_res[residx].registered = 0;
+ for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (lun->per_res[i].registered == 0)
+ continue;
+
+ if (memcmp(param->serv_act_res_key,
+ lun->per_res[i].res_key.key,
+ sizeof(struct scsi_per_res_key)) == 0) {
+ lun->per_res[i].registered = 0;
+ memset(&lun->per_res[i].res_key,
+ 0,
+ sizeof(struct scsi_per_res_key));
+ lun->pr_key_count--;
+
+ if (!persis_offset
+ && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i
+ ].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ else if (persis_offset
+ && i >= persis_offset)
+ lun->pending_sense[
+ i-persis_offset].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ } else if (type != lun->res_type
+ && (lun->res_type == SPR_TYPE_WR_EX_RO
+				 || lun->res_type == SPR_TYPE_EX_AC_RO)) {
+ if (!persis_offset
+ && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i
+ ].ua_pending |=
+ CTL_UA_RES_RELEASE;
+ else if (persis_offset
+ && i >= persis_offset)
+ lun->pending_sense[
+ i-persis_offset
+ ].ua_pending |=
+ CTL_UA_RES_RELEASE;
+ }
+ }
+ lun->per_res[residx].registered = 1;
+ lun->res_type = type;
+ if (lun->res_type != SPR_TYPE_WR_EX_AR
+ && lun->res_type != SPR_TYPE_EX_AC_AR)
+ lun->pr_res_idx = residx;
+ else
+ lun->pr_res_idx =
+ CTL_PR_ALL_REGISTRANTS;
+
+ persis_io.hdr.nexus = ctsio->io_hdr.nexus;
+ persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
+ persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
+ persis_io.pr.pr_info.residx = lun->pr_res_idx;
+ persis_io.pr.pr_info.res_type = type;
+ memcpy(persis_io.pr.pr_info.sa_res_key,
+ param->serv_act_res_key,
+ sizeof(param->serv_act_res_key));
+ if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ &persis_io, sizeof(persis_io), 0)) >
+ CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Persis Out error returned "
+ "from ctl_ha_msg_send %d\n",
+ isc_retval);
+ }
+ } else {
+			/*
+			 * sa_res_key is not the reservation holder; just
+			 * remove the matching registrants.
+			 */
+			int found = 0;
+ mtx_lock(&softc->ctl_lock);
+
+ for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (memcmp(param->serv_act_res_key,
+ lun->per_res[i].res_key.key,
+ sizeof(struct scsi_per_res_key)) != 0)
+ continue;
+
+ found = 1;
+ lun->per_res[i].registered = 0;
+ memset(&lun->per_res[i].res_key, 0,
+ sizeof(struct scsi_per_res_key));
+ lun->pr_key_count--;
+
+ if (!persis_offset
+ && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ else if (persis_offset
+ && i >= persis_offset)
+ lun->pending_sense[
+ i-persis_offset].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ }
+
+ if (!found) {
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (1);
+ }
+ mtx_unlock(&softc->ctl_lock);
+ persis_io.hdr.nexus = ctsio->io_hdr.nexus;
+ persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
+ persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
+ persis_io.pr.pr_info.residx = lun->pr_res_idx;
+ persis_io.pr.pr_info.res_type = type;
+ memcpy(persis_io.pr.pr_info.sa_res_key,
+ param->serv_act_res_key,
+ sizeof(param->serv_act_res_key));
+ if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ &persis_io, sizeof(persis_io), 0)) >
+ CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Persis Out error returned "
+ "from ctl_ha_msg_send %d\n",
+ isc_retval);
+ }
+ }
+ }
+
+ lun->PRGeneration++;
+
+ return (retval);
+}
+
+static void
+ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
+{
+ int i;
+
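+	/*
+	 * This is the peer-side mirror of ctl_pro_preempt(): it applies
+	 * a preempt that the other HA node has already validated, so
+	 * there is no status to return and no message to send back; we
+	 * just update the registration/reservation state and queue UAs.
+	 */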
+ if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
+ || lun->pr_res_idx == CTL_PR_NO_RESERVATION
+ || memcmp(&lun->per_res[lun->pr_res_idx].res_key,
+ msg->pr.pr_info.sa_res_key,
+ sizeof(struct scsi_per_res_key)) != 0) {
+ uint64_t sa_res_key;
+ sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
+
+ if (sa_res_key == 0) {
+ /* temporarily unregister this nexus */
+ lun->per_res[msg->pr.pr_info.residx].registered = 0;
+
+ /*
+ * Unregister everybody else and build UA for
+ * them
+ */
+ for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (lun->per_res[i].registered == 0)
+ continue;
+
+ if (!persis_offset
+ && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ else if (persis_offset && i >= persis_offset)
+ lun->pending_sense[i -
+ persis_offset].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ lun->per_res[i].registered = 0;
+ memset(&lun->per_res[i].res_key, 0,
+ sizeof(struct scsi_per_res_key));
+ }
+
+ lun->per_res[msg->pr.pr_info.residx].registered = 1;
+ lun->pr_key_count = 1;
+ lun->res_type = msg->pr.pr_info.res_type;
+ if (lun->res_type != SPR_TYPE_WR_EX_AR
+ && lun->res_type != SPR_TYPE_EX_AC_AR)
+ lun->pr_res_idx = msg->pr.pr_info.residx;
+ } else {
+ for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (memcmp(msg->pr.pr_info.sa_res_key,
+ lun->per_res[i].res_key.key,
+ sizeof(struct scsi_per_res_key)) != 0)
+ continue;
+
+ lun->per_res[i].registered = 0;
+ memset(&lun->per_res[i].res_key, 0,
+ sizeof(struct scsi_per_res_key));
+ lun->pr_key_count--;
+
+ if (!persis_offset
+				 && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ else if (persis_offset
+ && i >= persis_offset)
+ lun->pending_sense[i -
+ persis_offset].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ }
+ }
+ } else {
+ /*
+ * Temporarily unregister so it won't get removed
+ * or UA generated
+ */
+ lun->per_res[msg->pr.pr_info.residx].registered = 0;
+ for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (lun->per_res[i].registered == 0)
+ continue;
+
+ if (memcmp(msg->pr.pr_info.sa_res_key,
+ lun->per_res[i].res_key.key,
+ sizeof(struct scsi_per_res_key)) == 0) {
+ lun->per_res[i].registered = 0;
+ memset(&lun->per_res[i].res_key, 0,
+ sizeof(struct scsi_per_res_key));
+ lun->pr_key_count--;
+ if (!persis_offset
+ && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ else if (persis_offset
+ && i >= persis_offset)
+ lun->pending_sense[i -
+ persis_offset].ua_pending |=
+ CTL_UA_REG_PREEMPT;
+ } else if (msg->pr.pr_info.res_type != lun->res_type
+ && (lun->res_type == SPR_TYPE_WR_EX_RO
+ || lun->res_type == SPR_TYPE_EX_AC_RO)) {
+ if (!persis_offset
+				 && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i
+ ].ua_pending |=
+ CTL_UA_RES_RELEASE;
+ else if (persis_offset
+ && i >= persis_offset)
+ lun->pending_sense[i -
+ persis_offset].ua_pending |=
+ CTL_UA_RES_RELEASE;
+ }
+ }
+ lun->per_res[msg->pr.pr_info.residx].registered = 1;
+ lun->res_type = msg->pr.pr_info.res_type;
+ if (lun->res_type != SPR_TYPE_WR_EX_AR
+ && lun->res_type != SPR_TYPE_EX_AC_AR)
+ lun->pr_res_idx = msg->pr.pr_info.residx;
+ else
+ lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
+ }
+ lun->PRGeneration++;
+
+}
+
+
+int
+ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
+{
+ int retval;
+ int isc_retval;
+ u_int32_t param_len;
+ struct scsi_per_res_out *cdb;
+ struct ctl_lun *lun;
+ struct scsi_per_res_out_parms* param;
+ struct ctl_softc *softc;
+ uint32_t residx;
+ uint64_t res_key, sa_res_key;
+ uint8_t type;
+ union ctl_ha_msg persis_io;
+ int i;
+
+ CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ softc = control_softc;
+
+ cdb = (struct scsi_per_res_out *)ctsio->cdb;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+	/*
+	 * We only support whole-LUN scope.  The scope & type are ignored
+	 * for register, register and ignore existing key, and clear.
+	 * We sometimes ignore scope and type on preempts, too.
+	 * Verify the reservation type here as well.
+	 */
+ type = cdb->scope_type & SPR_TYPE_MASK;
+ if ((cdb->action == SPRO_RESERVE)
+ || (cdb->action == SPRO_RELEASE)) {
+ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+		if (type > 8 || type == 2 || type == 4 || type == 0) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ }
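+
+	/*
+	 * For reference, the type values accepted above are the SPC-3
+	 * persistent reservation types: 1 (Write Exclusive), 3
+	 * (Exclusive Access), 5 (Write Exclusive - Registrants Only), 6
+	 * (Exclusive Access - Registrants Only), 7 (Write Exclusive -
+	 * All Registrants) and 8 (Exclusive Access - All Registrants).
+	 * 0, 2, 4 and anything above 8 are reserved or obsolete, hence
+	 * the check.
+	 */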
+
+ switch (cdb->action & SPRO_ACTION_MASK) {
+ case SPRO_REGISTER:
+ case SPRO_RESERVE:
+ case SPRO_RELEASE:
+ case SPRO_CLEAR:
+ case SPRO_PREEMPT:
+ case SPRO_REG_IGNO:
+ break;
+ case SPRO_REG_MOVE:
+ case SPRO_PRE_ABO:
+ default:
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ break; /* NOTREACHED */
+ }
+
+ param_len = scsi_4btoul(cdb->length);
+
+ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+ ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctl_set_busy(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ ctsio->kern_data_len = param_len;
+ ctsio->kern_total_len = param_len;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
+
+ residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ res_key = scsi_8btou64(param->res_key.key);
+ sa_res_key = scsi_8btou64(param->serv_act_res_key);
+
+ /*
+ * Validate the reservation key here except for SPRO_REG_IGNO
+ * This must be done for all other service actions
+ */
+ if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
+ mtx_lock(&softc->ctl_lock);
+ if (lun->per_res[residx].registered) {
+ if (memcmp(param->res_key.key,
+ lun->per_res[residx].res_key.key,
+ ctl_min(sizeof(param->res_key),
+ sizeof(lun->per_res[residx].res_key))) != 0) {
+ /*
+ * The current key passed in doesn't match
+ * the one the initiator previously
+ * registered.
+ */
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
+ /*
+ * We are not registered
+ */
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ } else if (res_key != 0) {
+ /*
+ * We are not registered and trying to register but
+ * the register key isn't zero.
+ */
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ mtx_unlock(&softc->ctl_lock);
+ }
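+
+	/*
+	 * To summarize the validation above (which applies to every
+	 * action except REGISTER AND IGNORE EXISTING KEY):
+	 *
+	 *   registered, res_key doesn't match      -> conflict
+	 *   unregistered, action != REGISTER       -> conflict
+	 *   unregistered, REGISTER, res_key != 0   -> conflict
+	 *   otherwise                              -> proceed
+	 */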
+
+ switch (cdb->action & SPRO_ACTION_MASK) {
+ case SPRO_REGISTER:
+ case SPRO_REG_IGNO: {
+
+#if 0
+ printf("Registration received\n");
+#endif
+
+ /*
+ * We don't support any of these options, as we report in
+ * the read capabilities request (see
+ * ctl_persistent_reserve_in(), above).
+ */
+ if ((param->flags & SPR_SPEC_I_PT)
+ || (param->flags & SPR_ALL_TG_PT)
+ || (param->flags & SPR_APTPL)) {
+ int bit_ptr;
+
+ if (param->flags & SPR_APTPL)
+ bit_ptr = 0;
+ else if (param->flags & SPR_ALL_TG_PT)
+ bit_ptr = 2;
+ else /* SPR_SPEC_I_PT */
+ bit_ptr = 3;
+
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 0,
+ /*field*/ 20,
+ /*bit_valid*/ 1,
+ /*bit*/ bit_ptr);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
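+
+		/*
+		 * Those flags all live in byte 20 of the PERSISTENT
+		 * RESERVE OUT parameter list (hence field 20 in the
+		 * sense data above): the list begins with the 8-byte
+		 * reservation key and the 8-byte service action
+		 * reservation key, followed by four obsolete bytes; in
+		 * the flags byte, APTPL is bit 0, ALL_TG_PT is bit 2
+		 * and SPEC_I_PT is bit 3.
+		 */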
+
+ mtx_lock(&softc->ctl_lock);
+
+ /*
+ * The initiator wants to clear the
+ * key/unregister.
+ */
+ if (sa_res_key == 0) {
+ if ((res_key == 0
+ && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
+ || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
+ && !lun->per_res[residx].registered)) {
+ mtx_unlock(&softc->ctl_lock);
+ goto done;
+ }
+
+ lun->per_res[residx].registered = 0;
+ memset(&lun->per_res[residx].res_key,
+ 0, sizeof(lun->per_res[residx].res_key));
+ lun->pr_key_count--;
+
+ if (residx == lun->pr_res_idx) {
+ lun->flags &= ~CTL_LUN_PR_RESERVED;
+ lun->pr_res_idx = CTL_PR_NO_RESERVATION;
+
+ if ((lun->res_type == SPR_TYPE_WR_EX_RO
+ || lun->res_type == SPR_TYPE_EX_AC_RO)
+ && lun->pr_key_count) {
+ /*
+ * If the reservation is a registrants
+ * only type we need to generate a UA
+ * for other registered inits. The
+ * sense code should be RESERVATIONS
+ * RELEASED
+ */
+
+				for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ if (lun->per_res[
+ i+persis_offset].registered
+ == 0)
+ continue;
+ lun->pending_sense[i
+ ].ua_pending |=
+ CTL_UA_RES_RELEASE;
+ }
+ }
+ lun->res_type = 0;
+ } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
+				if (lun->pr_key_count == 0) {
+ lun->flags &= ~CTL_LUN_PR_RESERVED;
+ lun->res_type = 0;
+ lun->pr_res_idx = CTL_PR_NO_RESERVATION;
+ }
+ }
+ persis_io.hdr.nexus = ctsio->io_hdr.nexus;
+ persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
+ persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
+ persis_io.pr.pr_info.residx = residx;
+ if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ &persis_io, sizeof(persis_io), 0 )) >
+ CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Persis Out error returned from "
+ "ctl_ha_msg_send %d\n", isc_retval);
+ }
+ mtx_unlock(&softc->ctl_lock);
+ } else /* sa_res_key != 0 */ {
+
+ /*
+ * If we aren't registered currently then increment
+ * the key count and set the registered flag.
+ */
+ if (!lun->per_res[residx].registered) {
+ lun->pr_key_count++;
+ lun->per_res[residx].registered = 1;
+ }
+
+ memcpy(&lun->per_res[residx].res_key,
+ param->serv_act_res_key,
+ ctl_min(sizeof(param->serv_act_res_key),
+ sizeof(lun->per_res[residx].res_key)));
+
+ persis_io.hdr.nexus = ctsio->io_hdr.nexus;
+ persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
+ persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
+ persis_io.pr.pr_info.residx = residx;
+ memcpy(persis_io.pr.pr_info.sa_res_key,
+ param->serv_act_res_key,
+ sizeof(param->serv_act_res_key));
+ mtx_unlock(&softc->ctl_lock);
+ if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ &persis_io, sizeof(persis_io), 0)) >
+ CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Persis Out error returned from "
+ "ctl_ha_msg_send %d\n", isc_retval);
+ }
+ }
+ lun->PRGeneration++;
+
+ break;
+ }
+ case SPRO_RESERVE:
+#if 0
+ printf("Reserve executed type %d\n", type);
+#endif
+ mtx_lock(&softc->ctl_lock);
+ if (lun->flags & CTL_LUN_PR_RESERVED) {
+			/*
+			 * If this nexus isn't the reservation holder and
+			 * the reservation isn't an "all registrants"
+			 * type, or if the type is different, we have a
+			 * conflict.
+			 */
+ if ((lun->pr_res_idx != residx
+ && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
+ || lun->res_type != type) {
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ } else /* create a reservation */ {
+			/*
+			 * If it's not an "all registrants" type, record
+			 * the reservation holder.
+			 */
+ if (type != SPR_TYPE_WR_EX_AR
+ && type != SPR_TYPE_EX_AC_AR)
+ lun->pr_res_idx = residx; /* Res holder */
+ else
+ lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
+
+ lun->flags |= CTL_LUN_PR_RESERVED;
+ lun->res_type = type;
+
+ mtx_unlock(&softc->ctl_lock);
+
+ /* send msg to other side */
+ persis_io.hdr.nexus = ctsio->io_hdr.nexus;
+ persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
+ persis_io.pr.pr_info.action = CTL_PR_RESERVE;
+ persis_io.pr.pr_info.residx = lun->pr_res_idx;
+ persis_io.pr.pr_info.res_type = type;
+ if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ &persis_io, sizeof(persis_io), 0)) >
+ CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Persis Out error returned from "
+ "ctl_ha_msg_send %d\n", isc_retval);
+ }
+ }
+ break;
+
+ case SPRO_RELEASE:
+ mtx_lock(&softc->ctl_lock);
+ if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
+			/* No reservation exists; return good status. */
+ mtx_unlock(&softc->ctl_lock);
+ goto done;
+ }
+ /*
+ * Is this nexus a reservation holder?
+ */
+ if (lun->pr_res_idx != residx
+ && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
+			/*
+			 * Not the reservation holder; return good status
+			 * but do nothing.
+			 */
+ mtx_unlock(&softc->ctl_lock);
+ goto done;
+ }
+
+ if (lun->res_type != type) {
+ mtx_unlock(&softc->ctl_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_illegal_pr_release(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /* okay to release */
+ lun->flags &= ~CTL_LUN_PR_RESERVED;
+ lun->pr_res_idx = CTL_PR_NO_RESERVATION;
+ lun->res_type = 0;
+
+ /*
+ * if this isn't an exclusive access
+ * res generate UA for all other
+ * registrants.
+ */
+ if (type != SPR_TYPE_EX_AC
+ && type != SPR_TYPE_WR_EX) {
+			/*
+			 * Temporarily unregister so the loop below doesn't
+			 * generate a UA for this nexus.
+			 */
+ lun->per_res[residx].registered = 0;
+
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ if (lun->per_res[i+persis_offset].registered
+ == 0)
+ continue;
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_RES_RELEASE;
+ }
+
+ lun->per_res[residx].registered = 1;
+ }
+ mtx_unlock(&softc->ctl_lock);
+ /* Send msg to other side */
+ persis_io.hdr.nexus = ctsio->io_hdr.nexus;
+ persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
+ persis_io.pr.pr_info.action = CTL_PR_RELEASE;
+ if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Persis Out error returned from "
+ "ctl_ha_msg_send %d\n", isc_retval);
+ }
+ break;
+
+ case SPRO_CLEAR:
+ /* send msg to other side */
+
+ mtx_lock(&softc->ctl_lock);
+ lun->flags &= ~CTL_LUN_PR_RESERVED;
+ lun->res_type = 0;
+ lun->pr_key_count = 0;
+ lun->pr_res_idx = CTL_PR_NO_RESERVATION;
+
+
+ memset(&lun->per_res[residx].res_key,
+ 0, sizeof(lun->per_res[residx].res_key));
+ lun->per_res[residx].registered = 0;
+
+		for (i = 0; i < 2*CTL_MAX_INITIATORS; i++)
+ if (lun->per_res[i].registered) {
+ if (!persis_offset && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_RES_PREEMPT;
+				else if (persis_offset && i >= persis_offset)
+					lun->pending_sense[
+					    i - persis_offset].ua_pending |=
+					    CTL_UA_RES_PREEMPT;
+
+ memset(&lun->per_res[i].res_key,
+ 0, sizeof(struct scsi_per_res_key));
+ lun->per_res[i].registered = 0;
+ }
+ lun->PRGeneration++;
+ mtx_unlock(&softc->ctl_lock);
+ persis_io.hdr.nexus = ctsio->io_hdr.nexus;
+ persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
+ persis_io.pr.pr_info.action = CTL_PR_CLEAR;
+ if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Persis Out error returned from "
+ "ctl_ha_msg_send %d\n", isc_retval);
+ }
+ break;
+
+ case SPRO_PREEMPT: {
+ int nretval;
+
+ nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
+ residx, ctsio, cdb, param);
+ if (nretval != 0)
+ return (CTL_RETVAL_COMPLETE);
+ break;
+ }
+ case SPRO_REG_MOVE:
+ case SPRO_PRE_ABO:
+ default:
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ break; /* NOTREACHED */
+ }
+
+done:
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+
+ return (retval);
+}
+
+/*
+ * This routine handles a message from the other SC pertaining to
+ * persistent reserve out.  All the error checking will have been done
+ * already, so only the action itself needs to be performed here to keep
+ * the two sides in sync.
+ */
+static void
+ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
+{
+ struct ctl_lun *lun;
+ struct ctl_softc *softc;
+ int i;
+
+ softc = control_softc;
+
+ mtx_lock(&softc->ctl_lock);
+
+ lun = softc->ctl_luns[msg->hdr.nexus.targ_lun];
+ switch(msg->pr.pr_info.action) {
+ case CTL_PR_REG_KEY:
+ if (!lun->per_res[msg->pr.pr_info.residx].registered) {
+ lun->per_res[msg->pr.pr_info.residx].registered = 1;
+ lun->pr_key_count++;
+ }
+ lun->PRGeneration++;
+ memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key,
+ msg->pr.pr_info.sa_res_key,
+ sizeof(struct scsi_per_res_key));
+ break;
+
+ case CTL_PR_UNREG_KEY:
+ lun->per_res[msg->pr.pr_info.residx].registered = 0;
+ memset(&lun->per_res[msg->pr.pr_info.residx].res_key,
+ 0, sizeof(struct scsi_per_res_key));
+ lun->pr_key_count--;
+
+		/*
+		 * XXX Need to see if the reservation has been released;
+		 * if so, do we need to generate a UA?
+		 */
+ if (msg->pr.pr_info.residx == lun->pr_res_idx) {
+ lun->flags &= ~CTL_LUN_PR_RESERVED;
+ lun->pr_res_idx = CTL_PR_NO_RESERVATION;
+
+ if ((lun->res_type == SPR_TYPE_WR_EX_RO
+ || lun->res_type == SPR_TYPE_EX_AC_RO)
+ && lun->pr_key_count) {
+ /*
+ * If the reservation is a registrants
+ * only type we need to generate a UA
+ * for other registered inits. The
+ * sense code should be RESERVATIONS
+ * RELEASED
+ */
+
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+					if (lun->per_res[i +
+					    persis_offset].registered == 0)
+						continue;
+
+					lun->pending_sense[i].ua_pending |=
+					    CTL_UA_RES_RELEASE;
+ }
+ }
+ lun->res_type = 0;
+ } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
+			if (lun->pr_key_count == 0) {
+ lun->flags &= ~CTL_LUN_PR_RESERVED;
+ lun->res_type = 0;
+ lun->pr_res_idx = CTL_PR_NO_RESERVATION;
+ }
+ }
+ lun->PRGeneration++;
+ break;
+
+ case CTL_PR_RESERVE:
+ lun->flags |= CTL_LUN_PR_RESERVED;
+ lun->res_type = msg->pr.pr_info.res_type;
+ lun->pr_res_idx = msg->pr.pr_info.residx;
+
+ break;
+
+ case CTL_PR_RELEASE:
+ /*
+ * if this isn't an exclusive access res generate UA for all
+ * other registrants.
+ */
+ if (lun->res_type != SPR_TYPE_EX_AC
+ && lun->res_type != SPR_TYPE_WR_EX) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++)
+ if (lun->per_res[i+persis_offset].registered)
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_RES_RELEASE;
+ }
+
+ lun->flags &= ~CTL_LUN_PR_RESERVED;
+ lun->pr_res_idx = CTL_PR_NO_RESERVATION;
+ lun->res_type = 0;
+ break;
+
+ case CTL_PR_PREEMPT:
+ ctl_pro_preempt_other(lun, msg);
+ break;
+ case CTL_PR_CLEAR:
+ lun->flags &= ~CTL_LUN_PR_RESERVED;
+ lun->res_type = 0;
+ lun->pr_key_count = 0;
+ lun->pr_res_idx = CTL_PR_NO_RESERVATION;
+
+		for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (lun->per_res[i].registered == 0)
+ continue;
+ if (!persis_offset
+ && i < CTL_MAX_INITIATORS)
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_RES_PREEMPT;
+ else if (persis_offset
+ && i >= persis_offset)
+				lun->pending_sense[
+				    i - persis_offset].ua_pending |=
+				    CTL_UA_RES_PREEMPT;
+ memset(&lun->per_res[i].res_key, 0,
+ sizeof(struct scsi_per_res_key));
+ lun->per_res[i].registered = 0;
+ }
+ lun->PRGeneration++;
+ break;
+ }
+
+ mtx_unlock(&softc->ctl_lock);
+}
+
+int
+ctl_read_write(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct ctl_lba_len lbalen;
+ uint64_t lba;
+ uint32_t num_blocks;
+ int reladdr, fua, dpo, ebp;
+ int retval;
+ int isread;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
+
+ reladdr = 0;
+ fua = 0;
+ dpo = 0;
+ ebp = 0;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10
+ || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
+	if ((lun->flags & CTL_LUN_PR_RESERVED) && isread) {
+ uint32_t residx;
+
+ /*
+ * XXX KDM need a lock here.
+ */
+ residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ if ((lun->res_type == SPR_TYPE_EX_AC
+ && residx != lun->pr_res_idx)
+ || ((lun->res_type == SPR_TYPE_EX_AC_RO
+ || lun->res_type == SPR_TYPE_EX_AC_AR)
+ && !lun->per_res[residx].registered)) {
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ }
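+	/*
+	 * Illustrative consequence of the check above: with an Exclusive
+	 * Access reservation held by another initiator, a READ from this
+	 * nexus gets a reservation conflict, while under a Write Exclusive
+	 * reservation reads are still allowed through.
+	 */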
+
+ switch (ctsio->cdb[0]) {
+ case READ_6:
+ case WRITE_6: {
+ struct scsi_rw_6 *cdb;
+
+ cdb = (struct scsi_rw_6 *)ctsio->cdb;
+
+ lba = scsi_3btoul(cdb->addr);
+ /* only 5 bits are valid in the most significant address byte */
+ lba &= 0x1fffff;
+ num_blocks = cdb->length;
+ /*
+ * This is correct according to SBC-2.
+ */
+ if (num_blocks == 0)
+ num_blocks = 256;
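+		/*
+		 * Example decode: the CDB 08 1f ff ff 00 00 yields
+		 * lba 0x1fffff after masking and, because the length
+		 * byte is 0, num_blocks 256.
+		 */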
+ break;
+ }
+ case READ_10:
+ case WRITE_10: {
+ struct scsi_rw_10 *cdb;
+
+ cdb = (struct scsi_rw_10 *)ctsio->cdb;
+
+ if (cdb->byte2 & SRW10_RELADDR)
+ reladdr = 1;
+ if (cdb->byte2 & SRW10_FUA)
+ fua = 1;
+ if (cdb->byte2 & SRW10_DPO)
+ dpo = 1;
+
+ if ((cdb->opcode == WRITE_10)
+ && (cdb->byte2 & SRW10_EBP))
+ ebp = 1;
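+		/* Example: a byte2 value of 0x18 sets both DPO and FUA. */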
+
+ lba = scsi_4btoul(cdb->addr);
+ num_blocks = scsi_2btoul(cdb->length);
+ break;
+ }
+ case WRITE_VERIFY_10: {
+ struct scsi_write_verify_10 *cdb;
+
+ cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
+
+ /*
+ * XXX KDM we should do actual write verify support at some
+ * point. This is obviously fake, we're just translating
+ * things to a write. So we don't even bother checking the
+ * BYTCHK field, since we don't do any verification. If
+ * the user asks for it, we'll just pretend we did it.
+ */
+ if (cdb->byte2 & SWV_DPO)
+ dpo = 1;
+
+ lba = scsi_4btoul(cdb->addr);
+ num_blocks = scsi_2btoul(cdb->length);
+ break;
+ }
+ case READ_12:
+ case WRITE_12: {
+ struct scsi_rw_12 *cdb;
+
+ cdb = (struct scsi_rw_12 *)ctsio->cdb;
+
+ if (cdb->byte2 & SRW12_RELADDR)
+ reladdr = 1;
+ if (cdb->byte2 & SRW12_FUA)
+ fua = 1;
+ if (cdb->byte2 & SRW12_DPO)
+ dpo = 1;
+ lba = scsi_4btoul(cdb->addr);
+ num_blocks = scsi_4btoul(cdb->length);
+ break;
+ }
+ case WRITE_VERIFY_12: {
+ struct scsi_write_verify_12 *cdb;
+
+ cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
+
+ if (cdb->byte2 & SWV_DPO)
+ dpo = 1;
+
+ lba = scsi_4btoul(cdb->addr);
+ num_blocks = scsi_4btoul(cdb->length);
+
+ break;
+ }
+ case READ_16:
+ case WRITE_16: {
+ struct scsi_rw_16 *cdb;
+
+ cdb = (struct scsi_rw_16 *)ctsio->cdb;
+
+ if (cdb->byte2 & SRW12_RELADDR)
+ reladdr = 1;
+ if (cdb->byte2 & SRW12_FUA)
+ fua = 1;
+ if (cdb->byte2 & SRW12_DPO)
+ dpo = 1;
+
+ lba = scsi_8btou64(cdb->addr);
+ num_blocks = scsi_4btoul(cdb->length);
+ break;
+ }
+ case WRITE_VERIFY_16: {
+ struct scsi_write_verify_16 *cdb;
+
+ cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
+
+ if (cdb->byte2 & SWV_DPO)
+ dpo = 1;
+
+ lba = scsi_8btou64(cdb->addr);
+ num_blocks = scsi_4btoul(cdb->length);
+ break;
+ }
+ default:
+ /*
+ * We got a command we don't support. This shouldn't
+ * happen, commands should be filtered out above us.
+ */
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ break; /* NOTREACHED */
+ }
+
+ /*
+ * XXX KDM what do we do with the DPO and FUA bits? FUA might be
+ * interesting for us, but if RAIDCore is in write-back mode,
+ * getting it to do write-through for a particular transaction may
+ * not be possible.
+ */
+ /*
+ * We don't support relative addressing. That also requires
+ * supporting linked commands, which we don't do.
+ */
+ if (reladdr != 0) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * The first check is to make sure we're in bounds, the second
+ * check is to catch wrap-around problems. If the lba + num blocks
+ * is less than the lba, then we've wrapped around and the block
+ * range is invalid anyway.
+ */
+ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
+ || ((lba + num_blocks) < lba)) {
+ ctl_set_lba_out_of_range(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * According to SBC-3, a transfer length of 0 is not an error.
+ * Note that this cannot happen with WRITE(6) or READ(6), since 0
+ * translates to 256 blocks for those commands.
+ */
+ if (num_blocks == 0) {
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ lbalen.lba = lba;
+ lbalen.len = num_blocks;
+ memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen,
+ sizeof(lbalen));
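+	/*
+	 * The decoded LBA and length are stashed in the per-I/O private
+	 * area above so the backend's data_submit routine can pick them
+	 * up without re-parsing the CDB.
+	 */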
+
+ CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
+
+ retval = lun->backend->data_submit((union ctl_io *)ctsio);
+
+ return (retval);
+}
+
+int
+ctl_report_luns(struct ctl_scsiio *ctsio)
+{
+ struct scsi_report_luns *cdb;
+ struct scsi_report_luns_data *lun_data;
+ struct ctl_lun *lun, *request_lun;
+ int num_luns, retval;
+ uint32_t alloc_len, lun_datalen;
+ int num_filled, well_known;
+ uint32_t initidx;
+
+ retval = CTL_RETVAL_COMPLETE;
+ well_known = 0;
+
+ cdb = (struct scsi_report_luns *)ctsio->cdb;
+
+ CTL_DEBUG_PRINT(("ctl_report_luns\n"));
+
+ mtx_lock(&control_softc->ctl_lock);
+ num_luns = control_softc->num_luns;
+ mtx_unlock(&control_softc->ctl_lock);
+
+ switch (cdb->select_report) {
+ case RPL_REPORT_DEFAULT:
+ case RPL_REPORT_ALL:
+ break;
+ case RPL_REPORT_WELLKNOWN:
+ well_known = 1;
+ num_luns = 0;
+ break;
+ default:
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (retval);
+ break; /* NOTREACHED */
+ }
+
+ alloc_len = scsi_4btoul(cdb->length);
+ /*
+ * The initiator has to allocate at least 16 bytes for this request,
+ * so he can at least get the header and the first LUN. Otherwise
+ * we reject the request (per SPC-3 rev 14, section 6.21).
+ */
+ if (alloc_len < (sizeof(struct scsi_report_luns_data) +
+ sizeof(struct scsi_report_luns_lundata))) {
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 6,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (retval);
+ }
+
+ request_lun = (struct ctl_lun *)
+ ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ lun_datalen = sizeof(*lun_data) +
+ (num_luns * sizeof(struct scsi_report_luns_lundata));
+
+ ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK);
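+	/*
+	 * Note: malloc(9) with M_WAITOK sleeps until memory is available
+	 * and cannot return NULL, so the check below is purely defensive.
+	 */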
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
+ ctsio->kern_sg_entries = 0;
+
+ if (lun_datalen < alloc_len) {
+ ctsio->residual = alloc_len - lun_datalen;
+ ctsio->kern_data_len = lun_datalen;
+ ctsio->kern_total_len = lun_datalen;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+
+ initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+
+ memset(lun_data, 0, lun_datalen);
+
+ /*
+ * We set this to the actual data length, regardless of how much
+ * space we actually have to return results. If the user looks at
+ * this value, he'll know whether or not he allocated enough space
+ * and reissue the command if necessary. We don't support well
+ * known logical units, so if the user asks for that, return none.
+ */
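+	/* Per SPC-3, the LUN LIST LENGTH field counts 8 bytes per LUN. */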
+ scsi_ulto4b(lun_datalen - 8, lun_data->length);
+
+ mtx_lock(&control_softc->ctl_lock);
+ for (num_filled = 0, lun = STAILQ_FIRST(&control_softc->lun_list);
+ (lun != NULL) && (num_filled < num_luns);
+ lun = STAILQ_NEXT(lun, links)) {
+
+ if (lun->lun <= 0xff) {
+ /*
+ * Peripheral addressing method, bus number 0.
+ */
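+			/* e.g. LUN 5 is returned as bytes 00 05. */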
+ lun_data->luns[num_filled].lundata[0] =
+ RPL_LUNDATA_ATYP_PERIPH;
+ lun_data->luns[num_filled].lundata[1] = lun->lun;
+ num_filled++;
+ } else if (lun->lun <= 0x3fff) {
+ /*
+ * Flat addressing method.
+ */
+			lun_data->luns[num_filled].lundata[0] =
+				RPL_LUNDATA_ATYP_FLAT |
+				(lun->lun & RPL_LUNDATA_FLAT_LUN_MASK);
+			lun_data->luns[num_filled].lundata[1] =
+				lun->lun >> RPL_LUNDATA_FLAT_LUN_BITS;
+ num_filled++;
+ } else {
+ printf("ctl_report_luns: bogus LUN number %jd, "
+ "skipping\n", (intmax_t)lun->lun);
+ }
+ /*
+ * According to SPC-3, rev 14 section 6.21:
+ *
+ * "The execution of a REPORT LUNS command to any valid and
+ * installed logical unit shall clear the REPORTED LUNS DATA
+ * HAS CHANGED unit attention condition for all logical
+ * units of that target with respect to the requesting
+ * initiator. A valid and installed logical unit is one
+ * having a PERIPHERAL QUALIFIER of 000b in the standard
+ * INQUIRY data (see 6.4.2)."
+ *
+ * If request_lun is NULL, the LUN this report luns command
+ * was issued to is either disabled or doesn't exist. In that
+ * case, we shouldn't clear any pending lun change unit
+ * attention.
+ */
+ if (request_lun != NULL)
+ lun->pending_sense[initidx].ua_pending &=
+ ~CTL_UA_LUN_CHANGE;
+ }
+ mtx_unlock(&control_softc->ctl_lock);
+
+ /*
+ * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
+ * this request.
+ */
+ ctsio->scsi_status = SCSI_STATUS_OK;
+
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (retval);
+}
+
+int
+ctl_request_sense(struct ctl_scsiio *ctsio)
+{
+ struct scsi_request_sense *cdb;
+ struct scsi_sense_data *sense_ptr;
+ struct ctl_lun *lun;
+ uint32_t initidx;
+ int have_error;
+ ctl_sense_format sense_format;
+
+ cdb = (struct scsi_request_sense *)ctsio->cdb;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ CTL_DEBUG_PRINT(("ctl_request_sense\n"));
+
+ /*
+ * Determine which sense format the user wants.
+ */
+ if (cdb->byte2 & SRS_DESC)
+ sense_format = CTL_SENSE_DESCRIPTOR;
+ else
+ sense_format = CTL_SENSE_FIXED;
+
+ ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
+ ctsio->kern_sg_entries = 0;
+
+ /*
+ * struct scsi_sense_data, which is currently set to 256 bytes, is
+ * larger than the largest allowed value for the length field in the
+ * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
+ */
+ ctsio->residual = 0;
+ ctsio->kern_data_len = cdb->length;
+ ctsio->kern_total_len = cdb->length;
+
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+
+ /*
+ * If we don't have a LUN, we don't have any pending sense.
+ */
+ if (lun == NULL)
+ goto no_sense;
+
+ have_error = 0;
+ initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+ /*
+ * Check for pending sense, and then for pending unit attentions.
+ * Pending sense gets returned first, then pending unit attentions.
+ */
+ mtx_lock(&lun->ctl_softc->ctl_lock);
+ if (ctl_is_set(lun->have_ca, initidx)) {
+ ctl_sense_format stored_format;
+
+ /*
+ * Check to see which sense format was used for the stored
+ * sense data.
+ */
+ stored_format = ctl_get_sense_format(
+ &lun->pending_sense[initidx].sense);
+
+ /*
+ * If the user requested a different sense format than the
+ * one we stored, then we need to convert it to the other
+ * format. If we're going from descriptor to fixed format
+ * sense data, we may lose things in translation, depending
+ * on what options were used.
+ */
+ if ((stored_format == CTL_SENSE_FIXED)
+ && (sense_format == CTL_SENSE_DESCRIPTOR))
+ ctl_sense_to_desc((struct scsi_sense_data_fixed *)
+ &lun->pending_sense[initidx].sense,
+ (struct scsi_sense_data_desc *)sense_ptr);
+ else if ((stored_format == CTL_SENSE_DESCRIPTOR)
+ && (sense_format == CTL_SENSE_FIXED))
+ ctl_sense_to_fixed((struct scsi_sense_data_desc *)
+ &lun->pending_sense[initidx].sense,
+ (struct scsi_sense_data_fixed *)sense_ptr);
+ else
+ memcpy(sense_ptr, &lun->pending_sense[initidx].sense,
+ ctl_min(sizeof(*sense_ptr),
+ sizeof(lun->pending_sense[initidx].sense)));
+
+ ctl_clear_mask(lun->have_ca, initidx);
+ have_error = 1;
+ } else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) {
+ ctl_ua_type ua_type;
+
+ ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending,
+ sense_ptr, sense_format);
+ if (ua_type != CTL_UA_NONE) {
+ have_error = 1;
+ /* We're reporting this UA, so clear it */
+ lun->pending_sense[initidx].ua_pending &= ~ua_type;
+ }
+ }
+ mtx_unlock(&lun->ctl_softc->ctl_lock);
+
+ /*
+ * We already have a pending error, return it.
+ */
+ if (have_error != 0) {
+ /*
+ * We report the SCSI status as OK, since the status of the
+ * request sense command itself is OK.
+ */
+ ctsio->scsi_status = SCSI_STATUS_OK;
+
+ /*
+ * We report 0 for the sense length, because we aren't doing
+ * autosense in this case. We're reporting sense as
+ * parameter data.
+ */
+ ctsio->sense_len = 0;
+
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+no_sense:
+
+ /*
+ * No sense information to report, so we report that everything is
+ * okay.
+ */
+ ctl_set_sense_data(sense_ptr,
+ lun,
+ sense_format,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_NO_SENSE,
+ /*asc*/ 0x00,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+
+ /*
+ * We report 0 for the sense length, because we aren't doing
+ * autosense in this case. We're reporting sense as parameter data.
+ */
+ ctsio->sense_len = 0;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_tur(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ CTL_DEBUG_PRINT(("ctl_tur\n"));
+
+ if (lun == NULL)
+ return (-EINVAL);
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.status = CTL_SUCCESS;
+
+ ctl_done((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+#ifdef notyet
+static int
+ctl_cmddt_inquiry(struct ctl_scsiio *ctsio)
+{
+
+}
+#endif
+
+static int
+ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
+{
+ struct scsi_vpd_supported_pages *pages;
+ int sup_page_size;
+ struct ctl_lun *lun;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ sup_page_size = sizeof(struct scsi_vpd_supported_pages) +
+ SCSI_EVPD_NUM_SUPPORTED_PAGES;
+ /*
+ * XXX KDM GFP_??? We probably don't want to wait here,
+ * unless we end up having a process/thread context.
+ */
+ ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
+ ctsio->kern_sg_entries = 0;
+
+ if (sup_page_size < alloc_len) {
+ ctsio->residual = alloc_len - sup_page_size;
+ ctsio->kern_data_len = sup_page_size;
+ ctsio->kern_total_len = sup_page_size;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+
+ memset(pages, 0, sup_page_size);
+
+ /*
+ * The control device is always connected. The disk device, on the
+ * other hand, may not be online all the time. Need to change this
+ * to figure out whether the disk device is actually online or not.
+ */
+ if (lun != NULL)
+ pages->device = (SID_QUAL_LU_CONNECTED << 5) |
+ lun->be_lun->lun_type;
+ else
+ pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+
+ pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES;
+ /* Supported VPD pages */
+ pages->page_list[0] = SVPD_SUPPORTED_PAGES;
+ /* Serial Number */
+ pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER;
+ /* Device Identification */
+ pages->page_list[2] = SVPD_DEVICE_ID;
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
+{
+ struct scsi_vpd_unit_serial_number *sn_ptr;
+ struct ctl_lun *lun;
+#ifndef CTL_USE_BACKEND_SN
+ char tmpstr[32];
+#endif
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ /* XXX KDM which malloc flags here?? */
+ ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
+ ctsio->kern_sg_entries = 0;
+
+ if (sizeof(*sn_ptr) < alloc_len) {
+ ctsio->residual = alloc_len - sizeof(*sn_ptr);
+ ctsio->kern_data_len = sizeof(*sn_ptr);
+ ctsio->kern_total_len = sizeof(*sn_ptr);
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+
+ memset(sn_ptr, 0, sizeof(*sn_ptr));
+
+ /*
+ * The control device is always connected. The disk device, on the
+ * other hand, may not be online all the time. Need to change this
+ * to figure out whether the disk device is actually online or not.
+ */
+ if (lun != NULL)
+ sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+ lun->be_lun->lun_type;
+ else
+ sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+
+ sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
+ sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN);
+#ifdef CTL_USE_BACKEND_SN
+ /*
+ * If we don't have a LUN, we just leave the serial number as
+ * all spaces.
+ */
+ memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
+ if (lun != NULL) {
+ strncpy((char *)sn_ptr->serial_num,
+ (char *)lun->be_lun->serial_num, CTL_SN_LEN);
+ }
+#else
+	/*
+	 * Note that we're using a non-unique, hardcoded serial number here.
+	 */
+ snprintf(tmpstr, sizeof(tmpstr), "MYSERIALNUMIS000");
+ memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
+ strncpy(sn_ptr->serial_num, tmpstr, ctl_min(CTL_SN_LEN,
+ ctl_min(sizeof(tmpstr), sizeof(*sn_ptr) - 4)));
+#endif
+ ctsio->scsi_status = SCSI_STATUS_OK;
+
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+
+static int
+ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
+{
+ struct scsi_vpd_device_id *devid_ptr;
+ struct scsi_vpd_id_descriptor *desc, *desc1;
+ struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */
+ struct scsi_vpd_id_t10 *t10id;
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+ struct ctl_frontend *fe;
+#ifndef CTL_USE_BACKEND_SN
+ char tmpstr[32];
+#endif /* CTL_USE_BACKEND_SN */
+ int devid_len;
+
+ ctl_softc = control_softc;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ devid_len = sizeof(struct scsi_vpd_device_id) +
+ sizeof(struct scsi_vpd_id_descriptor) +
+ sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN +
+ sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN +
+ sizeof(struct scsi_vpd_id_descriptor) +
+ sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
+ sizeof(struct scsi_vpd_id_descriptor) +
+ sizeof(struct scsi_vpd_id_trgt_port_grp_id);
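+	/*
+	 * Layout of the single allocation below: a T10 vendor ID
+	 * descriptor (desc), then a WWPN descriptor (desc1), a relative
+	 * target port descriptor (desc2) and a target port group
+	 * descriptor (desc3).
+	 */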
+
+ /* XXX KDM which malloc flags here ?? */
+ ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
+ ctsio->kern_sg_entries = 0;
+
+ if (devid_len < alloc_len) {
+ ctsio->residual = alloc_len - devid_len;
+ ctsio->kern_data_len = devid_len;
+ ctsio->kern_total_len = devid_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+
+ desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
+ t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
+ desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN);
+ desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] +
+ CTL_WWPN_LEN);
+ desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] +
+ sizeof(struct scsi_vpd_id_rel_trgt_port_id));
+ memset(devid_ptr, 0, devid_len);
+
+ /*
+ * The control device is always connected. The disk device, on the
+ * other hand, may not be online all the time.
+ */
+ if (lun != NULL)
+ devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+ lun->be_lun->lun_type;
+ else
+ devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+
+ devid_ptr->page_code = SVPD_DEVICE_ID;
+
+ scsi_ulto2b(devid_len - 4, devid_ptr->length);
+
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
+
+	/*
+	 * For Fibre Channel, claim the FC protocol in the identifier
+	 * descriptors; otherwise claim parallel SCSI.
+	 */
+	if (fe->port_type == CTL_PORT_FC) {
+		desc->proto_codeset = (SCSI_PROTO_FC << 4) |
+			SVPD_ID_CODESET_ASCII;
+		desc1->proto_codeset = (SCSI_PROTO_FC << 4) |
+			SVPD_ID_CODESET_BINARY;
+	} else {
+		desc->proto_codeset = (SCSI_PROTO_SPI << 4) |
+			SVPD_ID_CODESET_ASCII;
+		desc1->proto_codeset = (SCSI_PROTO_SPI << 4) |
+			SVPD_ID_CODESET_BINARY;
+	}
+ desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ /*
+ * We're using a LUN association here. i.e., this device ID is a
+ * per-LUN identifier.
+ */
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
+ desc->length = sizeof(*t10id) + CTL_DEVID_LEN;
+ strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
+
+	/*
+	 * desc1 is for the WWPN, which is a port association.
+	 */
+ desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA;
+ desc1->length = CTL_WWPN_LEN;
+ /* XXX Call Reggie's get_WWNN func here then add port # to the end */
+ /* For testing just create the WWPN */
+#if 0
+ ddb_GetWWNN((char *)desc1->identifier);
+
+ /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
+ /* This is so Copancontrol will return something sane */
+ if (ctsio->io_hdr.nexus.targ_port!=0 &&
+ ctsio->io_hdr.nexus.targ_port!=8)
+ desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port-1;
+ else
+ desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port;
+#endif
+
+ be64enc(desc1->identifier, fe->wwpn);
+
+	/*
+	 * desc2 is for the Relative Target Port (type 4h) identifier.
+	 */
+ desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
+ | SVPD_ID_TYPE_RELTARG;
+ desc2->length = 4;
+	/* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
+	/* This is so Copancontrol will return something sane */
+	if (ctsio->io_hdr.nexus.targ_port != 0 &&
+	    ctsio->io_hdr.nexus.targ_port != 8)
+		desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1;
+	else
+		desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port;
+
+	/*
+	 * desc3 is for the Target Port Group (type 5h) identifier.
+	 */
+ desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
+ | SVPD_ID_TYPE_TPORTGRP;
+ desc3->length = 4;
+ if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single)
+ desc3->identifier[3] = 1;
+ else
+ desc3->identifier[3] = 2;
+
+#ifdef CTL_USE_BACKEND_SN
+ /*
+ * If we've actually got a backend, copy the device id from the
+ * per-LUN data. Otherwise, set it to all spaces.
+ */
+ if (lun != NULL) {
+ /*
+ * Copy the backend's LUN ID.
+ */
+ strncpy((char *)t10id->vendor_spec_id,
+ (char *)lun->be_lun->device_id, CTL_DEVID_LEN);
+ } else {
+ /*
+ * No backend, set this to spaces.
+ */
+ memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN);
+ }
+#else
+ snprintf(tmpstr, sizeof(tmpstr), "MYDEVICEIDIS%4d",
+ (lun != NULL) ? (int)lun->lun : 0);
+ strncpy(t10id->vendor_spec_id, tmpstr, ctl_min(CTL_DEVID_LEN,
+ sizeof(tmpstr)));
+#endif
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
+{
+ struct scsi_inquiry *cdb;
+ int alloc_len, retval;
+
+ cdb = (struct scsi_inquiry *)ctsio->cdb;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ alloc_len = scsi_2btoul(cdb->length);
+
+ switch (cdb->page_code) {
+ case SVPD_SUPPORTED_PAGES:
+ retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
+ break;
+ case SVPD_UNIT_SERIAL_NUMBER:
+ retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
+ break;
+ case SVPD_DEVICE_ID:
+ retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
+ break;
+ default:
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ retval = CTL_RETVAL_COMPLETE;
+ break;
+ }
+
+ return (retval);
+}
+
+static int
+ctl_inquiry_std(struct ctl_scsiio *ctsio)
+{
+ struct scsi_inquiry_data *inq_ptr;
+ struct scsi_inquiry *cdb;
+ struct ctl_softc *ctl_softc;
+ struct ctl_lun *lun;
+ uint32_t alloc_len;
+ int is_fc;
+
+ ctl_softc = control_softc;
+
+ /*
+ * Figure out whether we're talking to a Fibre Channel port or not.
+ * We treat the ioctl front end, and any SCSI adapters, as packetized
+ * SCSI front ends.
+ */
+ mtx_lock(&ctl_softc->ctl_lock);
+ if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type !=
+ CTL_PORT_FC)
+ is_fc = 0;
+ else
+ is_fc = 1;
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ cdb = (struct scsi_inquiry *)ctsio->cdb;
+ alloc_len = scsi_2btoul(cdb->length);
+
+ /*
+ * We malloc the full inquiry data size here and fill it
+ * in. If the user only asks for less, we'll give him
+ * that much.
+ */
+ /* XXX KDM what malloc flags should we use here?? */
+ ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK);
+ if (ctsio->kern_data_ptr == NULL) {
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
+ ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+
+ if (sizeof(*inq_ptr) < alloc_len) {
+ ctsio->residual = alloc_len - sizeof(*inq_ptr);
+ ctsio->kern_data_len = sizeof(*inq_ptr);
+ ctsio->kern_total_len = sizeof(*inq_ptr);
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+
+ memset(inq_ptr, 0, sizeof(*inq_ptr));
+
+ /*
+ * The control device is always connected. The disk device, on the
+ * other hand, may not be online all the time. If we don't have a
+ * LUN mapping, we'll just say it's offline.
+ */
+ if (lun != NULL)
+ inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+ lun->be_lun->lun_type;
+ else
+ inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+
+ /* RMB in byte 2 is 0 */
+ inq_ptr->version = SCSI_REV_SPC3;
+
+ /*
+ * According to SAM-3, even if a device only supports a single
+ * level of LUN addressing, it should still set the HISUP bit:
+ *
+ * 4.9.1 Logical unit numbers overview
+ *
+ * All logical unit number formats described in this standard are
+ * hierarchical in structure even when only a single level in that
+ * hierarchy is used. The HISUP bit shall be set to one in the
+ * standard INQUIRY data (see SPC-2) when any logical unit number
+ * format described in this standard is used. Non-hierarchical
+ * formats are outside the scope of this standard.
+ *
+ * Therefore we set the HiSup bit here.
+ *
+	 * The response format is 2, per SPC-3.
+ */
+ inq_ptr->response_format = SID_HiSup | 2;
+
+ inq_ptr->additional_length = sizeof(*inq_ptr) - 4;
+ CTL_DEBUG_PRINT(("additional_length = %d\n",
+ inq_ptr->additional_length));
+
+ inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT;
+ /* 16 bit addressing */
+ if (is_fc == 0)
+ inq_ptr->spc2_flags = SPC2_SID_ADDR16;
+	/*
+	 * XXX set the SID_MultiP bit here if we're actually going to
+	 * respond on multiple ports.
+	 */
+ inq_ptr->spc2_flags |= SPC2_SID_MultiP;
+
+ /* 16 bit data bus, synchronous transfers */
+ /* XXX these flags don't apply for FC */
+ if (is_fc == 0)
+ inq_ptr->flags = SID_WBus16 | SID_Sync;
+ /*
+ * XXX KDM do we want to support tagged queueing on the control
+ * device at all?
+ */
+ if ((lun == NULL)
+ || (lun->be_lun->lun_type != T_PROCESSOR))
+ inq_ptr->flags |= SID_CmdQue;
+ /*
+ * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
+ * We have 8 bytes for the vendor name, and 16 bytes for the device
+ * name and 4 bytes for the revision.
+ */
+ strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
+ if (lun == NULL) {
+ strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
+ } else {
+ switch (lun->be_lun->lun_type) {
+ case T_DIRECT:
+ strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
+ break;
+ case T_PROCESSOR:
+ strcpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT);
+ break;
+ default:
+ strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT);
+ break;
+ }
+ }
+
+ /*
+ * XXX make this a macro somewhere so it automatically gets
+ * incremented when we make changes.
+ */
+ strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
+
+ /*
+ * For parallel SCSI, we support double transition and single
+ * transition clocking. We also support QAS (Quick Arbitration
+ * and Selection) and Information Unit transfers on both the
+ * control and array devices.
+ */
+ if (is_fc == 0)
+ inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
+ SID_SPI_IUS;
+
+ /* SAM-3 */
+ scsi_ulto2b(0x0060, inq_ptr->version1);
+ /* SPC-3 (no version claimed) XXX should we claim a version? */
+ scsi_ulto2b(0x0300, inq_ptr->version2);
+ if (is_fc) {
+ /* FCP-2 ANSI INCITS.350:2003 */
+ scsi_ulto2b(0x0917, inq_ptr->version3);
+ } else {
+ /* SPI-4 ANSI INCITS.362:200x */
+ scsi_ulto2b(0x0B56, inq_ptr->version3);
+ }
+
+ if (lun == NULL) {
+ /* SBC-2 (no version claimed) XXX should we claim a version? */
+ scsi_ulto2b(0x0320, inq_ptr->version4);
+ } else {
+ switch (lun->be_lun->lun_type) {
+ case T_DIRECT:
+ /*
+ * SBC-2 (no version claimed) XXX should we claim a
+ * version?
+ */
+ scsi_ulto2b(0x0320, inq_ptr->version4);
+ break;
+ case T_PROCESSOR:
+ default:
+ break;
+ }
+ }
+ sprintf((char *)inq_ptr->vendor_specific1, "Copyright (C) 2004, COPAN "
+ "Systems, Inc. All Rights Reserved.");
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+ if (ctsio->kern_data_len > 0) {
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+ } else {
+ ctsio->io_hdr.status = CTL_SUCCESS;
+ ctl_done((union ctl_io *)ctsio);
+ }
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_inquiry(struct ctl_scsiio *ctsio)
+{
+ struct scsi_inquiry *cdb;
+ int retval;
+
+ cdb = (struct scsi_inquiry *)ctsio->cdb;
+
+ retval = 0;
+
+ CTL_DEBUG_PRINT(("ctl_inquiry\n"));
+
+ /*
+ * Right now, we don't support the CmdDt inquiry information.
+ * This would be nice to support in the future. When we do
+ * support it, we should change this test so that it checks to make
+ * sure SI_EVPD and SI_CMDDT aren't both set at the same time.
+ */
+#ifdef notyet
+ if (((cdb->byte2 & SI_EVPD)
+ && (cdb->byte2 & SI_CMDDT)))
+#endif
+ if (cdb->byte2 & SI_CMDDT) {
+ /*
+ * Point to the SI_CMDDT bit. We might change this
+ * when we support SI_CMDDT, but since both bits would be
+ * "wrong", this should probably just stay as-is then.
+ */
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 1);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ if (cdb->byte2 & SI_EVPD)
+ retval = ctl_inquiry_evpd(ctsio);
+#ifdef notyet
+ else if (cdb->byte2 & SI_CMDDT)
+ retval = ctl_inquiry_cmddt(ctsio);
+#endif
+ else
+ retval = ctl_inquiry_std(ctsio);
+
+ return (retval);
+}
+
+/*
+ * For known CDB types, parse the LBA and length.
+ */
+static int
+ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len)
+{
+ if (io->io_hdr.io_type != CTL_IO_SCSI)
+ return (1);
+
+ switch (io->scsiio.cdb[0]) {
+ case READ_6:
+ case WRITE_6: {
+ struct scsi_rw_6 *cdb;
+
+ cdb = (struct scsi_rw_6 *)io->scsiio.cdb;
+
+		*lba = scsi_3btoul(cdb->addr);
+		/* only 5 bits are valid in the most significant address byte */
+		*lba &= 0x1fffff;
+		*len = cdb->length;
+		/*
+		 * A transfer length of 0 means 256 blocks per SBC-2, as in
+		 * ctl_read_write() above.
+		 */
+		if (*len == 0)
+			*len = 256;
+ break;
+ }
+ case READ_10:
+ case WRITE_10: {
+ struct scsi_rw_10 *cdb;
+
+ cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
+
+ *lba = scsi_4btoul(cdb->addr);
+ *len = scsi_2btoul(cdb->length);
+ break;
+ }
+ case WRITE_VERIFY_10: {
+ struct scsi_write_verify_10 *cdb;
+
+ cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;
+
+ *lba = scsi_4btoul(cdb->addr);
+ *len = scsi_2btoul(cdb->length);
+ break;
+ }
+ case READ_12:
+ case WRITE_12: {
+ struct scsi_rw_12 *cdb;
+
+ cdb = (struct scsi_rw_12 *)io->scsiio.cdb;
+
+ *lba = scsi_4btoul(cdb->addr);
+ *len = scsi_4btoul(cdb->length);
+ break;
+ }
+ case WRITE_VERIFY_12: {
+ struct scsi_write_verify_12 *cdb;
+
+ cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;
+
+ *lba = scsi_4btoul(cdb->addr);
+ *len = scsi_4btoul(cdb->length);
+ break;
+ }
+ case READ_16:
+ case WRITE_16: {
+ struct scsi_rw_16 *cdb;
+
+ cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
+
+ *lba = scsi_8btou64(cdb->addr);
+ *len = scsi_4btoul(cdb->length);
+ break;
+ }
+ case WRITE_VERIFY_16: {
+ struct scsi_write_verify_16 *cdb;
+
+ cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
+
+ *lba = scsi_8btou64(cdb->addr);
+ *len = scsi_4btoul(cdb->length);
+ break;
+ }
+ default:
+ return (1);
+ break; /* NOTREACHED */
+ }
+
+ return (0);
+}
+
+static ctl_action
+ctl_extent_check_lba(uint64_t lba1, uint32_t len1, uint64_t lba2, uint32_t len2)
+{
+ uint64_t endlba1, endlba2;
+
+ endlba1 = lba1 + len1 - 1;
+ endlba2 = lba2 + len2 - 1;
+
+ if ((endlba1 < lba2)
+ || (endlba2 < lba1))
+ return (CTL_ACTION_PASS);
+ else
+ return (CTL_ACTION_BLOCK);
+}
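+/*
+ * Worked example for the check above: lba1 = 0 and len1 = 8 give
+ * endlba1 = 7, so a second extent starting at lba2 = 8 passes, while one
+ * starting at lba2 = 7 blocks.
+ */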
+
+static ctl_action
+ctl_extent_check(union ctl_io *io1, union ctl_io *io2)
+{
+ uint64_t lba1, lba2;
+ uint32_t len1, len2;
+ int retval;
+
+ retval = ctl_get_lba_len(io1, &lba1, &len1);
+ if (retval != 0)
+ return (CTL_ACTION_ERROR);
+
+ retval = ctl_get_lba_len(io2, &lba2, &len2);
+ if (retval != 0)
+ return (CTL_ACTION_ERROR);
+
+ return (ctl_extent_check_lba(lba1, len1, lba2, len2));
+}
+
+static ctl_action
+ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io)
+{
+ struct ctl_cmd_entry *pending_entry, *ooa_entry;
+ ctl_serialize_action *serialize_row;
+
+ /*
+ * The initiator attempted multiple untagged commands at the same
+ * time. Can't do that.
+ */
+ if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
+ && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
+ && ((pending_io->io_hdr.nexus.targ_port ==
+ ooa_io->io_hdr.nexus.targ_port)
+ && (pending_io->io_hdr.nexus.initid.id ==
+ ooa_io->io_hdr.nexus.initid.id))
+ && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
+ return (CTL_ACTION_OVERLAP);
+
+ /*
+ * The initiator attempted to send multiple tagged commands with
+ * the same ID. (It's fine if different initiators have the same
+ * tag ID.)
+ *
+ * Even if all of those conditions are true, we don't kill the I/O
+ * if the command ahead of us has been aborted. We won't end up
+ * sending it to the FETD, and it's perfectly legal to resend a
+ * command with the same tag number as long as the previous
+ * instance of this tag number has been aborted somehow.
+ */
+ if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
+ && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
+ && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
+ && ((pending_io->io_hdr.nexus.targ_port ==
+ ooa_io->io_hdr.nexus.targ_port)
+ && (pending_io->io_hdr.nexus.initid.id ==
+ ooa_io->io_hdr.nexus.initid.id))
+ && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
+ return (CTL_ACTION_OVERLAP_TAG);
+
+ /*
+ * If we get a head of queue tag, SAM-3 says that we should
+ * immediately execute it.
+ *
+ * What happens if this command would normally block for some other
+ * reason? e.g. a request sense with a head of queue tag
+ * immediately after a write. Normally that would block, but this
+ * will result in its getting executed immediately...
+ *
+ * We currently return "pass" instead of "skip", so we'll end up
+ * going through the rest of the queue to check for overlapped tags.
+ *
+ * XXX KDM check for other types of blockage first??
+ */
+ if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
+ return (CTL_ACTION_PASS);
+
+ /*
+ * Ordered tags have to block until all items ahead of them
+ * have completed. If we get called with an ordered tag, we always
+ * block, if something else is ahead of us in the queue.
+ */
+ if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
+ return (CTL_ACTION_BLOCK);
+
+ /*
+ * Simple tags get blocked until all head of queue and ordered tags
+ * ahead of them have completed. I'm lumping untagged commands in
+ * with simple tags here. XXX KDM is that the right thing to do?
+ */
+ if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
+ || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
+ && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
+ || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
+ return (CTL_ACTION_BLOCK);
+
+ pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]];
+ ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]];
+
+ serialize_row = ctl_serialize_table[ooa_entry->seridx];
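+	/*
+	 * Example: if the table entry for this command pair is
+	 * CTL_SER_EXTENT (typically two media access commands), the
+	 * pending command is blocked only when the two LBA ranges
+	 * actually overlap.
+	 */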
+
+ switch (serialize_row[pending_entry->seridx]) {
+ case CTL_SER_BLOCK:
+ return (CTL_ACTION_BLOCK);
+ break; /* NOTREACHED */
+ case CTL_SER_EXTENT:
+ return (ctl_extent_check(pending_io, ooa_io));
+ break; /* NOTREACHED */
+ case CTL_SER_PASS:
+ return (CTL_ACTION_PASS);
+ break; /* NOTREACHED */
+ case CTL_SER_SKIP:
+ return (CTL_ACTION_SKIP);
+ break;
+ default:
+ panic("invalid serialization value %d",
+ serialize_row[pending_entry->seridx]);
+ break; /* NOTREACHED */
+ }
+
+ return (CTL_ACTION_ERROR);
+}
+
+/*
+ * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
+ * Assumptions:
+ * - caller holds ctl_lock
+ * - pending_io is generally either incoming, or on the blocked queue
+ * - starting I/O is the I/O we want to start the check with.
+ */
+static ctl_action
+ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
+ union ctl_io *starting_io)
+{
+ union ctl_io *ooa_io;
+ ctl_action action;
+
+ /*
+ * Run back along the OOA queue, starting with the current
+ * blocked I/O and going through every I/O before it on the
+ * queue. If starting_io is NULL, we'll just end up returning
+ * CTL_ACTION_PASS.
+ */
+ for (ooa_io = starting_io; ooa_io != NULL;
+ ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
+ ooa_links)){
+
+ /*
+ * This routine just checks to see whether
+ * cur_blocked is blocked by ooa_io, which is ahead
+ * of it in the queue. It doesn't queue/dequeue
+ * cur_blocked.
+ */
+ action = ctl_check_for_blockage(pending_io, ooa_io);
+ switch (action) {
+ case CTL_ACTION_BLOCK:
+ case CTL_ACTION_OVERLAP:
+ case CTL_ACTION_OVERLAP_TAG:
+ case CTL_ACTION_SKIP:
+ case CTL_ACTION_ERROR:
+ return (action);
+ break; /* NOTREACHED */
+ case CTL_ACTION_PASS:
+ break;
+ default:
+ panic("invalid action %d", action);
+ break; /* NOTREACHED */
+ }
+ }
+
+ return (CTL_ACTION_PASS);
+}
+
+/*
+ * Assumptions:
+ * - An I/O has just completed, and has been removed from the per-LUN OOA
+ * queue, so some items on the blocked queue may now be unblocked.
+ * - The caller holds ctl_softc->ctl_lock
+ */
+static int
+ctl_check_blocked(struct ctl_lun *lun)
+{
+ union ctl_io *cur_blocked, *next_blocked;
+
+ /*
+ * Run forward from the head of the blocked queue, checking each
+ * entry against the I/Os prior to it on the OOA queue to see if
+ * there is still any blockage.
+ *
+ * We cannot use the TAILQ_FOREACH() macro, because it can't deal
+ * with our removing a variable on it while it is traversing the
+ * list.
+ */
+ for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
+ cur_blocked != NULL; cur_blocked = next_blocked) {
+ union ctl_io *prev_ooa;
+ ctl_action action;
+
+ next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
+ blocked_links);
+
+ prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
+ ctl_ooaq, ooa_links);
+
+ /*
+ * If cur_blocked happens to be the first item in the OOA
+ * queue now, prev_ooa will be NULL, and the action
+ * returned will just be CTL_ACTION_PASS.
+ */
+ action = ctl_check_ooa(lun, cur_blocked, prev_ooa);
+
+ switch (action) {
+ case CTL_ACTION_BLOCK:
+ /* Nothing to do here, still blocked */
+ break;
+ case CTL_ACTION_OVERLAP:
+ case CTL_ACTION_OVERLAP_TAG:
+ /*
+ * This shouldn't happen! In theory we've already
+ * checked this command for overlap...
+ */
+ break;
+ case CTL_ACTION_PASS:
+ case CTL_ACTION_SKIP: {
+ struct ctl_softc *softc;
+ struct ctl_cmd_entry *entry;
+ uint32_t initidx;
+ uint8_t opcode;
+ int isc_retval;
+
+			/*
+			 * The skip case shouldn't happen; this transaction
+			 * should never have made it onto the blocked queue.
+			 */
+			/*
+			 * This I/O is no longer blocked, so we can remove it
+			 * from the blocked queue.  Since this is a TAILQ
+			 * (doubly linked list), we can do O(1) removals
+			 * from any place on the list.
+			 */
+ TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
+ blocked_links);
+ cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
+
+ if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
+ /*
+ * Need to send IO back to original side to
+ * run
+ */
+ union ctl_ha_msg msg_info;
+
+ msg_info.hdr.original_sc =
+ cur_blocked->io_hdr.original_sc;
+ msg_info.hdr.serializing_sc = cur_blocked;
+ msg_info.hdr.msg_type = CTL_MSG_R2R;
+ if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ &msg_info, sizeof(msg_info), 0)) >
+ CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:Check Blocked error from "
+ "ctl_ha_msg_send %d\n",
+ isc_retval);
+ }
+ break;
+ }
+ opcode = cur_blocked->scsiio.cdb[0];
+ entry = &ctl_cmd_table[opcode];
+ softc = control_softc;
+
+ initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus);
+
+ /*
+ * Check this I/O for LUN state changes that may
+ * have happened while this command was blocked.
+ * The LUN state may have been changed by a command
+ * ahead of us in the queue, so we need to re-check
+ * for any states that can be caused by SCSI
+ * commands.
+ */
+ if (ctl_scsiio_lun_check(softc, lun, entry,
+ &cur_blocked->scsiio) == 0) {
+ cur_blocked->io_hdr.flags |=
+ CTL_FLAG_IS_WAS_ON_RTR;
+ STAILQ_INSERT_TAIL(&lun->ctl_softc->rtr_queue,
+ &cur_blocked->io_hdr, links);
+ /*
+ * In the non CTL_DONE_THREAD case, we need
+ * to wake up the work thread here. When
+ * we're processing completed requests from
+ * the work thread context, we'll pop back
+ * around and end up pulling things off the
+ * RtR queue. When we aren't processing
+ * things from the work thread context,
+ * though, we won't ever check the RtR queue.
+ * So we need to wake up the thread to clear
+ * things off the queue. Otherwise this
+ * transaction will just sit on the RtR queue
+ * until a new I/O comes in. (Which may or
+ * may not happen...)
+ */
+#ifndef CTL_DONE_THREAD
+ ctl_wakeup_thread();
+#endif
+ } else
+ ctl_done_lock(cur_blocked, /*have_lock*/ 1);
+ break;
+ }
+ default:
+ /*
+ * This probably shouldn't happen -- we shouldn't
+ * get CTL_ACTION_ERROR, or anything else.
+ */
+ break;
+ }
+ }
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+/*
+ * This routine (with one exception) checks LUN flags that can be set by
+ * commands ahead of us in the OOA queue. These flags have to be checked
+ * when a command initially comes in, and when we pull a command off the
+ * blocked queue and are preparing to execute it. The reason we have to
+ * check these flags for commands on the blocked queue is that the LUN
+ * state may have been changed by a command ahead of us while we're on the
+ * blocked queue.
+ *
+ * Ordering is somewhat important with these checks, so please pay
+ * careful attention to the placement of any new checks.
+ */
+static int
+ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
+ struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
+{
+ int retval;
+
+ retval = 0;
+
+ /*
+ * If this shelf is a secondary shelf controller, we have to reject
+ * any media access commands.
+ */
+#if 0
+ /* No longer needed for HA */
+ if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0)
+ && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) {
+ ctl_set_lun_standby(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+#endif
+
+ /*
+ * Check for a reservation conflict. If this command isn't allowed
+ * even on reserved LUNs, and if this initiator isn't the one who
+ * reserved us, reject the command with a reservation conflict.
+ */
+ if ((lun->flags & CTL_LUN_RESERVED)
+ && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
+ if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
+ || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
+ || (ctsio->io_hdr.nexus.targ_target.id !=
+ lun->rsv_nexus.targ_target.id)) {
+ ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ retval = 1;
+ goto bailout;
+ }
+ }
+
+	if ((lun->flags & CTL_LUN_PR_RESERVED)
+	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) {
+ uint32_t residx;
+
+ residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+		/*
+		 * If we aren't registered, or if this is a single-holder
+		 * type reservation (res_type < 4, i.e. write exclusive or
+		 * exclusive access) and this initiator isn't the holder,
+		 * set a conflict.
+		 * NOTE: Commands which might be allowed on write exclusive
+		 * type reservations are checked for a conflict in the
+		 * particular command's handler.  Read and SSU are the only
+		 * ones.
+		 */
+ if (!lun->per_res[residx].registered
+ || (residx != lun->pr_res_idx && lun->res_type < 4)) {
+ ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+ retval = 1;
+ goto bailout;
+ }
+
+ }
+
+ if ((lun->flags & CTL_LUN_OFFLINE)
+ && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
+ ctl_set_lun_not_ready(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+
+ /*
+ * If the LUN is stopped, see if this particular command is allowed
+ * for a stopped lun. Otherwise, reject it with 0x04,0x02.
+ */
+ if ((lun->flags & CTL_LUN_STOPPED)
+ && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
+ /* "Logical unit not ready, initializing cmd. required" */
+ ctl_set_lun_stopped(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((lun->flags & CTL_LUN_INOPERABLE)
+ && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
+ /* "Medium format corrupted" */
+ ctl_set_medium_format_corrupted(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+
+bailout:
+ return (retval);
+
+}
+
+static void
+ctl_failover_io(union ctl_io *io, int have_lock)
+{
+ ctl_set_busy(&io->scsiio);
+ ctl_done_lock(io, have_lock);
+}
+
+static void
+ctl_failover(void)
+{
+ struct ctl_lun *lun;
+ struct ctl_softc *ctl_softc;
+ union ctl_io *next_io, *pending_io;
+ union ctl_io *io;
+ int lun_idx;
+ int i;
+
+ ctl_softc = control_softc;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ /*
+ * Remove any cmds from the other SC from the rtr queue. These
+ * will obviously only be for LUNs for which we're the primary.
+ * We can't send status or get/send data for these commands.
+ * Since they haven't been executed yet, we can just remove them.
+ * We'll either abort them or delete them below, depending on
+ * which HA mode we're in.
+ */
+ for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue);
+ io != NULL; io = next_io) {
+ next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
+ if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
+ STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr,
+ ctl_io_hdr, links);
+ }
+
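+	/*
+	 * The loop below handles the four combinations of LUN role
+	 * (primary or secondary on this SC) and HA mode
+	 * (CTL_HA_MODE_SER_ONLY or CTL_HA_MODE_XFER) as separate cases.
+	 */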
+	for (lun_idx = 0; lun_idx < ctl_softc->num_luns; lun_idx++) {
+		lun = ctl_softc->ctl_luns[lun_idx];
+		if (lun == NULL)
+			continue;
+
+ /*
+ * Processor LUNs are primary on both sides.
+ * XXX will this always be true?
+ */
+ if (lun->be_lun->lun_type == T_PROCESSOR)
+ continue;
+
+ if ((lun->flags & CTL_LUN_PRIMARY_SC)
+ && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
+ printf("FAILOVER: primary lun %d\n", lun_idx);
+			/*
+			 * Remove all commands from the other SC.  First from
+			 * the blocked queue, then from the ooa queue.  Once
+			 * we have removed them, call ctl_check_blocked() to
+			 * see if there is anything that can run.
+			 */
+ for (io = (union ctl_io *)TAILQ_FIRST(
+ &lun->blocked_queue); io != NULL; io = next_io) {
+
+ next_io = (union ctl_io *)TAILQ_NEXT(
+ &io->io_hdr, blocked_links);
+
+ if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
+ TAILQ_REMOVE(&lun->blocked_queue,
+ &io->io_hdr,blocked_links);
+ io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
+ TAILQ_REMOVE(&lun->ooa_queue,
+ &io->io_hdr, ooa_links);
+
+ ctl_free_io_internal(io, 1);
+ }
+ }
+
+ for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
+ io != NULL; io = next_io) {
+
+ next_io = (union ctl_io *)TAILQ_NEXT(
+ &io->io_hdr, ooa_links);
+
+ if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
+
+ TAILQ_REMOVE(&lun->ooa_queue,
+ &io->io_hdr,
+ ooa_links);
+
+ ctl_free_io_internal(io, 1);
+ }
+ }
+ ctl_check_blocked(lun);
+ } else if ((lun->flags & CTL_LUN_PRIMARY_SC)
+ && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
+
+ printf("FAILOVER: primary lun %d\n", lun_idx);
+ /*
+ * Abort all commands from the other SC. We can't
+ * send status back for them now. These should get
+ * cleaned up when they are completed or come out
+ * for a datamove operation.
+ */
+ for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
+ io != NULL; io = next_io) {
+ next_io = (union ctl_io *)TAILQ_NEXT(
+ &io->io_hdr, ooa_links);
+
+ if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
+ io->io_hdr.flags |= CTL_FLAG_ABORT;
+ }
+ } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
+ && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
+
+ printf("FAILOVER: secondary lun %d\n", lun_idx);
+
+ lun->flags |= CTL_LUN_PRIMARY_SC;
+
+ /*
+ * We send all I/O that was sent to this controller
+ * and redirected to the other side back with
+ * busy status, and have the initiator retry it.
+ * Figuring out how much data has been transferred,
+ * etc. and picking up where we left off would be
+ * very tricky.
+ *
+ * XXX KDM need to remove I/O from the blocked
+ * queue as well!
+ */
+ for (pending_io = (union ctl_io *)TAILQ_FIRST(
+ &lun->ooa_queue); pending_io != NULL;
+ pending_io = next_io) {
+
+ next_io = (union ctl_io *)TAILQ_NEXT(
+ &pending_io->io_hdr, ooa_links);
+
+ pending_io->io_hdr.flags &=
+ ~CTL_FLAG_SENT_2OTHER_SC;
+
+ if (pending_io->io_hdr.flags &
+ CTL_FLAG_IO_ACTIVE) {
+ pending_io->io_hdr.flags |=
+ CTL_FLAG_FAILOVER;
+ } else {
+ ctl_set_busy(&pending_io->scsiio);
+ ctl_done_lock(pending_io,
+ /*have_lock*/1);
+ }
+ }
+
+ /*
+ * Build Unit Attention
+ */
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_ASYM_ACC_CHANGE;
+ }
+ } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
+ && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
+ printf("FAILOVER: secondary lun %d\n", lun_idx);
+			/*
+			 * If the first I/O on the OOA queue is not on the
+			 * RtR queue, add it.
+			 */
+ lun->flags |= CTL_LUN_PRIMARY_SC;
+
+ pending_io = (union ctl_io *)TAILQ_FIRST(
+ &lun->ooa_queue);
+			if (pending_io == NULL) {
+ printf("Nothing on OOA queue\n");
+ continue;
+ }
+
+ pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+ if ((pending_io->io_hdr.flags &
+ CTL_FLAG_IS_WAS_ON_RTR) == 0) {
+ pending_io->io_hdr.flags |=
+ CTL_FLAG_IS_WAS_ON_RTR;
+ STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
+ &pending_io->io_hdr, links);
+ }
+#if 0
+ else
+ {
+ printf("Tag 0x%04x is running\n",
+ pending_io->scsiio.tag_num);
+ }
+#endif
+
+ next_io = (union ctl_io *)TAILQ_NEXT(
+ &pending_io->io_hdr, ooa_links);
+			for (pending_io = next_io; pending_io != NULL;
+ pending_io = next_io) {
+ pending_io->io_hdr.flags &=
+ ~CTL_FLAG_SENT_2OTHER_SC;
+ next_io = (union ctl_io *)TAILQ_NEXT(
+ &pending_io->io_hdr, ooa_links);
+ if (pending_io->io_hdr.flags &
+ CTL_FLAG_IS_WAS_ON_RTR) {
+#if 0
+ printf("Tag 0x%04x is running\n",
+ pending_io->scsiio.tag_num);
+#endif
+ continue;
+ }
+
+ switch (ctl_check_ooa(lun, pending_io,
+ (union ctl_io *)TAILQ_PREV(
+ &pending_io->io_hdr, ctl_ooaq,
+ ooa_links))) {
+
+ case CTL_ACTION_BLOCK:
+ TAILQ_INSERT_TAIL(&lun->blocked_queue,
+ &pending_io->io_hdr,
+ blocked_links);
+ pending_io->io_hdr.flags |=
+ CTL_FLAG_BLOCKED;
+ break;
+ case CTL_ACTION_PASS:
+ case CTL_ACTION_SKIP:
+ pending_io->io_hdr.flags |=
+ CTL_FLAG_IS_WAS_ON_RTR;
+ STAILQ_INSERT_TAIL(
+ &ctl_softc->rtr_queue,
+ &pending_io->io_hdr, links);
+ break;
+ case CTL_ACTION_OVERLAP:
+ ctl_set_overlapped_cmd(
+ (struct ctl_scsiio *)pending_io);
+ ctl_done_lock(pending_io,
+ /*have_lock*/ 1);
+ break;
+ case CTL_ACTION_OVERLAP_TAG:
+ ctl_set_overlapped_tag(
+ (struct ctl_scsiio *)pending_io,
+ pending_io->scsiio.tag_num & 0xff);
+ ctl_done_lock(pending_io,
+ /*have_lock*/ 1);
+ break;
+ case CTL_ACTION_ERROR:
+ default:
+ ctl_set_internal_failure(
+ (struct ctl_scsiio *)pending_io,
+					    /*sks_valid*/ 0,
+					    /*retry_count*/ 0);
+ ctl_done_lock(pending_io,
+ /*have_lock*/ 1);
+ break;
+ }
+ }
+
+ /*
+ * Build Unit Attention
+ */
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ lun->pending_sense[i].ua_pending |=
+ CTL_UA_ASYM_ACC_CHANGE;
+ }
+ } else {
+ panic("Unhandled HA mode failover, LUN flags = %#x, "
+			      "ha_mode = %#x", lun->flags, ctl_softc->ha_mode);
+ }
+ }
+ ctl_pause_rtr = 0;
+ mtx_unlock(&ctl_softc->ctl_lock);
+}
+
+static int
+ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct ctl_cmd_entry *entry;
+ uint8_t opcode;
+ uint32_t initidx;
+ int retval;
+
+ retval = 0;
+
+ lun = NULL;
+
+ opcode = ctsio->cdb[0];
+
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ if ((ctsio->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
+ && (ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun] != NULL)) {
+ lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
+ /*
+ * If the LUN is invalid, pretend that it doesn't exist.
+ * It will go away as soon as all pending I/O has been
+ * completed.
+ */
+ if (lun->flags & CTL_LUN_DISABLED) {
+ lun = NULL;
+ } else {
+ ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
+ ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
+ lun->be_lun;
+ if (lun->be_lun->lun_type == T_PROCESSOR) {
+ ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
+ }
+ }
+ } else {
+ ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
+ ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
+ }
+
+ entry = &ctl_cmd_table[opcode];
+
+ ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
+ ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
+
+	/*
+	 * Check to see whether we can send this command to LUNs that don't
+	 * exist.  This should pretty much only be the case for inquiry
+	 * and request sense.  The further checks below really require
+	 * having a LUN, so we can't check the command any further without
+	 * one.  Just put it on the rtr queue.
+	 */
+ if (lun == NULL) {
+ if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
+ goto queue_rtr;
+
+ ctl_set_unsupported_lun(ctsio);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ } else {
+ /*
+ * Every I/O goes into the OOA queue for a particular LUN, and
+ * stays there until completion.
+ */
+ TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+
+		/*
+		 * Make sure we support this particular command on this LUN.
+		 * For example, we don't support writes to the control LUN.
+		 */
+ switch (lun->be_lun->lun_type) {
+ case T_PROCESSOR:
+ if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
+ && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
+ == 0)) {
+ ctl_set_invalid_opcode(ctsio);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ }
+ break;
+ case T_DIRECT:
+ if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0)
+ && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
+ == 0)){
+ ctl_set_invalid_opcode(ctsio);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ }
+ break;
+ default:
+ printf("Unsupported CTL LUN type %d\n",
+ lun->be_lun->lun_type);
+ panic("Unsupported CTL LUN type %d\n",
+ lun->be_lun->lun_type);
+ break; /* NOTREACHED */
+ }
+ }
+
+ initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+
+ /*
+ * If we've got a request sense, it'll clear the contingent
+ * allegiance condition. Otherwise, if we have a CA condition for
+ * this initiator, clear it, because it sent down a command other
+ * than request sense.
+ */
+ if ((opcode != REQUEST_SENSE)
+ && (ctl_is_set(lun->have_ca, initidx)))
+ ctl_clear_mask(lun->have_ca, initidx);
+
+ /*
+ * If the command has this flag set, it handles its own unit
+ * attention reporting, we shouldn't do anything. Otherwise we
+ * check for any pending unit attentions, and send them back to the
+ * initiator. We only do this when a command initially comes in,
+ * not when we pull it off the blocked queue.
+ *
+ * According to SAM-3, section 5.3.2, the order that things get
+ * presented back to the host is basically unit attentions caused
+ * by some sort of reset event, busy status, reservation conflicts
+ * or task set full, and finally any other status.
+ *
+ * One issue here is that some of the unit attentions we report
+ * don't fall into the "reset" category (e.g. "reported luns data
+ * has changed"). So reporting it here, before the reservation
+ * check, may be technically wrong. I guess the only thing to do
+ * would be to check for and report the reset events here, and then
+ * check for the other unit attention types after we check for a
+ * reservation conflict.
+ *
+ * XXX KDM need to fix this
+ */
+ if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
+ ctl_ua_type ua_type;
+
+ ua_type = lun->pending_sense[initidx].ua_pending;
+ if (ua_type != CTL_UA_NONE) {
+ ctl_sense_format sense_format;
+
+ if (lun != NULL)
+ sense_format = (lun->flags &
+ CTL_LUN_SENSE_DESC) ? CTL_SENSE_DESCRIPTOR :
+ CTL_SENSE_FIXED;
+ else
+ sense_format = CTL_SENSE_FIXED;
+
+ ua_type = ctl_build_ua(ua_type, &ctsio->sense_data,
+ sense_format);
+ if (ua_type != CTL_UA_NONE) {
+ ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
+ ctsio->io_hdr.status = CTL_SCSI_ERROR |
+ CTL_AUTOSENSE;
+ ctsio->sense_len = SSD_FULL_SIZE;
+ lun->pending_sense[initidx].ua_pending &=
+ ~ua_type;
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ }
+ }
+ }
+
+ if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ }
+
+ /*
+ * XXX CHD this is where we want to send IO to other side if
+ * this LUN is secondary on this SC. We will need to make a copy
+ * of the IO and flag the IO on this side as SENT_2OTHER and the flag
+ * the copy we send as FROM_OTHER.
+ * We also need to stuff the address of the original IO so we can
+ * find it easily. Something similar will need be done on the other
+ * side so when we are done we can find the copy.
+ */
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
+ union ctl_ha_msg msg_info;
+ int isc_retval;
+
+ ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
+
+ msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
+ msg_info.hdr.original_sc = (union ctl_io *)ctsio;
+#if 0
+ printf("1. ctsio %p\n", ctsio);
+#endif
+ msg_info.hdr.serializing_sc = NULL;
+ msg_info.hdr.nexus = ctsio->io_hdr.nexus;
+ msg_info.scsi.tag_num = ctsio->tag_num;
+ msg_info.scsi.tag_type = ctsio->tag_type;
+ memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
+
+ ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+
+		if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ (void *)&msg_info, sizeof(msg_info), 0)) >
+ CTL_HA_STATUS_SUCCESS) {
+ printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
+ isc_retval);
+			printf("CTL:opcode is %x\n", opcode);
+ } else {
+#if 0
+ printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
+#endif
+ }
+
+ /*
+ * XXX KDM this I/O is off the incoming queue, but hasn't
+ * been inserted on any other queue. We may need to come
+ * up with a holding queue while we wait for serialization
+ * so that we have an idea of what we're waiting for from
+ * the other side.
+ */
+ goto bailout_unlock;
+ }
+
+ switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
+ (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
+ ctl_ooaq, ooa_links))) {
+ case CTL_ACTION_BLOCK:
+ ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
+ TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
+ blocked_links);
+ goto bailout_unlock;
+ break; /* NOTREACHED */
+ case CTL_ACTION_PASS:
+ case CTL_ACTION_SKIP:
+ goto queue_rtr;
+ break; /* NOTREACHED */
+ case CTL_ACTION_OVERLAP:
+ ctl_set_overlapped_cmd(ctsio);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ break; /* NOTREACHED */
+ case CTL_ACTION_OVERLAP_TAG:
+ ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ break; /* NOTREACHED */
+ case CTL_ACTION_ERROR:
+ default:
+ ctl_set_internal_failure(ctsio,
+ /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ break; /* NOTREACHED */
+ }
+
+ goto bailout_unlock;
+
+queue_rtr:
+ ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+ STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, &ctsio->io_hdr, links);
+
+bailout_unlock:
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+bailout:
+ return (retval);
+}
+
+static int
+ctl_scsiio(struct ctl_scsiio *ctsio)
+{
+ int retval;
+ struct ctl_cmd_entry *entry;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
+
+ entry = &ctl_cmd_table[ctsio->cdb[0]];
+
+ /*
+ * If this I/O has been aborted, just send it straight to
+ * ctl_done() without executing it.
+ */
+ if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
+ ctl_done((union ctl_io *)ctsio);
+ goto bailout;
+ }
+
+ /*
+ * All the checks should have been handled by ctl_scsiio_precheck().
+ * We should be clear now to just execute the I/O.
+ */
+ retval = entry->execute(ctsio);
+
+bailout:
+ return (retval);
+}
+
+/*
+ * Since we only implement one target right now, a bus reset simply resets
+ * our single target.
+ */
+static int
+ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io)
+{
+ return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET));
+}
+
+static int
+ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
+ ctl_ua_type ua_type)
+{
+ struct ctl_lun *lun;
+ int retval;
+
+ if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
+ union ctl_ha_msg msg_info;
+
+ io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
+ msg_info.hdr.nexus = io->io_hdr.nexus;
+		if (ua_type == CTL_UA_TARG_RESET)
+ msg_info.task.task_action = CTL_TASK_TARGET_RESET;
+ else
+ msg_info.task.task_action = CTL_TASK_BUS_RESET;
+ msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.original_sc = NULL;
+ msg_info.hdr.serializing_sc = NULL;
+ if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ (void *)&msg_info, sizeof(msg_info), 0)) {
+ }
+ }
+ retval = 0;
+
+ STAILQ_FOREACH(lun, &ctl_softc->lun_list, links)
+ retval += ctl_lun_reset(lun, io, ua_type);
+
+ return (retval);
+}
+
+/*
+ * The LUN should always be set. The I/O is optional, and is used to
+ * distinguish between I/Os sent by this initiator, and by other
+ * initiators. We set unit attention for initiators other than this one.
+ * SAM-3 is vague on this point. It does say that a unit attention should
+ * be established for other initiators when a LUN is reset (see section
+ * 5.7.3), but it doesn't specifically say that the unit attention should
+ * be established for this particular initiator when a LUN is reset. Here
+ * is the relevant text, from SAM-3 rev 8:
+ *
+ * 5.7.2 When a SCSI initiator port aborts its own tasks
+ *
+ * When a SCSI initiator port causes its own task(s) to be aborted, no
+ * notification that the task(s) have been aborted shall be returned to
+ * the SCSI initiator port other than the completion response for the
+ * command or task management function action that caused the task(s) to
+ * be aborted and notification(s) associated with related effects of the
+ * action (e.g., a reset unit attention condition).
+ *
+ * XXX KDM for now, we're setting unit attention for all initiators.
+ */
+static int
+ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
+{
+ union ctl_io *xio;
+#if 0
+ uint32_t initindex;
+#endif
+ int i;
+
+ /*
+ * Run through the OOA queue and abort each I/O.
+ */
+#if 0
+ TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
+#endif
+ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
+ xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
+ xio->io_hdr.flags |= CTL_FLAG_ABORT;
+ }
+
+	/*
+	 * This version sets unit attention for every initiator other than
+	 * the one that issued the command.
+	 */
+#if 0
+ initindex = ctl_get_initindex(&io->io_hdr.nexus);
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ if (initindex == i)
+ continue;
+ lun->pending_sense[i].ua_pending |= ua_type;
+ }
+#endif
+
+ /*
+ * A reset (any kind, really) clears reservations established with
+ * RESERVE/RELEASE. It does not clear reservations established
+ * with PERSISTENT RESERVE OUT, but we don't support that at the
+ * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address
+ * reservations made with the RESERVE/RELEASE commands, because
+ * those commands are obsolete in SPC-3.
+ */
+ lun->flags &= ~CTL_LUN_RESERVED;
+
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ ctl_clear_mask(lun->have_ca, i);
+ lun->pending_sense[i].ua_pending |= ua_type;
+ }
+
+ return (0);
+}
+
+static int
+ctl_abort_task(union ctl_io *io)
+{
+ union ctl_io *xio;
+ struct ctl_lun *lun;
+ struct ctl_softc *ctl_softc;
+#if 0
+ struct sbuf sb;
+ char printbuf[128];
+#endif
+ int found;
+
+ ctl_softc = control_softc;
+ found = 0;
+
+ /*
+ * Look up the LUN.
+ */
+ if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
+ && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL))
+ lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
+ else
+ goto bailout;
+
+#if 0
+ printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
+ lun->lun, io->taskio.tag_num, io->taskio.tag_type);
+#endif
+
+ /*
+ * Run through the OOA queue and attempt to find the given I/O.
+ * The target port, initiator ID, tag type and tag number have to
+ * match the values that we got from the initiator. If we have an
+ * untagged command to abort, simply abort the first untagged command
+	 * we come to.  We only allow one untagged command at a time, of course.
+ */
+#if 0
+ TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
+#endif
+ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
+ xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
+#if 0
+ sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);
+
+ sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
+ lun->lun, xio->scsiio.tag_num,
+ xio->scsiio.tag_type,
+ (xio->io_hdr.blocked_links.tqe_prev
+ == NULL) ? "" : " BLOCKED",
+ (xio->io_hdr.flags &
+ CTL_FLAG_DMA_INPROG) ? " DMA" : "",
+ (xio->io_hdr.flags &
+			    CTL_FLAG_ABORT) ? " ABORT" : "",
+			    (xio->io_hdr.flags &
+			    CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : "");
+ ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
+ sbuf_finish(&sb);
+ printf("%s\n", sbuf_data(&sb));
+#endif
+
+ if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port)
+ && (xio->io_hdr.nexus.initid.id ==
+ io->io_hdr.nexus.initid.id)) {
+ /*
+ * If the abort says that the task is untagged, the
+ * task in the queue must be untagged. Otherwise,
+ * we just check to see whether the tag numbers
+ * match. This is because the QLogic firmware
+ * doesn't pass back the tag type in an abort
+ * request.
+ */
+#if 0
+ if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
+ && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
+ || (xio->scsiio.tag_num == io->taskio.tag_num)) {
+#endif
+ /*
+ * XXX KDM we've got problems with FC, because it
+ * doesn't send down a tag type with aborts. So we
+ * can only really go by the tag number...
+ * This may cause problems with parallel SCSI.
+ * Need to figure that out!!
+ */
+ if (xio->scsiio.tag_num == io->taskio.tag_num) {
+ xio->io_hdr.flags |= CTL_FLAG_ABORT;
+ found = 1;
+ if ((io->io_hdr.flags &
+ CTL_FLAG_FROM_OTHER_SC) == 0 &&
+ !(lun->flags & CTL_LUN_PRIMARY_SC)) {
+ union ctl_ha_msg msg_info;
+
+ io->io_hdr.flags |=
+ CTL_FLAG_SENT_2OTHER_SC;
+ msg_info.hdr.nexus = io->io_hdr.nexus;
+ msg_info.task.task_action =
+ CTL_TASK_ABORT_TASK;
+ msg_info.task.tag_num =
+ io->taskio.tag_num;
+ msg_info.task.tag_type =
+ io->taskio.tag_type;
+ msg_info.hdr.msg_type =
+ CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.original_sc = NULL;
+ msg_info.hdr.serializing_sc = NULL;
+#if 0
+ printf("Sent Abort to other side\n");
+#endif
+ if (CTL_HA_STATUS_SUCCESS !=
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ (void *)&msg_info,
+ sizeof(msg_info), 0)) {
+ }
+ }
+#if 0
+ printf("ctl_abort_task: found I/O to abort\n");
+#endif
+ break;
+ }
+ }
+ }
+
+bailout:
+
+ if (found == 0) {
+ /*
+ * This isn't really an error. It's entirely possible for
+ * the abort and command completion to cross on the wire.
+		 * This is more of an informative/diagnostic message.
+ */
+#if 0
+ printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
+ "%d:%d:%d:%d tag %d type %d\n",
+ io->io_hdr.nexus.initid.id,
+ io->io_hdr.nexus.targ_port,
+ io->io_hdr.nexus.targ_target.id,
+ io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
+ io->taskio.tag_type);
+#endif
+ return (1);
+ } else
+ return (0);
+}
+
+/*
+ * Assumptions: caller holds ctl_softc->ctl_lock
+ *
+ * This routine cannot block! It must be callable from an interrupt
+ * handler as well as from the work thread.
+ */
+static void
+ctl_run_task_queue(struct ctl_softc *ctl_softc)
+{
+ union ctl_io *io, *next_io;
+
+ CTL_DEBUG_PRINT(("ctl_run_task_queue\n"));
+
+ for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->task_queue);
+ io != NULL; io = next_io) {
+ int retval;
+ const char *task_desc;
+
+ next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
+
+ retval = 0;
+
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_TASK: {
+ task_desc = ctl_scsi_task_string(&io->taskio);
+ if (task_desc != NULL) {
+#ifdef NEEDTOPORT
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_TASK_REPORT,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "CTL: received task: %s",task_desc);
+#endif
+ } else {
+#ifdef NEEDTOPORT
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_TASK_REPORT,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "CTL: received unknown task "
+ "type: %d (%#x)",
+ io->taskio.task_action,
+ io->taskio.task_action);
+#endif
+ }
+ switch (io->taskio.task_action) {
+ case CTL_TASK_ABORT_TASK:
+ retval = ctl_abort_task(io);
+ break;
+ case CTL_TASK_ABORT_TASK_SET:
+ break;
+ case CTL_TASK_CLEAR_ACA:
+ break;
+ case CTL_TASK_CLEAR_TASK_SET:
+ break;
+ case CTL_TASK_LUN_RESET: {
+ struct ctl_lun *lun;
+ uint32_t targ_lun;
+
+ targ_lun = io->io_hdr.nexus.targ_lun;
+
+ if ((targ_lun < CTL_MAX_LUNS)
+ && (ctl_softc->ctl_luns[targ_lun] != NULL))
+ lun = ctl_softc->ctl_luns[targ_lun];
+ else {
+ retval = 1;
+ break;
+ }
+
+ if (!(io->io_hdr.flags &
+ CTL_FLAG_FROM_OTHER_SC)) {
+ union ctl_ha_msg msg_info;
+
+ io->io_hdr.flags |=
+ CTL_FLAG_SENT_2OTHER_SC;
+ msg_info.hdr.msg_type =
+ CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.nexus = io->io_hdr.nexus;
+ msg_info.task.task_action =
+ CTL_TASK_LUN_RESET;
+ msg_info.hdr.original_sc = NULL;
+ msg_info.hdr.serializing_sc = NULL;
+ if (CTL_HA_STATUS_SUCCESS !=
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ (void *)&msg_info,
+ sizeof(msg_info), 0)) {
+ }
+ }
+
+ retval = ctl_lun_reset(lun, io,
+ CTL_UA_LUN_RESET);
+ break;
+ }
+ case CTL_TASK_TARGET_RESET:
+ retval = ctl_target_reset(ctl_softc, io,
+ CTL_UA_TARG_RESET);
+ break;
+ case CTL_TASK_BUS_RESET:
+ retval = ctl_bus_reset(ctl_softc, io);
+ break;
+ case CTL_TASK_PORT_LOGIN:
+ break;
+ case CTL_TASK_PORT_LOGOUT:
+ break;
+ default:
+ printf("ctl_run_task_queue: got unknown task "
+ "management event %d\n",
+ io->taskio.task_action);
+ break;
+ }
+ if (retval == 0)
+ io->io_hdr.status = CTL_SUCCESS;
+ else
+ io->io_hdr.status = CTL_ERROR;
+
+ STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
+ ctl_io_hdr, links);
+ /*
+ * This will queue this I/O to the done queue, but the
+ * work thread won't be able to process it until we
+ * return and the lock is released.
+ */
+ ctl_done_lock(io, /*have_lock*/ 1);
+ break;
+ }
+ default: {
+
+ printf("%s: invalid I/O type %d msg %d cdb %x"
+ " iptl: %ju:%d:%ju:%d tag 0x%04x\n",
+ __func__, io->io_hdr.io_type,
+ io->io_hdr.msg_type, io->scsiio.cdb[0],
+ (uintmax_t)io->io_hdr.nexus.initid.id,
+ io->io_hdr.nexus.targ_port,
+ (uintmax_t)io->io_hdr.nexus.targ_target.id,
+ io->io_hdr.nexus.targ_lun,
+ (io->io_hdr.io_type == CTL_IO_TASK) ?
+ io->taskio.tag_num : io->scsiio.tag_num);
+ STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
+ ctl_io_hdr, links);
+ ctl_free_io_internal(io, 1);
+ break;
+ }
+ }
+ }
+
+ ctl_softc->flags &= ~CTL_FLAG_TASK_PENDING;
+}
+
+/*
+ * For HA operation. Handle commands that come in from the other
+ * controller.
+ */
+static void
+ctl_handle_isc(union ctl_io *io)
+{
+ int free_io;
+ struct ctl_lun *lun;
+ struct ctl_softc *ctl_softc;
+
+ ctl_softc = control_softc;
+
+ lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
+
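+	/*
+	 * Dispatch on the inter-controller message type: SERIALIZE (a new
+	 * command from the other SC), R2R (ready to run, SER_ONLY mode),
+	 * FINISH_IO (the other side has completed a command), PERS_ACTION
+	 * (persistent reservation update), BAD_JUJU (error status), and
+	 * DATAMOVE/DATAMOVE_DONE (XFER mode data movement).
+	 */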
+ switch (io->io_hdr.msg_type) {
+ case CTL_MSG_SERIALIZE:
+ free_io = ctl_serialize_other_sc_cmd(&io->scsiio,
+ /*have_lock*/ 0);
+ break;
+ case CTL_MSG_R2R: {
+ uint8_t opcode;
+ struct ctl_cmd_entry *entry;
+
+ /*
+ * This is only used in SER_ONLY mode.
+ */
+ free_io = 0;
+ opcode = io->scsiio.cdb[0];
+ entry = &ctl_cmd_table[opcode];
+ mtx_lock(&ctl_softc->ctl_lock);
+ if (ctl_scsiio_lun_check(ctl_softc, lun,
+ entry, (struct ctl_scsiio *)io) != 0) {
+ ctl_done_lock(io, /*have_lock*/ 1);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ break;
+ }
+ io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+ STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
+ &io->io_hdr, links);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ break;
+ }
+ case CTL_MSG_FINISH_IO:
+ if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+ free_io = 0;
+ ctl_done_lock(io, /*have_lock*/ 0);
+ } else {
+ free_io = 1;
+ mtx_lock(&ctl_softc->ctl_lock);
+ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
+ ooa_links);
+ STAILQ_REMOVE(&ctl_softc->task_queue,
+ &io->io_hdr, ctl_io_hdr, links);
+ ctl_check_blocked(lun);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ }
+ break;
+ case CTL_MSG_PERS_ACTION:
+ ctl_hndl_per_res_out_on_other_sc(
+ (union ctl_ha_msg *)&io->presio.pr_msg);
+ free_io = 1;
+ break;
+ case CTL_MSG_BAD_JUJU:
+ free_io = 0;
+ ctl_done_lock(io, /*have_lock*/ 0);
+ break;
+ case CTL_MSG_DATAMOVE:
+ /* Only used in XFER mode */
+ free_io = 0;
+ ctl_datamove_remote(io);
+ break;
+ case CTL_MSG_DATAMOVE_DONE:
+ /* Only used in XFER mode */
+ free_io = 0;
+ io->scsiio.be_move_done(io);
+ break;
+ default:
+ free_io = 1;
+ printf("%s: Invalid message type %d\n",
+ __func__, io->io_hdr.msg_type);
+ break;
+ }
+ if (free_io)
+ ctl_free_io_internal(io, 0);
+}
+
+/*
+ * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
+ * there is no match.
+ */
+static ctl_lun_error_pattern
+ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
+{
+ struct ctl_cmd_entry *entry;
+ ctl_lun_error_pattern filtered_pattern, pattern;
+ uint8_t opcode;
+
+ pattern = desc->error_pattern;
+
+ /*
+ * XXX KDM we need more data passed into this function to match a
+ * custom pattern, and we actually need to implement custom pattern
+ * matching.
+ */
+ if (pattern & CTL_LUN_PAT_CMD)
+ return (CTL_LUN_PAT_CMD);
+
+ if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
+ return (CTL_LUN_PAT_ANY);
+
+ opcode = ctsio->cdb[0];
+ entry = &ctl_cmd_table[opcode];
+
+ filtered_pattern = entry->pattern & pattern;
+
+ /*
+ * If the user requested specific flags in the pattern (e.g.
+ * CTL_LUN_PAT_RANGE), make sure the command supports all of those
+ * flags.
+ *
+ * If the user did not specify any flags, it doesn't matter whether
+ * or not the command supports the flags.
+ */
+ if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
+ (pattern & ~CTL_LUN_PAT_MASK))
+ return (CTL_LUN_PAT_NONE);
+
+ /*
+ * If the user asked for a range check, see if the requested LBA
+ * range overlaps with this command's LBA range.
+ */
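+	/*
+	 * For example, a READ at LBA 100 for 50 blocks overlaps an
+	 * injected range of {lba 120, len 10}, so that descriptor would
+	 * match; a range of {lba 200, len 10} would not.
+	 */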
+ if (filtered_pattern & CTL_LUN_PAT_RANGE) {
+ uint64_t lba1;
+ uint32_t len1;
+ ctl_action action;
+ int retval;
+
+ retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
+ if (retval != 0)
+ return (CTL_LUN_PAT_NONE);
+
+ action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
+ desc->lba_range.len);
+ /*
+ * A "pass" means that the LBA ranges don't overlap, so
+ * this doesn't match the user's range criteria.
+ */
+ if (action == CTL_ACTION_PASS)
+ return (CTL_LUN_PAT_NONE);
+ }
+
+ return (filtered_pattern);
+}
+
+/*
+ * Called with the CTL lock held.
+ */
+static void
+ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
+{
+ struct ctl_error_desc *desc, *desc2;
+
+ STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
+ ctl_lun_error_pattern pattern;
+ /*
+ * Check to see whether this particular command matches
+ * the pattern in the descriptor.
+ */
+ pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
+ if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
+ continue;
+
+ switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
+ case CTL_LUN_INJ_ABORTED:
+ ctl_set_aborted(&io->scsiio);
+ break;
+ case CTL_LUN_INJ_MEDIUM_ERR:
+ ctl_set_medium_error(&io->scsiio);
+ break;
+ case CTL_LUN_INJ_UA:
+ /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET
+ * OCCURRED */
+ ctl_set_ua(&io->scsiio, 0x29, 0x00);
+ break;
+ case CTL_LUN_INJ_CUSTOM:
+ /*
+ * We're assuming the user knows what he is doing.
+ * Just copy the sense information without doing
+ * checks.
+ */
+ bcopy(&desc->custom_sense, &io->scsiio.sense_data,
+ ctl_min(sizeof(desc->custom_sense),
+ sizeof(io->scsiio.sense_data)));
+ io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
+ io->scsiio.sense_len = SSD_FULL_SIZE;
+ io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ break;
+ case CTL_LUN_INJ_NONE:
+ default:
+ /*
+ * If this is an error injection type we don't know
+ * about, clear the continuous flag (if it is set)
+ * so it will get deleted below.
+ */
+ desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
+ break;
+ }
+		/*
+		 * By default, each error injection action is a one-shot.
+		 */
+ if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
+ continue;
+
+ STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
+
+ free(desc, M_CTL);
+ }
+}
+
+#ifdef CTL_IO_DELAY
+static void
+ctl_datamove_timer_wakeup(void *arg)
+{
+ union ctl_io *io;
+
+ io = (union ctl_io *)arg;
+
+ ctl_datamove(io);
+}
+#endif /* CTL_IO_DELAY */
+
+/*
+ * Assumption: caller does NOT hold ctl_lock
+ */
+void
+ctl_datamove(union ctl_io *io)
+{
+ void (*fe_datamove)(union ctl_io *io);
+
+ CTL_DEBUG_PRINT(("ctl_datamove\n"));
+
+#ifdef CTL_TIME_IO
+ if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
+ char str[256];
+ char path_str[64];
+ struct sbuf sb;
+
+ ctl_scsi_path_string(io, path_str, sizeof(path_str));
+ sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
+
+ sbuf_cat(&sb, path_str);
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ ctl_scsi_command_string(&io->scsiio, NULL, &sb);
+ sbuf_printf(&sb, "\n");
+ sbuf_cat(&sb, path_str);
+ sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
+ io->scsiio.tag_num, io->scsiio.tag_type);
+ break;
+ case CTL_IO_TASK:
+ sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
+ "Tag Type: %d\n", io->taskio.task_action,
+ io->taskio.tag_num, io->taskio.tag_type);
+ break;
+ default:
+ printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
+ panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
+ break;
+ }
+ sbuf_cat(&sb, path_str);
+ sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
+ (intmax_t)time_uptime - io->io_hdr.start_time);
+ sbuf_finish(&sb);
+ printf("%s", sbuf_data(&sb));
+ }
+#endif /* CTL_TIME_IO */
+
+ mtx_lock(&control_softc->ctl_lock);
+#ifdef CTL_IO_DELAY
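+	/*
+	 * Optional datamove delay injection: if this I/O has already been
+	 * delayed once (DELAY_DONE is set), just clear the flag and fall
+	 * through.  Otherwise, if the LUN has a datamove delay configured,
+	 * schedule ctl_datamove_timer_wakeup() to re-enter this function
+	 * after the delay and return for now.
+	 */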
+ if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
+ struct ctl_lun *lun;
+
+		lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
+ } else {
+ struct ctl_lun *lun;
+
+		lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ if ((lun != NULL)
+ && (lun->delay_info.datamove_delay > 0)) {
+ struct callout *callout;
+
+ callout = (struct callout *)&io->io_hdr.timer_bytes;
+ callout_init(callout, /*mpsafe*/ 1);
+ io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
+ callout_reset(callout,
+ lun->delay_info.datamove_delay * hz,
+ ctl_datamove_timer_wakeup, io);
+ if (lun->delay_info.datamove_type ==
+ CTL_DELAY_TYPE_ONESHOT)
+ lun->delay_info.datamove_delay = 0;
+ mtx_unlock(&control_softc->ctl_lock);
+ return;
+ }
+ }
+#endif
+ /*
+ * If we have any pending task management commands, process them
+ * first. This is necessary to eliminate a race condition with the
+ * FETD:
+ *
+ * - FETD submits a task management command, like an abort.
+ * - Back end calls fe_datamove() to move the data for the aborted
+ * command. The FETD can't really accept it, but if it did, it
+ * would end up transmitting data for a command that the initiator
+ * told us to abort.
+ *
+ * We close the race by processing all pending task management
+ * commands here (we can't block!), and then check this I/O to see
+	 * if it has been aborted.  If so, return it to the back end with
+	 * bad status.  The back end will then return an error, at which
+	 * point we can return the aborted command to the FETD, so it can
+	 * clean up its resources.
+ */
+ if (control_softc->flags & CTL_FLAG_TASK_PENDING)
+ ctl_run_task_queue(control_softc);
+
+ /*
+ * This command has been aborted. Set the port status, so we fail
+ * the data move.
+ */
+ if (io->io_hdr.flags & CTL_FLAG_ABORT) {
+ printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n",
+ io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id,
+ io->io_hdr.nexus.targ_port,
+ (uintmax_t)io->io_hdr.nexus.targ_target.id,
+ io->io_hdr.nexus.targ_lun);
+ io->io_hdr.status = CTL_CMD_ABORTED;
+ io->io_hdr.port_status = 31337;
+ mtx_unlock(&control_softc->ctl_lock);
+ /*
+ * Note that the backend, in this case, will get the
+ * callback in its context. In other cases it may get
+ * called in the frontend's interrupt thread context.
+ */
+ io->scsiio.be_move_done(io);
+ return;
+ }
+
+ /*
+ * If we're in XFER mode and this I/O is from the other shelf
+ * controller, we need to send the DMA to the other side to
+ * actually transfer the data to/from the host. In serialize only
+ * mode the transfer happens below CTL and ctl_datamove() is only
+ * called on the machine that originally received the I/O.
+ */
+ if ((control_softc->ha_mode == CTL_HA_MODE_XFER)
+ && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
+ union ctl_ha_msg msg;
+ uint32_t sg_entries_sent;
+ int do_sg_copy;
+ int i;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.hdr.msg_type = CTL_MSG_DATAMOVE;
+ msg.hdr.original_sc = io->io_hdr.original_sc;
+ msg.hdr.serializing_sc = io;
+ msg.hdr.nexus = io->io_hdr.nexus;
+ msg.dt.flags = io->io_hdr.flags;
+ /*
+ * We convert everything into a S/G list here. We can't
+ * pass by reference, only by value between controllers.
+ * So we can't pass a pointer to the S/G list, only as many
+ * S/G entries as we can fit in here. If it's possible for
+ * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
+ * then we need to break this up into multiple transfers.
+ */
+ if (io->scsiio.kern_sg_entries == 0) {
+ msg.dt.kern_sg_entries = 1;
+ /*
+ * If this is in cached memory, flush the cache
+ * before we send the DMA request to the other
+ * controller. We want to do this in either the
+ * read or the write case. The read case is
+ * straightforward. In the write case, we want to
+ * make sure nothing is in the local cache that
+ * could overwrite the DMAed data.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
+ /*
+ * XXX KDM use bus_dmamap_sync() here.
+ */
+ }
+
+ /*
+ * Convert to a physical address if this is a
+ * virtual address.
+ */
+ if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
+ msg.dt.sg_list[0].addr =
+ io->scsiio.kern_data_ptr;
+ } else {
+ /*
+ * XXX KDM use busdma here!
+ */
+#if 0
+ msg.dt.sg_list[0].addr = (void *)
+ vtophys(io->scsiio.kern_data_ptr);
+#endif
+ }
+
+ msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
+ do_sg_copy = 0;
+ } else {
+ struct ctl_sg_entry *sgl;
+
+ do_sg_copy = 1;
+ msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
+ sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+ if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
+ /*
+ * XXX KDM use bus_dmamap_sync() here.
+ */
+ }
+ }
+
+ msg.dt.kern_data_len = io->scsiio.kern_data_len;
+ msg.dt.kern_total_len = io->scsiio.kern_total_len;
+ msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
+ msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
+ msg.dt.sg_sequence = 0;
+
+		/*
+		 * Loop until we've sent all of the S/G entries.  On the
+		 * other end, we'll recompose these S/G entries into one
+		 * contiguous list before performing the DMA.
+		 */
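+		/*
+		 * For example, if msg.dt.sg_list can hold 16 entries and
+		 * the I/O has 40 kernel S/G entries, the loop below sends
+		 * them as 16 + 16 + 8 across three messages, setting
+		 * sg_last only on the final message.
+		 */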
+ for (sg_entries_sent = 0; sg_entries_sent <
+ msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
+ msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/
+ sizeof(msg.dt.sg_list[0])),
+ msg.dt.kern_sg_entries - sg_entries_sent);
+
+ if (do_sg_copy != 0) {
+ struct ctl_sg_entry *sgl;
+ int j;
+
+ sgl = (struct ctl_sg_entry *)
+ io->scsiio.kern_data_ptr;
+				/*
+				 * If this is in cached memory, flush the cache
+				 * before we send the DMA request to the other
+				 * controller.  We want to do this in either
+				 * the read or the write case.  The read
+				 * case is straightforward.  In the write
+				 * case, we want to make sure nothing is
+				 * in the local cache that could overwrite
+				 * the DMAed data.
+				 */
+
+ for (i = sg_entries_sent, j = 0;
+ i < msg.dt.cur_sg_entries; i++, j++) {
+ if ((io->io_hdr.flags &
+ CTL_FLAG_NO_DATASYNC) == 0) {
+ /*
+ * XXX KDM use bus_dmamap_sync()
+ */
+ }
+ if ((io->io_hdr.flags &
+ CTL_FLAG_BUS_ADDR) == 0) {
+ /*
+ * XXX KDM use busdma.
+ */
+#if 0
+ msg.dt.sg_list[j].addr =(void *)
+ vtophys(sgl[i].addr);
+#endif
+ } else {
+ msg.dt.sg_list[j].addr =
+ sgl[i].addr;
+ }
+ msg.dt.sg_list[j].len = sgl[i].len;
+ }
+ }
+
+ sg_entries_sent += msg.dt.cur_sg_entries;
+ if (sg_entries_sent >= msg.dt.kern_sg_entries)
+ msg.dt.sg_last = 1;
+ else
+ msg.dt.sg_last = 0;
+
+ /*
+ * XXX KDM drop and reacquire the lock here?
+ */
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
+ /*
+ * XXX do something here.
+ */
+ }
+
+ msg.dt.sent_sg_entries = sg_entries_sent;
+ }
+ io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+ if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
+ ctl_failover_io(io, /*have_lock*/ 1);
+
+ } else {
+
+ /*
+ * Lookup the fe_datamove() function for this particular
+ * front end.
+ */
+ fe_datamove =
+ control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
+ mtx_unlock(&control_softc->ctl_lock);
+
+ fe_datamove(io);
+ }
+}
+
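+/*
+ * Send a CTL_MSG_DATAMOVE_DONE message back to the controller that owns
+ * the original I/O, carrying the SCSI status, sense data and residuals,
+ * so that it can complete the command.  Only used in XFER mode.
+ */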
+static void
+ctl_send_datamove_done(union ctl_io *io, int have_lock)
+{
+ union ctl_ha_msg msg;
+ int isc_status;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
+ msg.hdr.original_sc = io;
+ msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
+ msg.hdr.nexus = io->io_hdr.nexus;
+ msg.hdr.status = io->io_hdr.status;
+ msg.scsi.tag_num = io->scsiio.tag_num;
+ msg.scsi.tag_type = io->scsiio.tag_type;
+ msg.scsi.scsi_status = io->scsiio.scsi_status;
+ memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
+ sizeof(io->scsiio.sense_data));
+ msg.scsi.sense_len = io->scsiio.sense_len;
+ msg.scsi.sense_residual = io->scsiio.sense_residual;
+ msg.scsi.fetd_status = io->io_hdr.port_status;
+ msg.scsi.residual = io->scsiio.residual;
+ io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+
+ if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
+ ctl_failover_io(io, /*have_lock*/ have_lock);
+ return;
+ }
+
+ isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
+ if (isc_status > CTL_HA_STATUS_SUCCESS) {
+ /* XXX do something if this fails */
+ }
+}
+
+/*
+ * The DMA to the remote side is done, now we need to tell the other side
+ * we're done so it can continue with its data movement.
+ */
+static void
+ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
+{
+ union ctl_io *io;
+
+ io = rq->context;
+
+ if (rq->ret != CTL_HA_STATUS_SUCCESS) {
+		printf("%s: ISC DMA write failed with error %d\n", __func__,
+		       rq->ret);
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/ rq->ret);
+ }
+
+ ctl_dt_req_free(rq);
+
+ /*
+ * In this case, we had to malloc the memory locally. Free it.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
+ int i;
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ free(io->io_hdr.local_sglist[i].addr, M_CTL);
+ }
+	/*
+	 * The data is in local and remote memory, so now we need to send
+	 * status (good or bad) back to the other side.
+	 */
+ ctl_send_datamove_done(io, /*have_lock*/ 0);
+}
+
+/*
+ * We've moved the data from the host/controller into local memory. Now we
+ * need to push it over to the remote controller's memory.
+ */
+static int
+ctl_datamove_remote_dm_write_cb(union ctl_io *io)
+{
+ int retval;
+
+ retval = 0;
+
+ retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
+ ctl_datamove_remote_write_cb);
+
+ return (retval);
+}
+
+static void
+ctl_datamove_remote_write(union ctl_io *io)
+{
+ int retval;
+ void (*fe_datamove)(union ctl_io *io);
+
+ /*
+ * - Get the data from the host/HBA into local memory.
+ * - DMA memory from the local controller to the remote controller.
+ * - Send status back to the remote controller.
+ */
+
+ retval = ctl_datamove_remote_sgl_setup(io);
+ if (retval != 0)
+ return;
+
+ /* Switch the pointer over so the FETD knows what to do */
+ io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
+
+ /*
+ * Use a custom move done callback, since we need to send completion
+ * back to the other controller, not to the backend on this side.
+ */
+ io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
+
+ fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
+
+ fe_datamove(io);
+}
+
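+/*
+ * Move-done callback for the remote read path.  The local FETD has
+ * finished moving the data out to the host, so free any locally
+ * allocated buffers and send status back to the other controller.
+ */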
+static int
+ctl_datamove_remote_dm_read_cb(union ctl_io *io)
+{
+#if 0
+ char str[256];
+ char path_str[64];
+ struct sbuf sb;
+#endif
+
+ /*
+ * In this case, we had to malloc the memory locally. Free it.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
+ int i;
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ free(io->io_hdr.local_sglist[i].addr, M_CTL);
+ }
+
+#if 0
+	ctl_scsi_path_string(io, path_str, sizeof(path_str));
+ sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
+ sbuf_cat(&sb, path_str);
+	ctl_scsi_command_string(&io->scsiio, NULL, &sb);
+ sbuf_printf(&sb, "\n");
+ sbuf_cat(&sb, path_str);
+ sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
+ io->scsiio.tag_num, io->scsiio.tag_type);
+ sbuf_cat(&sb, path_str);
+ sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
+ io->io_hdr.flags, io->io_hdr.status);
+ sbuf_finish(&sb);
+	printf("%s", sbuf_data(&sb));
+#endif
+
+ /*
+ * The read is done, now we need to send status (good or bad) back
+ * to the other side.
+ */
+ ctl_send_datamove_done(io, /*have_lock*/ 0);
+
+ return (0);
+}
+
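+/*
+ * DT request callback for the remote read path.  The data has been
+ * DMAed from the remote controller into local memory, so point the
+ * FETD at the local S/G list and have it move the data out to the
+ * host.
+ */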
+static void
+ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
+{
+ union ctl_io *io;
+ void (*fe_datamove)(union ctl_io *io);
+
+ io = rq->context;
+
+ if (rq->ret != CTL_HA_STATUS_SUCCESS) {
+		printf("%s: ISC DMA read failed with error %d\n", __func__,
+		       rq->ret);
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/ rq->ret);
+ }
+
+ ctl_dt_req_free(rq);
+
+ /* Switch the pointer over so the FETD knows what to do */
+ io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
+
+ /*
+ * Use a custom move done callback, since we need to send completion
+ * back to the other controller, not to the backend on this side.
+ */
+ io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
+
+ /* XXX KDM add checks like the ones in ctl_datamove? */
+
+ fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
+
+ fe_datamove(io);
+}
+
+static int
+ctl_datamove_remote_sgl_setup(union ctl_io *io)
+{
+ struct ctl_sg_entry *local_sglist, *remote_sglist;
+ struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist;
+ struct ctl_softc *softc;
+ int retval;
+ int i;
+
+ retval = 0;
+ softc = control_softc;
+
+ local_sglist = io->io_hdr.local_sglist;
+ local_dma_sglist = io->io_hdr.local_dma_sglist;
+ remote_sglist = io->io_hdr.remote_sglist;
+ remote_dma_sglist = io->io_hdr.remote_dma_sglist;
+
+ if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) {
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++) {
+ local_sglist[i].len = remote_sglist[i].len;
+
+ /*
+ * XXX Detect the situation where the RS-level I/O
+ * redirector on the other side has already read the
+ * data off of the AOR RS on this side, and
+ * transferred it to remote (mirror) memory on the
+ * other side. Since we already have the data in
+ * memory here, we just need to use it.
+ *
+ * XXX KDM this can probably be removed once we
+ * get the cache device code in and take the
+ * current AOR implementation out.
+ */
+#ifdef NEEDTOPORT
+ if ((remote_sglist[i].addr >=
+ (void *)vtophys(softc->mirr->addr))
+ && (remote_sglist[i].addr <
+ ((void *)vtophys(softc->mirr->addr) +
+ CacheMirrorOffset))) {
+ local_sglist[i].addr = remote_sglist[i].addr -
+ CacheMirrorOffset;
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_IN)
+ io->io_hdr.flags |= CTL_FLAG_REDIR_DONE;
+ } else {
+ local_sglist[i].addr = remote_sglist[i].addr +
+ CacheMirrorOffset;
+ }
+#endif
+#if 0
+ printf("%s: local %p, remote %p, len %d\n",
+ __func__, local_sglist[i].addr,
+ remote_sglist[i].addr, local_sglist[i].len);
+#endif
+ }
+ } else {
+ uint32_t len_to_go;
+
+ /*
+ * In this case, we don't have automatically allocated
+ * memory for this I/O on this controller. This typically
+ * happens with internal CTL I/O -- e.g. inquiry, mode
+ * sense, etc. Anything coming from RAIDCore will have
+ * a mirror area available.
+ */
+ len_to_go = io->scsiio.kern_data_len;
+
+ /*
+ * Clear the no datasync flag, we have to use malloced
+ * buffers.
+ */
+ io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC;
+
+		/*
+		 * The difficult thing here is that the sizes of the various
+		 * S/G segments may be different from the sizes on the
+		 * remote controller.  That'll make it harder when DMAing
+		 * the data back to the other side.
+		 */
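+		/*
+		 * Allocate the local buffers in chunks of at most 128KB
+		 * (131072 bytes) each, with the DMA length for each chunk
+		 * rounded up to an 8-byte boundary by CTL_SIZE_8B.
+		 */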
+ for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
+ sizeof(io->io_hdr.remote_sglist[0])) &&
+ (len_to_go > 0); i++) {
+ local_sglist[i].len = ctl_min(len_to_go, 131072);
+ CTL_SIZE_8B(local_dma_sglist[i].len,
+ local_sglist[i].len);
+ local_sglist[i].addr =
+			    malloc(local_dma_sglist[i].len, M_CTL, M_WAITOK);
+
+ local_dma_sglist[i].addr = local_sglist[i].addr;
+
+ if (local_sglist[i].addr == NULL) {
+ int j;
+
+				printf("malloc failed for %zd bytes!\n",
+				       local_dma_sglist[i].len);
+ for (j = 0; j < i; j++) {
+ free(local_sglist[j].addr, M_CTL);
+ }
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/ 4857);
+ retval = 1;
+ goto bailout_error;
+
+ }
+ /* XXX KDM do we need a sync here? */
+
+ len_to_go -= local_sglist[i].len;
+ }
+ /*
+ * Reset the number of S/G entries accordingly. The
+ * original number of S/G entries is available in
+ * rem_sg_entries.
+ */
+ io->scsiio.kern_sg_entries = i;
+
+#if 0
+ printf("%s: kern_sg_entries = %d\n", __func__,
+ io->scsiio.kern_sg_entries);
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
+ local_sglist[i].addr, local_sglist[i].len,
+ local_dma_sglist[i].len);
+#endif
+ }
+
+ return (retval);
+
+bailout_error:
+
+ ctl_send_datamove_done(io, /*have_lock*/ 0);
+
+ return (retval);
+}
+
+static int
+ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
+ ctl_ha_dt_cb callback)
+{
+ struct ctl_ha_dt_req *rq;
+ struct ctl_sg_entry *remote_sglist, *local_sglist;
+ struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
+ uint32_t local_used, remote_used, total_used;
+ int retval;
+ int i, j;
+
+ retval = 0;
+
+ rq = ctl_dt_req_alloc();
+
+ /*
+ * If we failed to allocate the request, and if the DMA didn't fail
+ * anyway, set busy status. This is just a resource allocation
+ * failure.
+ */
+ if ((rq == NULL)
+ && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE))
+ ctl_set_busy(&io->scsiio);
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
+
+ if (rq != NULL)
+ ctl_dt_req_free(rq);
+
+ /*
+ * The data move failed. We need to return status back
+ * to the other controller. No point in trying to DMA
+ * data to the remote controller.
+ */
+
+ ctl_send_datamove_done(io, /*have_lock*/ 0);
+
+ retval = 1;
+
+ goto bailout;
+ }
+
+ local_sglist = io->io_hdr.local_sglist;
+ local_dma_sglist = io->io_hdr.local_dma_sglist;
+ remote_sglist = io->io_hdr.remote_sglist;
+ remote_dma_sglist = io->io_hdr.remote_dma_sglist;
+ local_used = 0;
+ remote_used = 0;
+ total_used = 0;
+
+ if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
+ rq->ret = CTL_HA_STATUS_SUCCESS;
+ rq->context = io;
+ callback(rq);
+ goto bailout;
+ }
+
+ /*
+ * Pull/push the data over the wire from/to the other controller.
+ * This takes into account the possibility that the local and
+ * remote sglists may not be identical in terms of the size of
+ * the elements and the number of elements.
+ *
+ * One fundamental assumption here is that the length allocated for
+ * both the local and remote sglists is identical. Otherwise, we've
+ * essentially got a coding error of some sort.
+ */
+ for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
+ int isc_ret;
+ uint32_t cur_len, dma_length;
+ uint8_t *tmp_ptr;
+
+ rq->id = CTL_HA_DATA_CTL;
+ rq->command = command;
+ rq->context = io;
+
+ /*
+ * Both pointers should be aligned. But it is possible
+ * that the allocation length is not. They should both
+ * also have enough slack left over at the end, though,
+ * to round up to the next 8 byte boundary.
+ */
+ cur_len = ctl_min(local_sglist[i].len - local_used,
+ remote_sglist[j].len - remote_used);
+
+		/*
+		 * If the length is not a multiple of 8, we need to decrease
+		 * the size, except in the case where we actually have less
+		 * than 8 bytes left.  In that case, we need to increase
+		 * the DMA length to get the last bit.
+		 */
+ if ((cur_len & 0x7) != 0) {
+ if (cur_len > 0x7) {
+ cur_len = cur_len - (cur_len & 0x7);
+ dma_length = cur_len;
+ } else {
+ CTL_SIZE_8B(dma_length, cur_len);
+ }
+
+ } else
+ dma_length = cur_len;
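+		/*
+		 * For example, cur_len = 20 is trimmed to 16 here (the
+		 * remaining 4 bytes go into the next request), while
+		 * cur_len = 5 is left alone and dma_length is rounded up
+		 * to 8.
+		 */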
+
+ /*
+ * If we had to allocate memory for this I/O, instead of using
+ * the non-cached mirror memory, we'll need to flush the cache
+ * before trying to DMA to the other controller.
+ *
+ * We could end up doing this multiple times for the same
+ * segment if we have a larger local segment than remote
+ * segment. That shouldn't be an issue.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
+ /*
+ * XXX KDM use bus_dmamap_sync() here.
+ */
+ }
+
+ rq->size = dma_length;
+
+ tmp_ptr = (uint8_t *)local_sglist[i].addr;
+ tmp_ptr += local_used;
+
+ /* Use physical addresses when talking to ISC hardware */
+ if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
+ /* XXX KDM use busdma */
+#if 0
+ rq->local = vtophys(tmp_ptr);
+#endif
+ } else
+ rq->local = tmp_ptr;
+
+ tmp_ptr = (uint8_t *)remote_sglist[j].addr;
+ tmp_ptr += remote_used;
+ rq->remote = tmp_ptr;
+
+ rq->callback = NULL;
+
+ local_used += cur_len;
+ if (local_used >= local_sglist[i].len) {
+ i++;
+ local_used = 0;
+ }
+
+ remote_used += cur_len;
+ if (remote_used >= remote_sglist[j].len) {
+ j++;
+ remote_used = 0;
+ }
+ total_used += cur_len;
+
+ if (total_used >= io->scsiio.kern_data_len)
+ rq->callback = callback;
+
+ if ((rq->size & 0x7) != 0) {
+ printf("%s: warning: size %d is not on 8b boundary\n",
+ __func__, rq->size);
+ }
+ if (((uintptr_t)rq->local & 0x7) != 0) {
+ printf("%s: warning: local %p not on 8b boundary\n",
+ __func__, rq->local);
+ }
+ if (((uintptr_t)rq->remote & 0x7) != 0) {
+			printf("%s: warning: remote %p not on 8b boundary\n",
+			       __func__, rq->remote);
+ }
+#if 0
+ printf("%s: %s: local %#x remote %#x size %d\n", __func__,
+ (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
+ rq->local, rq->remote, rq->size);
+#endif
+
+ isc_ret = ctl_dt_single(rq);
+ if (isc_ret == CTL_HA_STATUS_WAIT)
+ continue;
+
+ if (isc_ret == CTL_HA_STATUS_DISCONNECT) {
+ rq->ret = CTL_HA_STATUS_SUCCESS;
+ } else {
+ rq->ret = isc_ret;
+ }
+ callback(rq);
+ goto bailout;
+ }
+
+bailout:
+ return (retval);
+}
+
+static void
+ctl_datamove_remote_read(union ctl_io *io)
+{
+ int retval;
+ int i;
+
+ /*
+ * This will send an error to the other controller in the case of a
+ * failure.
+ */
+ retval = ctl_datamove_remote_sgl_setup(io);
+ if (retval != 0)
+ return;
+
+ retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
+ ctl_datamove_remote_read_cb);
+ if ((retval != 0)
+ && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) {
+ /*
+ * Make sure we free memory if there was an error.. The
+ * ctl_datamove_remote_xfer() function will send the
+ * datamove done message, or call the callback with an
+ * error if there is a problem.
+ */
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ free(io->io_hdr.local_sglist[i].addr, M_CTL);
+ }
+}
+
+/*
+ * Process a datamove request from the other controller. This is used for
+ * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory
+ * first. Once that is complete, the data gets DMAed into the remote
+ * controller's memory. For reads, we DMA from the remote controller's
+ * memory into our memory first, and then move it out to the FETD.
+ *
+ * Should be called without the ctl_lock held.
+ */
+static void
+ctl_datamove_remote(union ctl_io *io)
+{
+ struct ctl_softc *softc;
+
+ softc = control_softc;
+
+ /*
+ * Note that we look for an aborted I/O here, but don't do some of
+ * the other checks that ctl_datamove() normally does. We don't
+ * need to run the task queue, because this I/O is on the ISC
+ * queue, which is executed by the work thread after the task queue.
+ * We don't need to run the datamove delay code, since that should
+ * have been done if need be on the other controller.
+ */
+ mtx_lock(&softc->ctl_lock);
+
+ if (io->io_hdr.flags & CTL_FLAG_ABORT) {
+
+ printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
+ io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
+ io->io_hdr.nexus.targ_port,
+ io->io_hdr.nexus.targ_target.id,
+ io->io_hdr.nexus.targ_lun);
+ io->io_hdr.status = CTL_CMD_ABORTED;
+ io->io_hdr.port_status = 31338;
+
+ mtx_unlock(&softc->ctl_lock);
+
+ ctl_send_datamove_done(io, /*have_lock*/ 0);
+
+ return;
+ }
+
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
+ mtx_unlock(&softc->ctl_lock);
+ ctl_datamove_remote_write(io);
+ } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
+ mtx_unlock(&softc->ctl_lock);
+ ctl_datamove_remote_read(io);
+ } else {
+ union ctl_ha_msg msg;
+ struct scsi_sense_data *sense;
+ uint8_t sks[3];
+ int retry_count;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
+ msg.hdr.status = CTL_SCSI_ERROR;
+ msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
+
+ retry_count = 4243;
+
+ sense = &msg.scsi.sense_data;
+ sks[0] = SSD_SCS_VALID;
+ sks[1] = (retry_count >> 8) & 0xff;
+ sks[2] = retry_count & 0xff;
+
+ /* "Internal target failure" */
+ scsi_set_sense_data(sense,
+ /*sense_format*/ SSD_TYPE_NONE,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
+ /*asc*/ 0x44,
+ /*ascq*/ 0x00,
+ /*type*/ SSD_ELEM_SKS,
+ /*size*/ sizeof(sks),
+ /*data*/ sks,
+ SSD_ELEM_NONE);
+
+ io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+ if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
+ ctl_failover_io(io, /*have_lock*/ 1);
+ mtx_unlock(&softc->ctl_lock);
+ return;
+ }
+
+ mtx_unlock(&softc->ctl_lock);
+
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
+ CTL_HA_STATUS_SUCCESS) {
+ /* XXX KDM what to do if this fails? */
+ }
+ return;
+ }
+}
+
+static int
+ctl_process_done(union ctl_io *io, int have_lock)
+{
+ struct ctl_lun *lun;
+ struct ctl_softc *ctl_softc;
+ void (*fe_done)(union ctl_io *io);
+ uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);
+
+ CTL_DEBUG_PRINT(("ctl_process_done\n"));
+
+ fe_done =
+ control_softc->ctl_ports[targ_port]->fe_done;
+
+#ifdef CTL_TIME_IO
+ if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
+ char str[256];
+ char path_str[64];
+ struct sbuf sb;
+
+ ctl_scsi_path_string(io, path_str, sizeof(path_str));
+ sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
+
+ sbuf_cat(&sb, path_str);
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ ctl_scsi_command_string(&io->scsiio, NULL, &sb);
+ sbuf_printf(&sb, "\n");
+ sbuf_cat(&sb, path_str);
+ sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
+ io->scsiio.tag_num, io->scsiio.tag_type);
+ break;
+ case CTL_IO_TASK:
+ sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
+ "Tag Type: %d\n", io->taskio.task_action,
+ io->taskio.tag_num, io->taskio.tag_type);
+ break;
+ default:
+ printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
+ panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
+ break;
+ }
+ sbuf_cat(&sb, path_str);
+ sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
+ (intmax_t)time_uptime - io->io_hdr.start_time);
+ sbuf_finish(&sb);
+ printf("%s", sbuf_data(&sb));
+ }
+#endif /* CTL_TIME_IO */
+
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ break;
+ case CTL_IO_TASK:
+ ctl_io_error_print(io, NULL);
+ if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
+ ctl_free_io_internal(io, /*have_lock*/ 0);
+ else
+ fe_done(io);
+ return (CTL_RETVAL_COMPLETE);
+ break;
+ default:
+ printf("ctl_process_done: invalid io type %d\n",
+ io->io_hdr.io_type);
+ panic("ctl_process_done: invalid io type %d\n",
+ io->io_hdr.io_type);
+ break; /* NOTREACHED */
+ }
+
+ lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ if (lun == NULL) {
+ CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
+ io->io_hdr.nexus.targ_lun));
+ fe_done(io);
+ goto bailout;
+ }
+ ctl_softc = lun->ctl_softc;
+
+ /*
+ * Remove this from the OOA queue.
+ */
+ if (have_lock == 0)
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ /*
+ * Check to see if we have any errors to inject here. We only
+ * inject errors for commands that don't already have errors set.
+ */
+ if ((STAILQ_FIRST(&lun->error_list) != NULL)
+ && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))
+ ctl_inject_error(lun, io);
+
+ /*
+ * XXX KDM how do we treat commands that aren't completed
+ * successfully?
+ *
+ * XXX KDM should we also track I/O latency?
+ */
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ uint32_t blocksize;
+#ifdef CTL_TIME_IO
+ struct bintime cur_bt;
+#endif
+
+ if ((lun->be_lun != NULL)
+ && (lun->be_lun->blocksize != 0))
+ blocksize = lun->be_lun->blocksize;
+ else
+ blocksize = 512;
+
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI: {
+ int isread;
+ struct ctl_lba_len lbalen;
+
+ isread = 0;
+ switch (io->scsiio.cdb[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ isread = 1;
+ /* FALLTHROUGH */
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_VERIFY_10:
+ case WRITE_VERIFY_12:
+ case WRITE_VERIFY_16:
+ memcpy(&lbalen, io->io_hdr.ctl_private[
+ CTL_PRIV_LBA_LEN].bytes, sizeof(lbalen));
+
+ if (isread) {
+ lun->stats.ports[targ_port].bytes[CTL_STATS_READ] +=
+ lbalen.len * blocksize;
+ lun->stats.ports[targ_port].operations[CTL_STATS_READ]++;
+
+#ifdef CTL_TIME_IO
+ bintime_add(
+ &lun->stats.ports[targ_port].dma_time[CTL_STATS_READ],
+ &io->io_hdr.dma_bt);
+ lun->stats.ports[targ_port].num_dmas[CTL_STATS_READ] +=
+ io->io_hdr.num_dmas;
+ getbintime(&cur_bt);
+ bintime_sub(&cur_bt,
+ &io->io_hdr.start_bt);
+
+ bintime_add(
+ &lun->stats.ports[targ_port].time[CTL_STATS_READ],
+ &cur_bt);
+
+#if 0
+ cs_prof_gettime(&cur_ticks);
+ lun->stats.time[CTL_STATS_READ] +=
+ cur_ticks -
+ io->io_hdr.start_ticks;
+#endif
+#if 0
+ lun->stats.time[CTL_STATS_READ] +=
+ jiffies - io->io_hdr.start_time;
+#endif
+#endif /* CTL_TIME_IO */
+ } else {
+ lun->stats.ports[targ_port].bytes[CTL_STATS_WRITE] +=
+ lbalen.len * blocksize;
+ lun->stats.ports[targ_port].operations[
+ CTL_STATS_WRITE]++;
+
+#ifdef CTL_TIME_IO
+ bintime_add(
+ &lun->stats.ports[targ_port].dma_time[CTL_STATS_WRITE],
+ &io->io_hdr.dma_bt);
+ lun->stats.ports[targ_port].num_dmas[CTL_STATS_WRITE] +=
+ io->io_hdr.num_dmas;
+ getbintime(&cur_bt);
+ bintime_sub(&cur_bt,
+ &io->io_hdr.start_bt);
+
+ bintime_add(
+ &lun->stats.ports[targ_port].time[CTL_STATS_WRITE],
+ &cur_bt);
+#if 0
+ cs_prof_gettime(&cur_ticks);
+ lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
+ cur_ticks -
+ io->io_hdr.start_ticks;
+ lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
+ jiffies - io->io_hdr.start_time;
+#endif
+#endif /* CTL_TIME_IO */
+ }
+ break;
+ default:
+ lun->stats.ports[targ_port].operations[CTL_STATS_NO_IO]++;
+
+#ifdef CTL_TIME_IO
+ bintime_add(
+ &lun->stats.ports[targ_port].dma_time[CTL_STATS_NO_IO],
+ &io->io_hdr.dma_bt);
+ lun->stats.ports[targ_port].num_dmas[CTL_STATS_NO_IO] +=
+ io->io_hdr.num_dmas;
+ getbintime(&cur_bt);
+ bintime_sub(&cur_bt, &io->io_hdr.start_bt);
+
+ bintime_add(&lun->stats.ports[targ_port].time[CTL_STATS_NO_IO],
+ &cur_bt);
+
+#if 0
+ cs_prof_gettime(&cur_ticks);
+ lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
+ cur_ticks -
+ io->io_hdr.start_ticks;
+ lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
+ jiffies - io->io_hdr.start_time;
+#endif
+#endif /* CTL_TIME_IO */
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
+
+ /*
+ * Run through the blocked queue on this LUN and see if anything
+ * has become unblocked, now that this transaction is done.
+ */
+ ctl_check_blocked(lun);
+
+ /*
+ * If the LUN has been invalidated, free it if there is nothing
+ * left on its OOA queue.
+ */
+ if ((lun->flags & CTL_LUN_INVALID)
+ && (TAILQ_FIRST(&lun->ooa_queue) == NULL))
+ ctl_free_lun(lun);
+
+ /*
+ * If this command has been aborted, make sure we set the status
+ * properly. The FETD is responsible for freeing the I/O and doing
+ * whatever it needs to do to clean up its state.
+ */
+ if (io->io_hdr.flags & CTL_FLAG_ABORT)
+ io->io_hdr.status = CTL_CMD_ABORTED;
+
+	/*
+	 * We print out status for every task management command.  For SCSI
+	 * commands, we filter out any unit attention errors; they happen
+	 * on every boot, and would clutter up the log.  Note that task
+	 * management commands are printed above, where they return early,
+	 * not here; they should never even make it down this far.
+	 */
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI: {
+ int error_code, sense_key, asc, ascq;
+
+ sense_key = 0;
+
+ if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)
+ && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
+ /*
+ * Since this is just for printing, no need to
+ * show errors here.
+ */
+ scsi_extract_sense_len(&io->scsiio.sense_data,
+ io->scsiio.sense_len,
+ &error_code,
+ &sense_key,
+ &asc,
+ &ascq,
+ /*show_errors*/ 0);
+ }
+
+ if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+ && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR)
+ || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND)
+ || (sense_key != SSD_KEY_UNIT_ATTENTION))) {
+
+			if ((time_uptime - ctl_softc->last_print_jiffies) <= 0) {
+ ctl_softc->skipped_prints++;
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+ } else {
+ uint32_t skipped_prints;
+
+ skipped_prints = ctl_softc->skipped_prints;
+
+ ctl_softc->skipped_prints = 0;
+ ctl_softc->last_print_jiffies = time_uptime;
+
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+ if (skipped_prints > 0) {
+#ifdef NEEDTOPORT
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_ERROR_REPORT,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "High CTL error volume, %d prints "
+ "skipped", skipped_prints);
+#endif
+ }
+ ctl_io_error_print(io, NULL);
+ }
+ } else {
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+ }
+ break;
+ }
+ case CTL_IO_TASK:
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+ ctl_io_error_print(io, NULL);
+ break;
+ default:
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+ break;
+ }
+
+ /*
+ * Tell the FETD or the other shelf controller we're done with this
+ * command. Note that only SCSI commands get to this point. Task
+ * management commands are completed above.
+ *
+ * We only send status to the other controller if we're in XFER
+ * mode. In SER_ONLY mode, the I/O is done on the controller that
+ * received the I/O (from CTL's perspective), and so the status is
+ * generated there.
+ *
+ * XXX KDM if we hold the lock here, we could cause a deadlock
+ * if the frontend comes back in in this context to queue
+ * something.
+ */
+ if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER)
+ && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
+ union ctl_ha_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.hdr.msg_type = CTL_MSG_FINISH_IO;
+ msg.hdr.original_sc = io->io_hdr.original_sc;
+ msg.hdr.nexus = io->io_hdr.nexus;
+ msg.hdr.status = io->io_hdr.status;
+ msg.scsi.scsi_status = io->scsiio.scsi_status;
+ msg.scsi.tag_num = io->scsiio.tag_num;
+ msg.scsi.tag_type = io->scsiio.tag_type;
+ msg.scsi.sense_len = io->scsiio.sense_len;
+ msg.scsi.sense_residual = io->scsiio.sense_residual;
+ msg.scsi.residual = io->scsiio.residual;
+ memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
+ sizeof(io->scsiio.sense_data));
+ /*
+ * We copy this whether or not this is an I/O-related
+ * command. Otherwise, we'd have to go and check to see
+ * whether it's a read/write command, and it really isn't
+ * worth it.
+ */
+ memcpy(&msg.scsi.lbalen,
+ &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
+		    sizeof(msg.scsi.lbalen));
+
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
+ /* XXX do something here */
+ }
+
+ ctl_free_io_internal(io, /*have_lock*/ 0);
+ } else
+ fe_done(io);
+
+bailout:
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+/*
+ * Front end should call this if it doesn't do autosense. When the request
+ * sense comes back in from the initiator, we'll dequeue this and send it.
+ */
+int
+ctl_queue_sense(union ctl_io *io)
+{
+ struct ctl_lun *lun;
+ struct ctl_softc *ctl_softc;
+ uint32_t initidx;
+
+ ctl_softc = control_softc;
+
+ CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
+
+ /*
+ * LUN lookup will likely move to the ctl_work_thread() once we
+ * have our new queueing infrastructure (that doesn't put things on
+ * a per-LUN queue initially). That is so that we can handle
+ * things like an INQUIRY to a LUN that we don't have enabled. We
+ * can't deal with that right now.
+ */
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ /*
+ * If we don't have a LUN for this, just toss the sense
+ * information.
+ */
+ if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
+ && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL))
+ lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
+ else
+ goto bailout;
+
+ initidx = ctl_get_initindex(&io->io_hdr.nexus);
+
+ /*
+ * Already have CA set for this LUN...toss the sense information.
+ */
+ if (ctl_is_set(lun->have_ca, initidx))
+ goto bailout;
+
+ memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data,
+ ctl_min(sizeof(lun->pending_sense[initidx].sense),
+ sizeof(io->scsiio.sense_data)));
+ ctl_set_mask(lun->have_ca, initidx);
+
+bailout:
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ ctl_free_io(io);
+
+ return (CTL_RETVAL_COMPLETE);
+}
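+
+/*
+ * Illustrative sketch (not part of this change): a frontend without
+ * autosense would copy the sense bytes it received into the I/O and hand
+ * it to ctl_queue_sense(); "sense_bytes" below is a hypothetical buffer.
+ * Note that ctl_queue_sense() frees the I/O, so the caller must not
+ * touch it afterward.
+ *
+ *	memcpy(&io->scsiio.sense_data, sense_bytes, sizeof(sense_bytes));
+ *	io->scsiio.sense_len = sizeof(sense_bytes);
+ *	ctl_queue_sense(io);
+ */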
+
+/*
+ * Primary command inlet from frontend ports. All SCSI and task I/O
+ * requests must go through this function.
+ */
+int
+ctl_queue(union ctl_io *io)
+{
+ struct ctl_softc *ctl_softc;
+
+ CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
+
+ ctl_softc = control_softc;
+
+#ifdef CTL_TIME_IO
+ io->io_hdr.start_time = time_uptime;
+ getbintime(&io->io_hdr.start_bt);
+#endif /* CTL_TIME_IO */
+
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ STAILQ_INSERT_TAIL(&ctl_softc->incoming_queue, &io->io_hdr,
+ links);
+ break;
+ case CTL_IO_TASK:
+ STAILQ_INSERT_TAIL(&ctl_softc->task_queue, &io->io_hdr, links);
+ /*
+ * Set the task pending flag. This is necessary to close a
+ * race condition with the FETD:
+ *
+ * - FETD submits a task management command, like an abort.
+ * - Back end calls fe_datamove() to move the data for the
+ * aborted command. The FETD can't really accept it, but
+ * if it did, it would end up transmitting data for a
+ * command that the initiator told us to abort.
+ *
+ * We close the race condition by setting the flag here,
+ * and checking it in ctl_datamove(), before calling the
+ * FETD's fe_datamove routine. If we've got a task
+ * pending, we run the task queue and then check to see
+ * whether our particular I/O has been aborted.
+ */
+ ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
+ break;
+ default:
+ mtx_unlock(&ctl_softc->ctl_lock);
+ printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
+ return (-EINVAL);
+ break; /* NOTREACHED */
+ }
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ ctl_wakeup_thread();
+
+ return (CTL_RETVAL_COMPLETE);
+}
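+
+/*
+ * Illustrative sketch (hypothetical frontend code, not part of this
+ * change): a minimal SCSI submission through ctl_queue().  The nexus and
+ * tag fields are the ones CTL relies on; initiator_id, my_targ_port,
+ * lun_number, tag and the error handler are placeholders.
+ *
+ *	io->io_hdr.io_type = CTL_IO_SCSI;
+ *	io->io_hdr.nexus.initid.id = initiator_id;
+ *	io->io_hdr.nexus.targ_port = my_targ_port;
+ *	io->io_hdr.nexus.targ_target.id = 0;
+ *	io->io_hdr.nexus.targ_lun = lun_number;
+ *	io->scsiio.tag_num = tag;
+ *	io->scsiio.tag_type = CTL_TAG_SIMPLE;
+ *	if (ctl_queue(io) != CTL_RETVAL_COMPLETE)
+ *		frontend_handle_error(io);
+ */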
+
+#ifdef CTL_IO_DELAY
+static void
+ctl_done_timer_wakeup(void *arg)
+{
+ union ctl_io *io;
+
+ io = (union ctl_io *)arg;
+ ctl_done_lock(io, /*have_lock*/ 0);
+}
+#endif /* CTL_IO_DELAY */
+
+void
+ctl_done_lock(union ctl_io *io, int have_lock)
+{
+ struct ctl_softc *ctl_softc;
+#ifndef CTL_DONE_THREAD
+ union ctl_io *xio;
+#endif /* !CTL_DONE_THREAD */
+
+ ctl_softc = control_softc;
+
+ if (have_lock == 0)
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ /*
+ * Enable this to catch duplicate completion issues.
+ */
+#if 0
+ if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
+ printf("%s: type %d msg %d cdb %x iptl: "
+ "%d:%d:%d:%d tag 0x%04x "
+ "flag %#x status %x\n",
+ __func__,
+ io->io_hdr.io_type,
+ io->io_hdr.msg_type,
+ io->scsiio.cdb[0],
+ io->io_hdr.nexus.initid.id,
+ io->io_hdr.nexus.targ_port,
+ io->io_hdr.nexus.targ_target.id,
+ io->io_hdr.nexus.targ_lun,
+ (io->io_hdr.io_type ==
+ CTL_IO_TASK) ?
+ io->taskio.tag_num :
+ io->scsiio.tag_num,
+ io->io_hdr.flags,
+ io->io_hdr.status);
+ } else
+ io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
+#endif
+
+ /*
+ * This is an internal copy of an I/O, and should not go through
+ * the normal done processing logic.
+ */
+ if (io->io_hdr.flags & CTL_FLAG_INT_COPY) {
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return;
+ }
+
+ /*
+ * We need to send a msg to the serializing shelf to finish the IO
+ * as well. We don't send a finish message to the other shelf if
+ * this is a task management command. Task management commands
+ * aren't serialized in the OOA queue, but rather just executed on
+ * both shelf controllers for commands that originated on that
+ * controller.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
+ && (io->io_hdr.io_type != CTL_IO_TASK)) {
+ union ctl_ha_msg msg_io;
+
+ msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
+ msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
+	    sizeof(msg_io), 0) != CTL_HA_STATUS_SUCCESS) {
+ }
+ /* continue on to finish IO */
+ }
+#ifdef CTL_IO_DELAY
+ if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
+ struct ctl_lun *lun;
+
+ lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
+ } else {
+ struct ctl_lun *lun;
+
+ lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ if ((lun != NULL)
+ && (lun->delay_info.done_delay > 0)) {
+ struct callout *callout;
+
+ callout = (struct callout *)&io->io_hdr.timer_bytes;
+ callout_init(callout, /*mpsafe*/ 1);
+ io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
+ callout_reset(callout,
+ lun->delay_info.done_delay * hz,
+ ctl_done_timer_wakeup, io);
+ if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
+ lun->delay_info.done_delay = 0;
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return;
+ }
+ }
+#endif /* CTL_IO_DELAY */
+
+ STAILQ_INSERT_TAIL(&ctl_softc->done_queue, &io->io_hdr, links);
+
+#ifdef CTL_DONE_THREAD
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ ctl_wakeup_thread();
+#else /* CTL_DONE_THREAD */
+ for (xio = (union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue);
+ xio != NULL;
+ xio =(union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue)) {
+
+ STAILQ_REMOVE_HEAD(&ctl_softc->done_queue, links);
+
+ ctl_process_done(xio, /*have_lock*/ 1);
+ }
+ if (have_lock == 0)
+ mtx_unlock(&ctl_softc->ctl_lock);
+#endif /* CTL_DONE_THREAD */
+}
+
+void
+ctl_done(union ctl_io *io)
+{
+ ctl_done_lock(io, /*have_lock*/ 0);
+}
+
+int
+ctl_isc(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ int retval;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));
+
+ CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));
+
+ retval = lun->backend->data_submit((union ctl_io *)ctsio);
+
+ return (retval);
+}
+
+
+static void
+ctl_work_thread(void *arg)
+{
+ struct ctl_softc *softc;
+ union ctl_io *io;
+ struct ctl_be_lun *be_lun;
+ int retval;
+
+ CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
+
+ softc = (struct ctl_softc *)arg;
+ if (softc == NULL)
+ return;
+
+ mtx_lock(&softc->ctl_lock);
+ for (;;) {
+ retval = 0;
+
+ /*
+ * We handle the queues in this order:
+ * - task management
+ * - ISC
+ * - done queue (to free up resources, unblock other commands)
+ * - RtR queue
+ * - incoming queue
+ *
+ * If those queues are empty, we break out of the loop and
+ * go to sleep.
+ */
+ io = (union ctl_io *)STAILQ_FIRST(&softc->task_queue);
+ if (io != NULL) {
+ ctl_run_task_queue(softc);
+ continue;
+ }
+ io = (union ctl_io *)STAILQ_FIRST(&softc->isc_queue);
+ if (io != NULL) {
+ STAILQ_REMOVE_HEAD(&softc->isc_queue, links);
+ ctl_handle_isc(io);
+ continue;
+ }
+ io = (union ctl_io *)STAILQ_FIRST(&softc->done_queue);
+ if (io != NULL) {
+ STAILQ_REMOVE_HEAD(&softc->done_queue, links);
+ /* clear any blocked commands, call fe_done */
+ mtx_unlock(&softc->ctl_lock);
+ /*
+ * XXX KDM
+ * Call this without a lock for now. This will
+ * depend on whether there is any way the FETD can
+ * sleep or deadlock if called with the CTL lock
+ * held.
+ */
+ retval = ctl_process_done(io, /*have_lock*/ 0);
+ mtx_lock(&softc->ctl_lock);
+ continue;
+ }
+ if (!ctl_pause_rtr) {
+ io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
+ if (io != NULL) {
+ STAILQ_REMOVE_HEAD(&softc->rtr_queue, links);
+ mtx_unlock(&softc->ctl_lock);
+ goto execute;
+ }
+ }
+ io = (union ctl_io *)STAILQ_FIRST(&softc->incoming_queue);
+ if (io != NULL) {
+ STAILQ_REMOVE_HEAD(&softc->incoming_queue, links);
+ mtx_unlock(&softc->ctl_lock);
+ ctl_scsiio_precheck(softc, &io->scsiio);
+ mtx_lock(&softc->ctl_lock);
+ continue;
+ }
+ /*
+ * We might want to move this to a separate thread, so that
+ * configuration requests (in this case LUN creations)
+ * won't impact the I/O path.
+ */
+ be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
+ if (be_lun != NULL) {
+ STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
+ mtx_unlock(&softc->ctl_lock);
+ ctl_create_lun(be_lun);
+ mtx_lock(&softc->ctl_lock);
+ continue;
+ }
+
+ /* XXX KDM use the PDROP flag?? */
+ /* Sleep until we have something to do. */
+ mtx_sleep(softc, &softc->ctl_lock, PRIBIO, "ctl_work", 0);
+
+ /* Back to the top of the loop to see what woke us up. */
+ continue;
+
+execute:
+ retval = ctl_scsiio(&io->scsiio);
+ switch (retval) {
+ case CTL_RETVAL_COMPLETE:
+ break;
+ default:
+ /*
+ * Probably need to make sure this doesn't happen.
+ */
+ break;
+ }
+ mtx_lock(&softc->ctl_lock);
+ }
+}
+
+void
+ctl_wakeup_thread(void)
+{
+ struct ctl_softc *softc;
+
+ softc = control_softc;
+
+ wakeup(softc);
+}
+
+/* Initialization and failover */
+
+void
+ctl_init_isc_msg(void)
+{
+ printf("CTL: Still calling this thing\n");
+}
+
+/*
+ * Init component
+ * Initializes component into configuration defined by bootMode
+ * (see hasc-sv.c)
+ * returns hasc_Status:
+ * OK
+ * ERROR - fatal error
+ */
+static ctl_ha_comp_status
+ctl_isc_init(struct ctl_ha_component *c)
+{
+ ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
+
+ c->status = ret;
+	return (ret);
+}
+
+/* Start component
+ * Starts component in state requested. If component starts successfully,
+ * it must set its own state to the requested state
+ * When requested state is HASC_STATE_HA, the component may refine it
+ * by adding _SLAVE or _MASTER flags.
+ * Currently allowed state transitions are:
+ * UNKNOWN->HA - initial startup
+ *	UNKNOWN->SINGLE - initial startup when no partner detected
+ * HA->SINGLE - failover
+ * returns ctl_ha_comp_status:
+ * OK - component successfully started in requested state
+ * FAILED - could not start the requested state, failover may
+ * be possible
+ * ERROR - fatal error detected, no future startup possible
+ */
+static ctl_ha_comp_status
+ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
+{
+ ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
+
+	/* UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) */
+	if (c->state == CTL_HA_STATE_UNKNOWN) {
+ ctl_is_single = 0;
+ if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
+ != CTL_HA_STATUS_SUCCESS) {
+ printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
+ ret = CTL_HA_COMP_STATUS_ERROR;
+ }
+ } else if (CTL_HA_STATE_IS_HA(c->state)
+	    && CTL_HA_STATE_IS_SINGLE(state)) {
+		/* HA->SINGLE transition */
+ ctl_failover();
+ ctl_is_single = 1;
+ } else {
+		printf("ctl_isc_start: Invalid state transition %X->%X\n",
+ c->state, state);
+ ret = CTL_HA_COMP_STATUS_ERROR;
+ }
+ if (CTL_HA_STATE_IS_SINGLE(state))
+ ctl_is_single = 1;
+
+ c->state = state;
+ c->status = ret;
+	return (ret);
+}
+
+/*
+ * Quiesce component
+ * The component must clear any error conditions (set status to OK) and
+ * prepare itself to another Start call
+ * returns ctl_ha_comp_status:
+ * OK
+ * ERROR
+ */
+static ctl_ha_comp_status
+ctl_isc_quiesce(struct ctl_ha_component *c)
+{
+	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
+
+	ctl_pause_rtr = 1;
+	c->status = ret;
+	return (ret);
+}
+
+struct ctl_ha_component ctl_ha_component_ctlisc =
+{
+ .name = "CTL ISC",
+ .state = CTL_HA_STATE_UNKNOWN,
+ .init = ctl_isc_init,
+ .start = ctl_isc_start,
+ .quiesce = ctl_isc_quiesce
+};
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl.h b/sys/cam/ctl/ctl.h
new file mode 100644
index 0000000..0f7656f
--- /dev/null
+++ b/sys/cam/ctl/ctl.h
@@ -0,0 +1,216 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.h#5 $
+ * $FreeBSD$
+ */
+/*
+ * Function definitions used both within CTL and potentially in various CTL
+ * clients.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_H_
+#define _CTL_H_
+
+#define ctl_min(x,y) (((x) < (y)) ? (x) : (y))
+#define CTL_RETVAL_COMPLETE 0
+#define CTL_RETVAL_QUEUED 1
+#define CTL_RETVAL_ALLOCATED 2
+#define CTL_RETVAL_ERROR 3
+
+typedef enum {
+ CTL_PORT_NONE = 0x00,
+ CTL_PORT_FC = 0x01,
+ CTL_PORT_SCSI = 0x02,
+ CTL_PORT_IOCTL = 0x04,
+ CTL_PORT_INTERNAL = 0x08,
+ CTL_PORT_ALL = 0xff,
+	CTL_PORT_ISC = 0x100	/* FC port for inter-shelf communication */
+} ctl_port_type;
+
+struct ctl_port_entry {
+ ctl_port_type port_type;
+ char port_name[64];
+ int32_t targ_port;
+ int physical_port;
+ int virtual_port;
+ u_int flags;
+#define CTL_PORT_WWNN_VALID 0x01
+#define CTL_PORT_WWPN_VALID 0x02
+ uint64_t wwnn;
+ uint64_t wwpn;
+ int online;
+};
+
+struct ctl_modepage_header {
+ uint8_t page_code;
+ uint8_t subpage;
+ int32_t len_used;
+ int32_t len_left;
+};
+
+struct ctl_modepage_aps {
+ struct ctl_modepage_header header;
+ uint8_t lock_active;
+};
+
+union ctl_modepage_info {
+ struct ctl_modepage_header header;
+ struct ctl_modepage_aps aps;
+};
+
+/*
+ * Serial number length, for VPD page 0x80.
+ */
+#define CTL_SN_LEN 16
+
+/*
+ * Device ID length, for VPD page 0x83.
+ */
+#define CTL_DEVID_LEN 16
+/*
+ * WWPN length, for VPD page 0x83.
+ */
+#define CTL_WWPN_LEN 8
+
+/*
+ * Unit attention types. ASC/ASCQ values for these should be placed in
+ * ctl_build_ua. These are also listed in order of reporting priority.
+ * i.e. a poweron UA is reported first, bus reset second, etc.
+ */
+typedef enum {
+ CTL_UA_NONE = 0x0000,
+ CTL_UA_POWERON = 0x0001,
+ CTL_UA_BUS_RESET = 0x0002,
+ CTL_UA_TARG_RESET = 0x0004,
+ CTL_UA_LUN_RESET = 0x0008,
+ CTL_UA_LUN_CHANGE = 0x0010,
+ CTL_UA_MODE_CHANGE = 0x0020,
+ CTL_UA_LOG_CHANGE = 0x0040,
+ CTL_UA_LVD = 0x0080,
+ CTL_UA_SE = 0x0100,
+ CTL_UA_RES_PREEMPT = 0x0200,
+ CTL_UA_RES_RELEASE = 0x0400,
+ CTL_UA_REG_PREEMPT = 0x0800,
+ CTL_UA_ASYM_ACC_CHANGE = 0x1000
+} ctl_ua_type;
+
+#ifdef _KERNEL
+
+MALLOC_DECLARE(M_CTL);
+
+typedef enum {
+ CTL_THREAD_NONE = 0x00,
+ CTL_THREAD_WAKEUP = 0x01
+} ctl_thread_flags;
+
+struct ctl_thread {
+ void (*thread_func)(void *arg);
+ void *arg;
+ struct cv wait_queue;
+ const char *thread_name;
+ ctl_thread_flags thread_flags;
+ struct completion *thread_event;
+ struct task_struct *task;
+};
+
+struct ctl_page_index;
+
+#ifdef SYSCTL_DECL /* from sysctl.h */
+SYSCTL_DECL(_kern_cam_ctl);
+#endif
+
+/*
+ * Call these routines to enable or disable front end ports.
+ */
+int ctl_port_enable(ctl_port_type port_type);
+int ctl_port_disable(ctl_port_type port_type);
+/*
+ * This routine grabs a list of frontend ports.
+ */
+int ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
+ int *num_entries_filled, int *num_entries_dropped,
+ ctl_port_type port_type, int no_virtual);
+
+/*
+ * Put a string into an sbuf, escaping characters that are illegal or not
+ * recommended in XML.  Note that this doesn't escape everything; just >, <, and &.
+ */
+int ctl_sbuf_printf_esc(struct sbuf *sb, char *str);
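+
+/*
+ * For example (illustrative): passing "a<b&c" appends "a&lt;b&amp;c" to
+ * the sbuf.
+ */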
+
+int ctl_ffz(uint32_t *mask, uint32_t size);
+int ctl_set_mask(uint32_t *mask, uint32_t bit);
+int ctl_clear_mask(uint32_t *mask, uint32_t bit);
+int ctl_is_set(uint32_t *mask, uint32_t bit);
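+
+/*
+ * These helpers treat the uint32_t array as one long bitmask (a sketch
+ * of the intended use, not a spec); e.g. ctl_queue_sense() in ctl.c
+ * does, on the per-initiator CA mask:
+ *
+ *	if (ctl_is_set(lun->have_ca, initidx))
+ *		goto bailout;
+ *	...
+ *	ctl_set_mask(lun->have_ca, initidx);
+ */
+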
+int ctl_control_page_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ uint8_t *page_ptr);
+/**
+int ctl_failover_sp_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ uint8_t *page_ptr);
+**/
+int ctl_power_sp_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index, uint8_t *page_ptr);
+int ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index, int pc);
+int ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index, uint8_t *page_ptr);
+int ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ int pc);
+int ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ uint8_t *page_ptr);
+int ctl_config_move_done(union ctl_io *io);
+void ctl_datamove(union ctl_io *io);
+void ctl_done(union ctl_io *io);
+void ctl_config_write_done(union ctl_io *io);
+#if 0
+int ctl_thread(void *arg);
+#endif
+void ctl_wakeup_thread(void);
+#if 0
+struct ctl_thread *ctl_create_thread(void (*thread_func)
+ (void *thread_arg), void *thread_arg, const char *thread_name);
+void ctl_signal_thread(struct ctl_thread *thread);
+void ctl_shutdown_thread(struct ctl_thread *thread);
+#endif
+void ctl_portDB_changed(int portnum);
+void ctl_init_isc_msg(void);
+
+#endif /* _KERNEL */
+
+#endif /* _CTL_H_ */
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_backend.c b/sys/cam/ctl/ctl_backend.c
new file mode 100644
index 0000000..cd0d6cc
--- /dev/null
+++ b/sys/cam/ctl/ctl_backend.c
@@ -0,0 +1,177 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend.c#3 $
+ */
+/*
+ * CTL backend driver registration routines
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/queue.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+
+extern struct ctl_softc *control_softc;
+
+int
+ctl_backend_register(struct ctl_backend_driver *be)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_backend_driver *be_tmp;
+
+ ctl_softc = control_softc;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ /*
+ * Sanity check, make sure this isn't a duplicate registration.
+ */
+ STAILQ_FOREACH(be_tmp, &ctl_softc->be_list, links) {
+ if (strcmp(be_tmp->name, be->name) == 0) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (-1);
+ }
+ }
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ /*
+ * Call the backend's initialization routine.
+ */
+ be->init();
+
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ STAILQ_INSERT_TAIL(&ctl_softc->be_list, be, links);
+
+ ctl_softc->num_backends++;
+
+ /*
+ * Don't want to increment the usage count for internal consumers,
+ * we won't be able to unload otherwise.
+ */
+ /* XXX KDM find a substitute for this? */
+#if 0
+ if ((be->flags & CTL_BE_FLAG_INTERNAL) == 0)
+ MOD_INC_USE_COUNT;
+#endif
+
+#ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED
+ be->config_move_done = ctl_config_move_done;
+#endif
+ /* XXX KDM fix this! */
+ be->num_luns = 0;
+#if 0
+ atomic_set(&be->num_luns, 0);
+#endif
+
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+int
+ctl_backend_deregister(struct ctl_backend_driver *be)
+{
+ struct ctl_softc *ctl_softc;
+
+ ctl_softc = control_softc;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+
+#if 0
+ if (atomic_read(&be->num_luns) != 0) {
+#endif
+ /* XXX KDM fix this! */
+ if (be->num_luns != 0) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (-1);
+ }
+
+ STAILQ_REMOVE(&ctl_softc->be_list, be, ctl_backend_driver, links);
+
+ ctl_softc->num_backends--;
+
+ /* XXX KDM find a substitute for this? */
+#if 0
+ if ((be->flags & CTL_BE_FLAG_INTERNAL) == 0)
+ MOD_DEC_USE_COUNT;
+#endif
+
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (0);
+}
+
+struct ctl_backend_driver *
+ctl_backend_find(char *backend_name)
+{
+ struct ctl_softc *ctl_softc;
+ struct ctl_backend_driver *be_tmp;
+
+ ctl_softc = control_softc;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+
+ STAILQ_FOREACH(be_tmp, &ctl_softc->be_list, links) {
+ if (strcmp(be_tmp->name, backend_name) == 0) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (be_tmp);
+ }
+ }
+
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ return (NULL);
+}
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h
new file mode 100644
index 0000000..e33b42b
--- /dev/null
+++ b/sys/cam/ctl/ctl_backend.h
@@ -0,0 +1,288 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend.h#2 $
+ * $FreeBSD$
+ */
+/*
+ * CTL backend driver definitions
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_BACKEND_H_
+#define _CTL_BACKEND_H_
+
+/*
+ * XXX KDM move this to another header file?
+ */
+#define CTL_BE_NAME_LEN 32
+
+/*
+ * The ID_REQ flag is used to say that the caller has requested a
+ * particular LUN ID in the req_lun_id field. If we cannot allocate that
+ * LUN ID, the ctl_add_lun() call will fail.
+ *
+ * The POWERED_OFF flag tells us that the LUN should default to the powered
+ * off state. It will return 0x04,0x02 until it is powered up. ("Logical
+ * unit not ready, initializing command required.")
+ *
+ * The INOPERABLE flag tells us that this LUN is not operable for whatever
+ * reason. This means that user data may have been (or has been?) lost.
+ * We will return 0x31,0x00 ("Medium format corrupted") until the host
+ * issues a FORMAT UNIT command to clear the error.
+ *
+ * The PRIMARY flag tells us that this LUN is registered as a Primary LUN
+ * which is accessible via the Master shelf controller in an HA. This flag
+ * being set indicates a Primary LUN. This flag being reset represents a
+ * Secondary LUN controlled by the Secondary controller in an HA
+ * configuration. Flag is applicable at this time to T_DIRECT types.
+ *
+ * The SERIAL_NUM flag tells us that the serial_num field is filled in and
+ * valid for use in SCSI INQUIRY VPD page 0x80.
+ *
+ * The DEVID flag tells us that the device_id field is filled in and
+ * valid for use in SCSI INQUIRY VPD page 0x83.
+ *
+ * The DEV_TYPE flag tells us that the device_type field is filled in.
+ */
+typedef enum {
+ CTL_LUN_FLAG_ID_REQ = 0x01,
+ CTL_LUN_FLAG_POWERED_OFF = 0x02,
+ CTL_LUN_FLAG_INOPERABLE = 0x04,
+ CTL_LUN_FLAG_PRIMARY = 0x08,
+ CTL_LUN_FLAG_SERIAL_NUM = 0x10,
+ CTL_LUN_FLAG_DEVID = 0x20,
+ CTL_LUN_FLAG_DEV_TYPE = 0x40
+} ctl_backend_lun_flags;
+
+#ifdef _KERNEL
+
+#define CTL_BACKEND_DECLARE(name, driver) \
+ static int name ## _modevent(module_t mod, int type, void *data) \
+ { \
+ switch (type) { \
+ case MOD_LOAD: \
+ ctl_backend_register( \
+ (struct ctl_backend_driver *)data); \
+ break; \
+ case MOD_UNLOAD: \
+ printf(#name " module unload - not possible for this module type\n"); \
+ return EINVAL; \
+ default: \
+ return EOPNOTSUPP; \
+ } \
+ return 0; \
+ } \
+ static moduledata_t name ## _mod = { \
+ #name, \
+ name ## _modevent, \
+ (void *)&driver \
+ }; \
+ DECLARE_MODULE(name, name ## _mod, SI_SUB_CONFIGURE, SI_ORDER_FOURTH); \
+ MODULE_DEPEND(name, ctl, 1, 1, 1); \
+ MODULE_DEPEND(name, cam, 1, 1, 1)
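+
+/*
+ * Typical use (sketch; the "foo" names are hypothetical, while the block
+ * backend later in this change does the same with its "cbb" driver):
+ *
+ *	static struct ctl_backend_driver ctl_be_foo_driver = { ... };
+ *	CTL_BACKEND_DECLARE(ctlfoo, ctl_be_foo_driver);
+ */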
+
+
+typedef enum {
+ CTL_LUN_CONFIG_OK,
+ CTL_LUN_CONFIG_FAILURE
+} ctl_lun_config_status;
+
+typedef void (*be_callback_t)(void *be_lun);
+typedef void (*be_lun_config_t)(void *be_lun,
+ ctl_lun_config_status status);
+
+/*
+ * The lun_type field is the SCSI device type of this particular LUN. In
+ * general, this should be T_DIRECT, although backends will want to create
+ * a processor LUN, typically at LUN 0. See scsi_all.h for the defines for
+ * the various SCSI device types.
+ *
+ * The flags are described above.
+ *
+ * The be_lun field is the backend driver's own context that will get
+ * passed back so that it can tell which LUN CTL is referencing.
+ *
+ * maxlba is the maximum accessible LBA on the LUN. Note that this is
+ * different from the capacity of the array. capacity = maxlba + 1
+ *
+ * blocksize is the size, in bytes, of each LBA on the LUN. In general
+ * this should be 512. In theory CTL should be able to handle other block
+ * sizes. Host application software may not deal with it very well, though.
+ *
+ * req_lun_id is the requested LUN ID. CTL only pays attention to this
+ * field if the CTL_LUN_FLAG_ID_REQ flag is set. If the requested LUN ID is
+ * not available, the LUN addition will fail. If a particular LUN ID isn't
+ * requested, the first available LUN ID will be allocated.
+ *
+ * serial_num is the device serial number returned in the SCSI INQUIRY VPD
+ * page 0x80. This should be a unique, per-shelf value. The data inside
+ * this field should be ASCII only, left aligned, and any unused space
+ * should be padded out with ASCII spaces. This field should NOT be NULL
+ * terminated.
+ *
+ * device_id is the T10 device identifier returned in the SCSI INQUIRY VPD
+ * page 0x83. This should be a unique, per-LUN value. The data inside
+ * this field should be ASCII only, left aligned, and any unused space
+ * should be padded with ASCII spaces. This field should NOT be NULL
+ * terminated.
+ *
+ * The lun_shutdown() method is the callback for the ctl_invalidate_lun()
+ * call. It is called when all outstanding I/O for that LUN has been
+ * completed and CTL has deleted the resources for that LUN. When the CTL
+ * backend gets this call, it can safely free its per-LUN resources.
+ *
+ * The lun_config_status() method is the callback for the ctl_add_lun()
+ * call. It is called when the LUN is successfully added, or when LUN
+ * addition fails. If the LUN is successfully added, the backend may call
+ * the ctl_enable_lun() method to enable the LUN.
+ *
+ * The be field is a pointer to the ctl_backend_driver structure, which
+ * contains the backend methods to be called by CTL.
+ *
+ * The ctl_lun field is for CTL internal use only, and should not be used
+ * by the backend.
+ *
+ * The links field is for CTL internal use only, and should not be used by
+ * the backend.
+ */
+struct ctl_be_lun {
+ uint8_t lun_type; /* passed to CTL */
+ ctl_backend_lun_flags flags; /* passed to CTL */
+ void *be_lun; /* passed to CTL */
+ uint64_t maxlba; /* passed to CTL */
+ uint32_t blocksize; /* passed to CTL */
+ uint32_t req_lun_id; /* passed to CTL */
+ uint32_t lun_id; /* returned from CTL */
+ uint8_t serial_num[CTL_SN_LEN]; /* passed to CTL */
+ uint8_t device_id[CTL_DEVID_LEN];/* passed to CTL */
+ be_callback_t lun_shutdown; /* passed to CTL */
+ be_lun_config_t lun_config_status; /* passed to CTL */
+ struct ctl_backend_driver *be; /* passed to CTL */
+ void *ctl_lun; /* used by CTL */
+ STAILQ_ENTRY(ctl_be_lun) links; /* used by CTL */
+};
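+
+/*
+ * Illustrative sketch of filling in the serial number per the rules
+ * above (ASCII, space padded, not NUL terminated); "sn" is a
+ * hypothetical serial number string, and device_id is handled the same
+ * way:
+ *
+ *	memset(be_lun->serial_num, ' ', sizeof(be_lun->serial_num));
+ *	memcpy(be_lun->serial_num, sn,
+ *	    ctl_min(sizeof(be_lun->serial_num), strlen(sn)));
+ *	be_lun->flags |= CTL_LUN_FLAG_SERIAL_NUM;
+ */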
+
+typedef enum {
+ CTL_BE_FLAG_NONE = 0x00, /* no flags */
+ CTL_BE_FLAG_HAS_CONFIG = 0x01, /* can do config reads, writes */
+ CTL_BE_FLAG_INTERNAL = 0x02 /* don't inc mod refcount */
+} ctl_backend_flags;
+
+typedef int (*be_init_t)(void);
+typedef int (*be_func_t)(union ctl_io *io);
+typedef void (*be_vfunc_t)(union ctl_io *io);
+typedef int (*be_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td);
+typedef int (*be_luninfo_t)(void *be_lun, struct sbuf *sb);
+
+struct ctl_backend_driver {
+ char name[CTL_BE_NAME_LEN]; /* passed to CTL */
+ ctl_backend_flags flags; /* passed to CTL */
+ be_init_t init; /* passed to CTL */
+ be_func_t data_submit; /* passed to CTL */
+ be_func_t data_move_done; /* passed to CTL */
+ be_func_t config_read; /* passed to CTL */
+ be_func_t config_write; /* passed to CTL */
+ be_ioctl_t ioctl; /* passed to CTL */
+ be_luninfo_t lun_info; /* passed to CTL */
+#ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED
+ be_func_t config_move_done; /* passed to backend */
+#endif
+#if 0
+ be_vfunc_t config_write_done; /* passed to backend */
+#endif
+ u_int num_luns; /* used by CTL */
+ STAILQ_ENTRY(ctl_backend_driver) links; /* used by CTL */
+};
+
+int ctl_backend_register(struct ctl_backend_driver *be);
+int ctl_backend_deregister(struct ctl_backend_driver *be);
+struct ctl_backend_driver *ctl_backend_find(char *backend_name);
+
+/*
+ * To add a LUN, first call ctl_add_lun(). You will get the lun_config_status()
+ * callback when the LUN addition has either succeeded or failed.
+ *
+ * Once you get that callback, you can then call ctl_enable_lun() to enable
+ * the LUN.
+ */
+int ctl_add_lun(struct ctl_be_lun *be_lun);
+int ctl_enable_lun(struct ctl_be_lun *be_lun);
+
+/*
+ * To delete a LUN, first call ctl_disable_lun(), then
+ * ctl_invalidate_lun(). You will get the lun_shutdown() callback when all
+ * I/O to the LUN has completed and the LUN has been deleted.
+ */
+int ctl_disable_lun(struct ctl_be_lun *be_lun);
+int ctl_invalidate_lun(struct ctl_be_lun *be_lun);
+
+/*
+ * To start a LUN (transition from powered off to powered on state) call
+ * ctl_start_lun(). To stop a LUN (transition from powered on to powered
+ * off state) call ctl_stop_lun().
+ */
+int ctl_start_lun(struct ctl_be_lun *be_lun);
+int ctl_stop_lun(struct ctl_be_lun *be_lun);
+
+/*
+ * If a LUN is inoperable, call ctl_lun_inoperable(). Generally the LUN
+ * will become operable once again when the user issues the SCSI FORMAT UNIT
+ * command. (CTL will automatically clear the inoperable flag.) If we
+ * need to re-enable the LUN, we can call ctl_lun_operable() to enable it
+ * without a SCSI command.
+ */
+int ctl_lun_inoperable(struct ctl_be_lun *be_lun);
+int ctl_lun_operable(struct ctl_be_lun *be_lun);
+
+/*
+ * If a LUN is locked on or unlocked from a power/APS standpoint, call
+ * ctl_lun_power_lock() to update the current status in CTL's APS subpage.
+ * Set the lock flag to 1 to lock the LUN, set it to 0 to unlock the LUN.
+ */
+int ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
+ int lock);
+
+/*
+ * To take a LUN offline, call ctl_lun_offline(). Generally the LUN will
+ * be online again once the user sends a SCSI START STOP UNIT command with
+ * the start and on/offline bits set. The backend can bring the LUN back
+ * online via the ctl_lun_online() function, if necessary.
+ */
+int ctl_lun_offline(struct ctl_be_lun *be_lun);
+int ctl_lun_online(struct ctl_be_lun *be_lun);
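+
+/*
+ * Putting the calls above together, a backend LUN's life cycle is
+ * roughly (sketch; error handling omitted):
+ *
+ *	ctl_add_lun(be_lun);
+ *	(wait for lun_config_status() to report CTL_LUN_CONFIG_OK)
+ *	ctl_enable_lun(be_lun);
+ *	... I/O ...
+ *	ctl_disable_lun(be_lun);
+ *	ctl_invalidate_lun(be_lun);
+ *	(lun_shutdown() fires once outstanding I/O drains; per-LUN
+ *	 resources may be freed there)
+ */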
+
+#endif /* _KERNEL */
+#endif /* _CTL_BACKEND_H_ */
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
new file mode 100644
index 0000000..9d78654
--- /dev/null
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -0,0 +1,2213 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2009-2011 Spectra Logic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
+ */
+/*
+ * CAM Target Layer driver backend for block devices.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_kdtrace.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/kthread.h>
+#include <sys/bio.h>
+#include <sys/fcntl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/queue.h>
+#include <sys/sbuf.h>
+#include <sys/endian.h>
+#include <sys/uio.h>
+#include <sys/buf.h>
+#include <sys/taskqueue.h>
+#include <sys/vnode.h>
+#include <sys/namei.h>
+#include <sys/mount.h>
+#include <sys/disk.h>
+#include <sys/fcntl.h>
+#include <sys/filedesc.h>
+#include <sys/proc.h>
+#include <sys/pcpu.h>
+#include <sys/module.h>
+#include <sys/sdt.h>
+#include <sys/devicestat.h>
+#include <sys/sysctl.h>
+
+#include <geom/geom.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_error.h>
+
+/*
+ * The idea here is that we'll allocate enough S/G space to hold a 16MB
+ * I/O. If we get an I/O larger than that, we'll reject it.
+ */
+#define CTLBLK_MAX_IO_SIZE (16 * 1024 * 1024)
+#define CTLBLK_MAX_SEGS	((CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1)
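+/*
+ * With the stock 128KB MAXPHYS, that works out to (16MB / 128KB) + 1,
+ * i.e. 129 segments.
+ */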
+
+#ifdef CTLBLK_DEBUG
+#define DPRINTF(fmt, args...) \
+ printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
+#else
+#define DPRINTF(fmt, args...) do {} while(0)
+#endif
+
+SDT_PROVIDER_DEFINE(cbb);
+
+typedef enum {
+ CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01,
+ CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02,
+ CTL_BE_BLOCK_LUN_WAITING = 0x04,
+ CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08
+} ctl_be_block_lun_flags;
+
+typedef enum {
+ CTL_BE_BLOCK_NONE,
+ CTL_BE_BLOCK_DEV,
+ CTL_BE_BLOCK_FILE
+} ctl_be_block_type;
+
+struct ctl_be_block_devdata {
+ struct cdev *cdev;
+ struct cdevsw *csw;
+ int dev_ref;
+};
+
+struct ctl_be_block_filedata {
+ struct ucred *cred;
+};
+
+union ctl_be_block_bedata {
+ struct ctl_be_block_devdata dev;
+ struct ctl_be_block_filedata file;
+};
+
+struct ctl_be_block_io;
+struct ctl_be_block_lun;
+
+typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio);
+
+/*
+ * Backend LUN structure. There is a 1:1 mapping between a block device
+ * and a backend block LUN, and between a backend block LUN and a CTL LUN.
+ */
+struct ctl_be_block_lun {
+ struct ctl_block_disk *disk;
+ char lunname[32];
+ char *dev_path;
+ ctl_be_block_type dev_type;
+ struct vnode *vn;
+ union ctl_be_block_bedata backend;
+ cbb_dispatch_t dispatch;
+ cbb_dispatch_t lun_flush;
+ struct mtx lock;
+ uma_zone_t lun_zone;
+ uint64_t size_blocks;
+ uint64_t size_bytes;
+ uint32_t blocksize;
+ int blocksize_shift;
+ struct ctl_be_block_softc *softc;
+ struct devstat *disk_stats;
+ ctl_be_block_lun_flags flags;
+ STAILQ_ENTRY(ctl_be_block_lun) links;
+ struct ctl_be_lun ctl_be_lun;
+ struct taskqueue *io_taskqueue;
+ struct task io_task;
+ int num_threads;
+ STAILQ_HEAD(, ctl_io_hdr) input_queue;
+ STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
+ STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
+};
+
+/*
+ * Overall softc structure for the block backend module.
+ */
+struct ctl_be_block_softc {
+ STAILQ_HEAD(, ctl_be_block_io) beio_free_queue;
+ struct mtx lock;
+ int prealloc_beio;
+ int num_disks;
+ STAILQ_HEAD(, ctl_block_disk) disk_list;
+ int num_luns;
+ STAILQ_HEAD(, ctl_be_block_lun) lun_list;
+};
+
+static struct ctl_be_block_softc backend_block_softc;
+
+/*
+ * Per-I/O information.
+ */
+struct ctl_be_block_io {
+ union ctl_io *io;
+ struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
+ struct iovec xiovecs[CTLBLK_MAX_SEGS];
+ int bio_cmd;
+ int bio_flags;
+ int num_segs;
+ int num_bios_sent;
+ int num_bios_done;
+ int send_complete;
+ int num_errors;
+ struct bintime ds_t0;
+ devstat_tag_type ds_tag_type;
+ devstat_trans_flags ds_trans_type;
+ uint64_t io_len;
+ uint64_t io_offset;
+ struct ctl_be_block_softc *softc;
+ struct ctl_be_block_lun *lun;
+ STAILQ_ENTRY(ctl_be_block_io) links;
+};
+
+static int cbb_num_threads = 14;
+TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
+SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
+ "CAM Target Layer Block Backend");
+SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
+ &cbb_num_threads, 0, "Number of threads per backing file");
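+/*
+ * The thread count can be set from the loader (e.g. in /boot/loader.conf:
+ * kern.cam.ctl.block.num_threads="14") or, since the OID is CTLFLAG_RW,
+ * changed at runtime with sysctl(8).
+ */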
+
+static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
+static void ctl_free_beio(struct ctl_be_block_io *beio);
+static int ctl_grow_beio(struct ctl_be_block_softc *softc, int count);
+#if 0
+static void ctl_shrink_beio(struct ctl_be_block_softc *softc);
+#endif
+static void ctl_complete_beio(struct ctl_be_block_io *beio);
+static int ctl_be_block_move_done(union ctl_io *io);
+static void ctl_be_block_biodone(struct bio *bio);
+static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio);
+static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio);
+static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio);
+static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio);
+static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io);
+static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io);
+static void ctl_be_block_worker(void *context, int pending);
+static int ctl_be_block_submit(union ctl_io *io);
+static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
+ int flag, struct thread *td);
+static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
+ struct ctl_lun_req *req);
+static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
+ struct ctl_lun_req *req);
+static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
+static int ctl_be_block_open(struct ctl_be_block_softc *softc,
+ struct ctl_be_block_lun *be_lun,
+ struct ctl_lun_req *req);
+static int ctl_be_block_create(struct ctl_be_block_softc *softc,
+ struct ctl_lun_req *req);
+static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
+ struct ctl_lun_req *req);
+static void ctl_be_block_lun_shutdown(void *be_lun);
+static void ctl_be_block_lun_config_status(void *be_lun,
+ ctl_lun_config_status status);
+static int ctl_be_block_config_write(union ctl_io *io);
+static int ctl_be_block_config_read(union ctl_io *io);
+static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
+int ctl_be_block_init(void);
+
+static struct ctl_backend_driver ctl_be_block_driver =
+{
+	.name = "block",
+	.flags = CTL_BE_FLAG_HAS_CONFIG,
+	.init = ctl_be_block_init,
+	.data_submit = ctl_be_block_submit,
+	.data_move_done = ctl_be_block_move_done,
+	.config_read = ctl_be_block_config_read,
+	.config_write = ctl_be_block_config_write,
+	.ioctl = ctl_be_block_ioctl,
+	.lun_info = ctl_be_block_lun_info
+};
+
+MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
+CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);
+
+static struct ctl_be_block_io *
+ctl_alloc_beio(struct ctl_be_block_softc *softc)
+{
+ struct ctl_be_block_io *beio;
+ int count;
+
+ mtx_lock(&softc->lock);
+
+ beio = STAILQ_FIRST(&softc->beio_free_queue);
+ if (beio != NULL) {
+ STAILQ_REMOVE(&softc->beio_free_queue, beio,
+ ctl_be_block_io, links);
+ }
+ mtx_unlock(&softc->lock);
+
+ if (beio != NULL) {
+ bzero(beio, sizeof(*beio));
+ beio->softc = softc;
+ return (beio);
+ }
+
+ for (;;) {
+
+ count = ctl_grow_beio(softc, /*count*/ 10);
+
+ /*
+ * This shouldn't be possible, since ctl_grow_beio() uses a
+ * blocking malloc.
+ */
+ if (count == 0)
+ return (NULL);
+
+ /*
+ * Since we have to drop the lock when we're allocating beio
+ * structures, it's possible someone else can come along and
+ * allocate the beio's we've just allocated.
+ */
+ mtx_lock(&softc->lock);
+ beio = STAILQ_FIRST(&softc->beio_free_queue);
+ if (beio != NULL) {
+ STAILQ_REMOVE(&softc->beio_free_queue, beio,
+ ctl_be_block_io, links);
+ }
+ mtx_unlock(&softc->lock);
+
+ if (beio != NULL) {
+ bzero(beio, sizeof(*beio));
+ beio->softc = softc;
+ break;
+ }
+ }
+ return (beio);
+}
+
+static void
+ctl_free_beio(struct ctl_be_block_io *beio)
+{
+ struct ctl_be_block_softc *softc;
+ int duplicate_free;
+ int i;
+
+ softc = beio->softc;
+ duplicate_free = 0;
+
+ for (i = 0; i < beio->num_segs; i++) {
+ if (beio->sg_segs[i].addr == NULL)
+ duplicate_free++;
+
+ uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
+ beio->sg_segs[i].addr = NULL;
+ }
+
+ if (duplicate_free > 0) {
+ printf("%s: %d duplicate frees out of %d segments\n", __func__,
+ duplicate_free, beio->num_segs);
+ }
+ mtx_lock(&softc->lock);
+ STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
+ mtx_unlock(&softc->lock);
+}
+
+static int
+ctl_grow_beio(struct ctl_be_block_softc *softc, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct ctl_be_block_io *beio;
+
+ beio = (struct ctl_be_block_io *)malloc(sizeof(*beio),
+ M_CTLBLK,
+ M_WAITOK | M_ZERO);
+		/*
+		 * No NULL check or bzero is needed here: an M_WAITOK
+		 * malloc cannot fail, and M_ZERO has already zeroed the
+		 * allocation.
+		 */
+ beio->softc = softc;
+ mtx_lock(&softc->lock);
+ STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
+ mtx_unlock(&softc->lock);
+ }
+
+ return (i);
+}
+
+#if 0
+static void
+ctl_shrink_beio(struct ctl_be_block_softc *softc)
+{
+ struct ctl_be_block_io *beio, *beio_tmp;
+
+ mtx_lock(&softc->lock);
+ STAILQ_FOREACH_SAFE(beio, &softc->beio_free_queue, links, beio_tmp) {
+ STAILQ_REMOVE(&softc->beio_free_queue, beio,
+ ctl_be_block_io, links);
+ free(beio, M_CTLBLK);
+ }
+ mtx_unlock(&softc->lock);
+}
+#endif
+
+static void
+ctl_complete_beio(struct ctl_be_block_io *beio)
+{
+ union ctl_io *io;
+ int io_len;
+
+ io = beio->io;
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
+ io_len = beio->io_len;
+ else
+ io_len = 0;
+
+ devstat_end_transaction(beio->lun->disk_stats,
+ /*bytes*/ io_len,
+ beio->ds_tag_type,
+ beio->ds_trans_type,
+ /*now*/ NULL,
+ /*then*/&beio->ds_t0);
+
+ ctl_free_beio(beio);
+ ctl_done(io);
+}
+
+static int
+ctl_be_block_move_done(union ctl_io *io)
+{
+ struct ctl_be_block_io *beio;
+ struct ctl_be_block_lun *be_lun;
+#ifdef CTL_TIME_IO
+ struct bintime cur_bt;
+#endif
+
+ beio = (struct ctl_be_block_io *)
+ io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
+
+ be_lun = beio->lun;
+
+ DPRINTF("entered\n");
+
+#ifdef CTL_TIME_IO
+ getbintime(&cur_bt);
+ bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
+ bintime_add(&io->io_hdr.dma_bt, &cur_bt);
+ io->io_hdr.num_dmas++;
+#endif
+
+ /*
+ * We set status at this point for read commands, and write
+ * commands with errors.
+ */
+ if ((beio->bio_cmd == BIO_READ)
+ && (io->io_hdr.port_status == 0)
+ && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
+ && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
+ ctl_set_success(&io->scsiio);
+ else if ((io->io_hdr.port_status != 0)
+ && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
+ && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
+ /*
+ * For hardware error sense keys, the sense key
+ * specific value is defined to be a retry count,
+ * but we use it to pass back an internal FETD
+ * error code. XXX KDM Hopefully the FETD is only
+ * using 16 bits for an error code, since that's
+ * all the space we have in the sks field.
+ */
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/
+ io->io_hdr.port_status);
+ }
+
+ /*
+ * If this is a read, or a write with errors, it is done.
+ */
+ if ((beio->bio_cmd == BIO_READ)
+ || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
+ || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
+ ctl_complete_beio(beio);
+ return (0);
+ }
+
+ /*
+ * At this point, we have a write and the DMA completed
+ * successfully. We now have to queue it to the task queue to
+ * execute the backend I/O. That is because we do blocking
+ * memory allocations, and in the file backing case, blocking I/O.
+ * This move done routine is generally called in the SIM's
+ * interrupt context, and therefore we cannot block.
+ */
+ mtx_lock(&be_lun->lock);
+ /*
+ * XXX KDM make sure that links is okay to use at this point.
+ * Otherwise, we either need to add another field to ctl_io_hdr,
+ * or deal with resource allocation here.
+ */
+ STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
+ mtx_unlock(&be_lun->lock);
+
+ taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
+
+ return (0);
+}
+
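+/*
+ * GEOM bio completion handler.  Tallies any error, and once the last
+ * bio for this beio has completed, either fails the I/O, completes it
+ * (writes and flushes), or starts the DMA back to the initiator
+ * (reads).
+ */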
+static void
+ctl_be_block_biodone(struct bio *bio)
+{
+ struct ctl_be_block_io *beio;
+ struct ctl_be_block_lun *be_lun;
+ union ctl_io *io;
+
+ beio = bio->bio_caller1;
+ be_lun = beio->lun;
+ io = beio->io;
+
+ DPRINTF("entered\n");
+
+ mtx_lock(&be_lun->lock);
+ if (bio->bio_error != 0)
+ beio->num_errors++;
+
+ beio->num_bios_done++;
+
+ /*
+ * XXX KDM will this cause WITNESS to complain? Holding a lock
+ * during the free might cause it to complain.
+ */
+ g_destroy_bio(bio);
+
+ /*
+ * If the send complete bit isn't set, or we aren't the last I/O to
+ * complete, then we're done.
+ */
+ if ((beio->send_complete == 0)
+ || (beio->num_bios_done < beio->num_bios_sent)) {
+ mtx_unlock(&be_lun->lock);
+ return;
+ }
+
+ /*
+ * At this point, we've verified that we are the last I/O to
+ * complete, so it's safe to drop the lock.
+ */
+ mtx_unlock(&be_lun->lock);
+
+ /*
+ * If there are any errors from the backing device, we fail the
+ * entire I/O with a medium error.
+ */
+ if (beio->num_errors > 0) {
+ if (beio->bio_cmd == BIO_FLUSH) {
+ /* XXX KDM is there is a better error here? */
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/ 0xbad2);
+ } else
+ ctl_set_medium_error(&io->scsiio);
+ ctl_complete_beio(beio);
+ return;
+ }
+
+ /*
+ * If this is a write or a flush, we're all done.
+ * If this is a read, we can now send the data to the user.
+ */
+ if ((beio->bio_cmd == BIO_WRITE)
+ || (beio->bio_cmd == BIO_FLUSH)) {
+ ctl_set_success(&io->scsiio);
+ ctl_complete_beio(beio);
+ } else {
+ io->scsiio.be_move_done = ctl_be_block_move_done;
+ io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
+ io->scsiio.kern_data_len = beio->io_len;
+ io->scsiio.kern_total_len = beio->io_len;
+ io->scsiio.kern_rel_offset = 0;
+ io->scsiio.kern_data_resid = 0;
+ io->scsiio.kern_sg_entries = beio->num_segs;
+ io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
+#ifdef CTL_TIME_IO
+ getbintime(&io->io_hdr.dma_start_bt);
+#endif
+ ctl_datamove(io);
+ }
+}
+
+static void
+ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio)
+{
+ union ctl_io *io;
+ struct mount *mountpoint;
+ int vfs_is_locked, error, lock_flags;
+
+ DPRINTF("entered\n");
+
+ io = beio->io;
+
+ vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
+
+ (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
+
+ if (MNT_SHARED_WRITES(mountpoint)
+ || ((mountpoint == NULL)
+ && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
+ lock_flags = LK_SHARED;
+ else
+ lock_flags = LK_EXCLUSIVE;
+
+ vn_lock(be_lun->vn, lock_flags | LK_RETRY);
+
+ binuptime(&beio->ds_t0);
+ devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
+
+ error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
+ VOP_UNLOCK(be_lun->vn, 0);
+
+ vn_finished_write(mountpoint);
+
+ VFS_UNLOCK_GIANT(vfs_is_locked);
+
+ if (error == 0)
+ ctl_set_success(&io->scsiio);
+ else {
+ /* XXX KDM is there is a better error here? */
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/ 0xbad1);
+ }
+
+ ctl_complete_beio(beio);
+}
+
+SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, file_start, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, file_start, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, file_done, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, file_done, "uint64_t");
+
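+/*
+ * Read from or write to a file-backed LUN by building a kernel uio
+ * over the beio's S/G list and calling VOP_READ()/VOP_WRITE().
+ */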
+static void
+ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio)
+{
+ struct ctl_be_block_filedata *file_data;
+ union ctl_io *io;
+ struct uio xuio;
+ struct iovec *xiovec;
+ int vfs_is_locked, flags;
+ int error, i;
+
+ DPRINTF("entered\n");
+
+ file_data = &be_lun->backend.file;
+ io = beio->io;
+ flags = beio->bio_flags;
+
+ if (beio->bio_cmd == BIO_READ) {
+ SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
+ } else {
+ SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
+ }
+
+ bzero(&xuio, sizeof(xuio));
+ if (beio->bio_cmd == BIO_READ)
+ xuio.uio_rw = UIO_READ;
+ else
+ xuio.uio_rw = UIO_WRITE;
+
+ xuio.uio_offset = beio->io_offset;
+ xuio.uio_resid = beio->io_len;
+ xuio.uio_segflg = UIO_SYSSPACE;
+ xuio.uio_iov = beio->xiovecs;
+ xuio.uio_iovcnt = beio->num_segs;
+ xuio.uio_td = curthread;
+
+ for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
+ xiovec->iov_base = beio->sg_segs[i].addr;
+ xiovec->iov_len = beio->sg_segs[i].len;
+ }
+
+ vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
+ if (beio->bio_cmd == BIO_READ) {
+ vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
+
+ binuptime(&beio->ds_t0);
+ devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
+
+ /*
+ * UFS pays attention to IO_DIRECT for reads. If the
+ * DIRECTIO option is configured into the kernel, it calls
+ * ffs_rawread(). But that only works for single-segment
+ * uios with user space addresses. In our case, with a
+ * kernel uio, it still reads into the buffer cache, but it
+ * will just try to release the buffer from the cache later
+ * on in ffs_read().
+ *
+ * ZFS does not pay attention to IO_DIRECT for reads.
+ *
+ * UFS does not pay attention to IO_SYNC for reads.
+ *
+ * ZFS pays attention to IO_SYNC (which translates into the
+ * Solaris define FRSYNC for zfs_read()) for reads. It
+ * attempts to sync the file before reading.
+ *
+ * So, to attempt to provide some barrier semantics in the
+ * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
+ */
+ error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
+ (IO_DIRECT|IO_SYNC) : 0, file_data->cred);
+
+ VOP_UNLOCK(be_lun->vn, 0);
+ } else {
+ struct mount *mountpoint;
+ int lock_flags;
+
+ (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
+
+ if (MNT_SHARED_WRITES(mountpoint)
+ || ((mountpoint == NULL)
+ && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
+ lock_flags = LK_SHARED;
+ else
+ lock_flags = LK_EXCLUSIVE;
+
+ vn_lock(be_lun->vn, lock_flags | LK_RETRY);
+
+ binuptime(&beio->ds_t0);
+ devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
+
+ /*
+ * UFS pays attention to IO_DIRECT for writes. The write
+ * is done asynchronously. (Normally the write would just
+ * get put into the cache.)
+ *
+ * UFS pays attention to IO_SYNC for writes. It will
+ * attempt to write the buffer out synchronously if that
+ * flag is set.
+ *
+ * ZFS does not pay attention to IO_DIRECT for writes.
+ *
+ * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
+ * for writes. It will flush the transaction from the
+ * cache before returning.
+ *
+ * So if we've got the BIO_ORDERED flag set, we want
+ * IO_SYNC in either the UFS or ZFS case.
+ */
+ error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
+ IO_SYNC : 0, file_data->cred);
+ VOP_UNLOCK(be_lun->vn, 0);
+
+ vn_finished_write(mountpoint);
+ }
+ VFS_UNLOCK_GIANT(vfs_is_locked);
+
+ /*
+ * If we got an error, set the sense data to "MEDIUM ERROR" and
+ * return the I/O to the user.
+ */
+ if (error != 0) {
+ char path_str[32];
+
+ ctl_scsi_path_string(io, path_str, sizeof(path_str));
+ /*
+ * XXX KDM ZFS returns ENOSPC when the underlying
+ * filesystem fills up. What kind of SCSI error should we
+ * return for that?
+ */
+ printf("%s%s command returned errno %d\n", path_str,
+ (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
+ ctl_set_medium_error(&io->scsiio);
+ ctl_complete_beio(beio);
+ return;
+ }
+
+ /*
+ * If this is a write, we're all done.
+ * If this is a read, we can now send the data to the user.
+ */
+ if (beio->bio_cmd == BIO_WRITE) {
+ ctl_set_success(&io->scsiio);
+ SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
+ ctl_complete_beio(beio);
+ } else {
+ SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
+ io->scsiio.be_move_done = ctl_be_block_move_done;
+ io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
+ io->scsiio.kern_data_len = beio->io_len;
+ io->scsiio.kern_total_len = beio->io_len;
+ io->scsiio.kern_rel_offset = 0;
+ io->scsiio.kern_data_resid = 0;
+ io->scsiio.kern_sg_entries = beio->num_segs;
+ io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
+#ifdef CTL_TIME_IO
+ getbintime(&io->io_hdr.dma_start_bt);
+#endif
+ ctl_datamove(io);
+ }
+}
+
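+/*
+ * Flush a device-backed LUN by sending a single ordered BIO_FLUSH
+ * down to the backing device.
+ */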
+static void
+ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio)
+{
+ struct bio *bio;
+ union ctl_io *io;
+ struct ctl_be_block_devdata *dev_data;
+
+ dev_data = &be_lun->backend.dev;
+ io = beio->io;
+
+ DPRINTF("entered\n");
+
+ /* This can't fail; it's a blocking allocation. */
+ bio = g_alloc_bio();
+
+ bio->bio_cmd = BIO_FLUSH;
+ bio->bio_flags |= BIO_ORDERED;
+ bio->bio_dev = dev_data->cdev;
+ bio->bio_offset = 0;
+ bio->bio_data = NULL;
+ bio->bio_done = ctl_be_block_biodone;
+ bio->bio_caller1 = beio;
+ bio->bio_pblkno = 0;
+
+ /*
+ * We don't need to acquire the LUN lock here, because we are only
+ * sending one bio, and so there is no other context to synchronize
+ * with.
+ */
+ beio->num_bios_sent = 1;
+ beio->send_complete = 1;
+
+ binuptime(&beio->ds_t0);
+ devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
+
+ (*dev_data->csw->d_strategy)(bio);
+}
+
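+/*
+ * Read from or write to a device-backed LUN, splitting each S/G
+ * segment into bios no larger than the backing device's maximum I/O
+ * size.
+ */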
+static void
+ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio)
+{
+ int i;
+ struct bio *bio;
+ struct ctl_be_block_devdata *dev_data;
+ off_t cur_offset;
+ int max_iosize;
+
+ DPRINTF("entered\n");
+
+ dev_data = &be_lun->backend.dev;
+
+ /*
+ * We have to limit our I/O size to the maximum supported by the
+ * backend device.  Hopefully it is MAXPHYS.  If the driver hasn't
+ * set si_iosize_max to something sensible (at least a page), fall
+ * back to DFLTPHYS.
+ */
+ max_iosize = dev_data->cdev->si_iosize_max;
+ if (max_iosize < PAGE_SIZE)
+ max_iosize = DFLTPHYS;
+
+ cur_offset = beio->io_offset;
+
+ /*
+ * XXX KDM need to accurately reflect the number of I/Os outstanding
+ * to a device.
+ */
+ binuptime(&beio->ds_t0);
+ devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
+
+ for (i = 0; i < beio->num_segs; i++) {
+ size_t cur_size;
+ uint8_t *cur_ptr;
+
+ cur_size = beio->sg_segs[i].len;
+ cur_ptr = beio->sg_segs[i].addr;
+
+ while (cur_size > 0) {
+ /* This can't fail; it's a blocking allocation. */
+ bio = g_alloc_bio();
+
+ KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));
+
+ bio->bio_cmd = beio->bio_cmd;
+ bio->bio_flags |= beio->bio_flags;
+ bio->bio_dev = dev_data->cdev;
+ bio->bio_caller1 = beio;
+ bio->bio_length = min(cur_size, max_iosize);
+ bio->bio_offset = cur_offset;
+ bio->bio_data = cur_ptr;
+ bio->bio_done = ctl_be_block_biodone;
+ bio->bio_pblkno = cur_offset / be_lun->blocksize;
+
+ cur_offset += bio->bio_length;
+ cur_ptr += bio->bio_length;
+ cur_size -= bio->bio_length;
+
+ /*
+ * Make sure we set the complete bit just before we
+ * issue the last bio so we don't wind up with a
+ * race.
+ *
+ * Use the LUN mutex here instead of a combination
+ * of atomic variables for simplicity.
+ *
+ * XXX KDM we could have a per-IO lock, but that
+ * would cause additional per-IO setup and teardown
+ * overhead. Hopefully there won't be too much
+ * contention on the LUN lock.
+ */
+ mtx_lock(&be_lun->lock);
+
+ beio->num_bios_sent++;
+
+ if ((i == beio->num_segs - 1)
+ && (cur_size == 0))
+ beio->send_complete = 1;
+
+ mtx_unlock(&be_lun->lock);
+
+ (*dev_data->csw->d_strategy)(bio);
+ }
+ }
+}
+
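+/*
+ * Config write dispatch, run from the task queue.  Only SYNCHRONIZE
+ * CACHE (10 and 16) is expected here; it is turned into a full-LUN
+ * flush.
+ */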
+static void
+ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_block_io *beio;
+ struct ctl_be_block_softc *softc;
+
+ DPRINTF("entered\n");
+
+ softc = be_lun->softc;
+ beio = ctl_alloc_beio(softc);
+ if (beio == NULL) {
+ /*
+ * This should not happen. ctl_alloc_beio() will call
+ * ctl_grow_beio() with a blocking malloc as needed.
+ * A malloc with M_WAITOK should not fail.
+ */
+ ctl_set_busy(&io->scsiio);
+ ctl_done(io);
+ return;
+ }
+
+ beio->io = io;
+ beio->softc = softc;
+ beio->lun = be_lun;
+ io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
+
+ switch (io->scsiio.cdb[0]) {
+ case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
+ beio->ds_trans_type = DEVSTAT_NO_DATA;
+ beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
+ beio->io_len = 0;
+ be_lun->lun_flush(be_lun, beio);
+ break;
+ default:
+ panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
+ break;
+ }
+}
+
+SDT_PROBE_DEFINE1(cbb, kernel, read, start, start, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, kernel, write, start, start, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, alloc_done, "uint64_t");
+SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, alloc_done, "uint64_t");
+
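+/*
+ * Main read/write dispatch, run from the task queue.  Allocates a
+ * beio and its S/G segments, then either issues the backend I/O
+ * (reads) or starts the DMA from the initiator (writes).
+ */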
+static void
+ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
+ union ctl_io *io)
+{
+ struct ctl_be_block_io *beio;
+ struct ctl_be_block_softc *softc;
+ struct ctl_lba_len lbalen;
+ uint64_t len_left, io_size_bytes;
+ int i;
+
+ softc = be_lun->softc;
+
+ DPRINTF("entered\n");
+
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
+ SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
+ } else {
+ SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
+ }
+
+ memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
+ sizeof(lbalen));
+
+ io_size_bytes = lbalen.len * be_lun->blocksize;
+
+ /*
+ * XXX KDM this is temporary, until we implement chaining of beio
+ * structures and multiple datamove calls to move all the data in
+ * or out.
+ */
+ if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
+ printf("%s: IO length %ju > max io size %u\n", __func__,
+ io_size_bytes, CTLBLK_MAX_IO_SIZE);
+ ctl_set_invalid_field(&io->scsiio,
+ /*sks_valid*/ 0,
+ /*command*/ 1,
+ /*field*/ 0,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done(io);
+ return;
+ }
+
+ beio = ctl_alloc_beio(softc);
+ if (beio == NULL) {
+ /*
+ * This should not happen. ctl_alloc_beio() will call
+ * ctl_grow_beio() with a blocking malloc as needed.
+ * A malloc with M_WAITOK should not fail.
+ */
+ ctl_set_busy(&io->scsiio);
+ ctl_done(io);
+ return;
+ }
+
+ beio->io = io;
+ beio->softc = softc;
+ beio->lun = be_lun;
+ io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
+
+ /*
+ * If the I/O came down with an ordered or head of queue tag, set
+ * the BIO_ORDERED attribute. For head of queue tags, that's
+ * pretty much the best we can do.
+ *
+ * XXX KDM we don't have a great way to easily know about the FUA
+ * bit right now (it is decoded in ctl_read_write(), but we don't
+ * pass that knowledge to the backend), and in any case we would
+ * need to determine how to handle it.
+ */
+ if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
+ || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
+ beio->bio_flags = BIO_ORDERED;
+
+ switch (io->scsiio.tag_type) {
+ case CTL_TAG_ORDERED:
+ beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
+ break;
+ case CTL_TAG_HEAD_OF_QUEUE:
+ beio->ds_tag_type = DEVSTAT_TAG_HEAD;
+ break;
+ case CTL_TAG_UNTAGGED:
+ case CTL_TAG_SIMPLE:
+ case CTL_TAG_ACA:
+ default:
+ beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
+ break;
+ }
+
+ /*
+ * This path handles read and write only. The config write path
+ * handles flush operations.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
+ beio->bio_cmd = BIO_READ;
+ beio->ds_trans_type = DEVSTAT_READ;
+ } else {
+ beio->bio_cmd = BIO_WRITE;
+ beio->ds_trans_type = DEVSTAT_WRITE;
+ }
+
+ beio->io_len = lbalen.len * be_lun->blocksize;
+ beio->io_offset = lbalen.lba * be_lun->blocksize;
+
+ DPRINTF("%s at LBA %jx len %u\n",
+ (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
+ (uintmax_t)lbalen.lba, lbalen.len);
+
+ for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
+ len_left > 0; i++) {
+ /*
+ * Setup the S/G entry for this chunk.
+ */
+ beio->sg_segs[i].len = min(MAXPHYS, len_left);
+ beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);
+ /*
+ * uma_zalloc() can in theory return NULL even with M_WAITOK
+ * if it can't pull more memory into the zone.
+ */
+ if (beio->sg_segs[i].addr == NULL) {
+ ctl_set_busy(&io->scsiio);
+ ctl_complete_beio(beio);
+ return;
+ }
+
+ DPRINTF("segment %d addr %p len %zd\n", i,
+ beio->sg_segs[i].addr, beio->sg_segs[i].len);
+
+ beio->num_segs++;
+ len_left -= beio->sg_segs[i].len;
+ }
+
+ /*
+ * For the read case, we need to read the data into our buffers and
+ * then we can send it back to the user. For the write case, we
+ * need to get the data from the user first.
+ */
+ if (beio->bio_cmd == BIO_READ) {
+ SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
+ be_lun->dispatch(be_lun, beio);
+ } else {
+ SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
+ io->scsiio.be_move_done = ctl_be_block_move_done;
+ io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
+ io->scsiio.kern_data_len = beio->io_len;
+ io->scsiio.kern_total_len = beio->io_len;
+ io->scsiio.kern_rel_offset = 0;
+ io->scsiio.kern_data_resid = 0;
+ io->scsiio.kern_sg_entries = beio->num_segs;
+ io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
+#ifdef CTL_TIME_IO
+ getbintime(&io->io_hdr.dma_start_bt);
+#endif
+ ctl_datamove(io);
+ }
+}
+
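+/*
+ * Task queue worker.  Drains the three per-LUN queues in order:
+ * datamove completions first, then config writes, then newly
+ * submitted I/O.
+ */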
+static void
+ctl_be_block_worker(void *context, int pending)
+{
+ struct ctl_be_block_lun *be_lun;
+ struct ctl_be_block_softc *softc;
+ union ctl_io *io;
+
+ be_lun = (struct ctl_be_block_lun *)context;
+ softc = be_lun->softc;
+
+ DPRINTF("entered\n");
+
+ mtx_lock(&be_lun->lock);
+ for (;;) {
+ io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
+ if (io != NULL) {
+ struct ctl_be_block_io *beio;
+
+ DPRINTF("datamove queue\n");
+
+ STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
+ ctl_io_hdr, links);
+
+ mtx_unlock(&be_lun->lock);
+
+ beio = (struct ctl_be_block_io *)
+ io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
+
+ be_lun->dispatch(be_lun, beio);
+
+ mtx_lock(&be_lun->lock);
+ continue;
+ }
+ io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
+ if (io != NULL) {
+ DPRINTF("config write queue\n");
+
+ STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
+ ctl_io_hdr, links);
+
+ mtx_unlock(&be_lun->lock);
+
+ ctl_be_block_cw_dispatch(be_lun, io);
+
+ mtx_lock(&be_lun->lock);
+ continue;
+ }
+ io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
+ if (io != NULL) {
+ DPRINTF("input queue\n");
+
+ STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
+ ctl_io_hdr, links);
+ mtx_unlock(&be_lun->lock);
+
+ /*
+ * We must drop the lock, since this routine and
+ * its children may sleep.
+ */
+ ctl_be_block_dispatch(be_lun, io);
+
+ mtx_lock(&be_lun->lock);
+ continue;
+ }
+
+ /*
+ * If we get here, there is no work left in the queues, so
+ * just break out and let the task queue go to sleep.
+ */
+ break;
+ }
+ mtx_unlock(&be_lun->lock);
+}
+
+/*
+ * Entry point from CTL to the backend for I/O. We queue everything to a
+ * work thread, so this just puts the I/O on a queue and wakes up the
+ * thread.
+ */
+static int
+ctl_be_block_submit(union ctl_io *io)
+{
+ struct ctl_be_block_lun *be_lun;
+ struct ctl_be_lun *ctl_be_lun;
+ int retval;
+
+ DPRINTF("entered\n");
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
+ CTL_PRIV_BACKEND_LUN].ptr;
+ be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
+
+ /*
+ * Make sure we only get SCSI I/O.
+ */
+ KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
+ "%#x) encountered", io->io_hdr.io_type));
+
+ mtx_lock(&be_lun->lock);
+ /*
+ * XXX KDM make sure that links is okay to use at this point.
+ * Otherwise, we either need to add another field to ctl_io_hdr,
+ * or deal with resource allocation here.
+ */
+ STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
+ mtx_unlock(&be_lun->lock);
+
+ taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
+
+ return (retval);
+}
+
+static int
+ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
+ int flag, struct thread *td)
+{
+ struct ctl_be_block_softc *softc;
+ int error;
+
+ softc = &backend_block_softc;
+
+ error = 0;
+
+ switch (cmd) {
+ case CTL_LUN_REQ: {
+ struct ctl_lun_req *lun_req;
+
+ lun_req = (struct ctl_lun_req *)addr;
+
+ switch (lun_req->reqtype) {
+ case CTL_LUNREQ_CREATE:
+ error = ctl_be_block_create(softc, lun_req);
+ break;
+ case CTL_LUNREQ_RM:
+ error = ctl_be_block_rm(softc, lun_req);
+ break;
+ default:
+ lun_req->status = CTL_LUN_ERROR;
+ snprintf(lun_req->error_str, sizeof(lun_req->error_str),
+ "%s: invalid LUN request type %d", __func__,
+ lun_req->reqtype);
+ break;
+ }
+ break;
+ }
+ default:
+ error = ENOTTY;
+ break;
+ }
+
+ return (error);
+}
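+
+/*
+ * For reference: userland (normally ctladm(8)) reaches the ioctl
+ * handler above through the CTL character device.  A rough sketch of
+ * the create path, using only the fields handled here:
+ *
+ *	struct ctl_lun_req req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.reqtype = CTL_LUNREQ_CREATE;
+ *	(fill in req.reqdata.create and the backend args, e.g. "file")
+ *	if (ioctl(fd, CTL_LUN_REQ, &req) == -1 || req.status != CTL_LUN_OK)
+ *		(the request failed; req.error_str may explain why)
+ */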
+
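+/*
+ * Set up a regular file as the backing store for a LUN: record its
+ * size, hold a reference to the opening credentials, and pick a block
+ * size (512 bytes unless the user asked for something else).
+ */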
+static int
+ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
+{
+ struct ctl_be_block_filedata *file_data;
+ struct ctl_lun_create_params *params;
+ struct vattr vattr;
+ int error;
+
+ error = 0;
+ file_data = &be_lun->backend.file;
+ params = &req->reqdata.create;
+
+ be_lun->dev_type = CTL_BE_BLOCK_FILE;
+ be_lun->dispatch = ctl_be_block_dispatch_file;
+ be_lun->lun_flush = ctl_be_block_flush_file;
+
+ error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
+ if (error != 0) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "error calling VOP_GETATTR() for file %s",
+ be_lun->dev_path);
+ return (error);
+ }
+
+ /*
+ * Verify that we have the ability to upgrade to exclusive
+ * access on this file so we can trap errors at open instead
+ * of reporting them during first access.
+ */
+ if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
+ vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
+ if (be_lun->vn->v_iflag & VI_DOOMED) {
+ error = EBADF;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "error locking file %s", be_lun->dev_path);
+ return (error);
+ }
+ }
+
+ file_data->cred = crhold(curthread->td_ucred);
+ be_lun->size_bytes = vattr.va_size;
+ /*
+ * We set the multi thread flag for file operations because all
+ * filesystems (in theory) are capable of allowing multiple readers
+ * of a file at once. So we want to get the maximum possible
+ * concurrency.
+ */
+ be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;
+
+ /*
+ * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
+ * With ZFS, it is 131072 bytes. Block sizes that large don't work
+ * with disklabel and UFS on FreeBSD at least. Large block sizes
+ * may not work with other OSes as well. So just export a sector
+ * size of 512 bytes, which should work with any OS or
+ * application. Since our backing is a file, any block size will
+ * work fine for the backing store.
+ */
+#if 0
+ be_lun->blocksize = vattr.va_blocksize;
+#endif
+ if (params->blocksize_bytes != 0)
+ be_lun->blocksize = params->blocksize_bytes;
+ else
+ be_lun->blocksize = 512;
+
+ /*
+ * Sanity check. The media size has to be at least one
+ * sector long.
+ */
+ if (be_lun->size_bytes < be_lun->blocksize) {
+ error = EINVAL;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "file %s size %ju < block size %u", be_lun->dev_path,
+ (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
+ }
+ return (error);
+}
+
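+/*
+ * Set up a disk device as the backing store for a LUN: hold a
+ * reference on its cdevsw, and query the sector and media sizes via
+ * the DIOCGSECTORSIZE and DIOCGMEDIASIZE ioctls, honoring a
+ * user-requested block size if it is a multiple of the device's.
+ */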
+static int
+ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
+{
+ struct ctl_lun_create_params *params;
+ struct vattr vattr;
+ struct cdev *dev;
+ struct cdevsw *devsw;
+ int error;
+
+ params = &req->reqdata.create;
+
+ be_lun->dev_type = CTL_BE_BLOCK_DEV;
+ be_lun->dispatch = ctl_be_block_dispatch_dev;
+ be_lun->lun_flush = ctl_be_block_flush_dev;
+ be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
+ be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
+ &be_lun->backend.dev.dev_ref);
+ if (be_lun->backend.dev.csw == NULL)
+ panic("Unable to retrieve device switch");
+
+ error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
+ if (error) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error getting vnode attributes for device %s",
+ __func__, be_lun->dev_path);
+ return (error);
+ }
+
+ dev = be_lun->vn->v_rdev;
+ devsw = dev->si_devsw;
+ if (!devsw->d_ioctl) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: no d_ioctl for device %s!", __func__,
+ be_lun->dev_path);
+ return (ENODEV);
+ }
+
+ error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
+ (caddr_t)&be_lun->blocksize, FREAD,
+ curthread);
+ if (error) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error %d returned for DIOCGSECTORSIZE ioctl "
+ "on %s!", __func__, error, be_lun->dev_path);
+ return (error);
+ }
+
+ /*
+ * If the user has asked for a blocksize that is greater than the
+ * backing device's blocksize, we can do it only if the blocksize
+ * the user is asking for is an even multiple of the underlying
+ * device's blocksize.
+ */
+ if ((params->blocksize_bytes != 0)
+ && (params->blocksize_bytes > be_lun->blocksize)) {
+ uint32_t bs_multiple, tmp_blocksize;
+
+ bs_multiple = params->blocksize_bytes / be_lun->blocksize;
+
+ tmp_blocksize = bs_multiple * be_lun->blocksize;
+
+ if (tmp_blocksize == params->blocksize_bytes) {
+ be_lun->blocksize = params->blocksize_bytes;
+ } else {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: requested blocksize %u is not an even "
+ "multiple of backing device blocksize %u",
+ __func__, params->blocksize_bytes,
+ be_lun->blocksize);
+ return (EINVAL);
+ }
+ } else if ((params->blocksize_bytes != 0)
+ && (params->blocksize_bytes != be_lun->blocksize)) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: requested blocksize %u < backing device "
+ "blocksize %u", __func__, params->blocksize_bytes,
+ be_lun->blocksize);
+ return (EINVAL);
+ }
+
+ error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
+ (caddr_t)&be_lun->size_bytes, FREAD,
+ curthread);
+ if (error) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error %d returned for DIOCGMEDIASIZE ioctl "
+ "on %s!", __func__, error, be_lun->dev_path);
+ return (error);
+ }
+
+ return (0);
+}
+
+static int
+ctl_be_block_close(struct ctl_be_block_lun *be_lun)
+{
+ DROP_GIANT();
+ if (be_lun->vn) {
+ int flags = FREAD | FWRITE;
+ int vfs_is_locked = 0;
+
+ switch (be_lun->dev_type) {
+ case CTL_BE_BLOCK_DEV:
+ if (be_lun->backend.dev.csw) {
+ dev_relthread(be_lun->backend.dev.cdev,
+ be_lun->backend.dev.dev_ref);
+ be_lun->backend.dev.csw = NULL;
+ be_lun->backend.dev.cdev = NULL;
+ }
+ break;
+ case CTL_BE_BLOCK_FILE:
+ vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
+ break;
+ case CTL_BE_BLOCK_NONE:
+ default:
+ panic("Unexpected backend type.");
+ break;
+ }
+
+ (void)vn_close(be_lun->vn, flags, NOCRED, curthread);
+ be_lun->vn = NULL;
+
+ switch (be_lun->dev_type) {
+ case CTL_BE_BLOCK_DEV:
+ break;
+ case CTL_BE_BLOCK_FILE:
+ VFS_UNLOCK_GIANT(vfs_is_locked);
+ if (be_lun->backend.file.cred != NULL) {
+ crfree(be_lun->backend.file.cred);
+ be_lun->backend.file.cred = NULL;
+ }
+ break;
+ case CTL_BE_BLOCK_NONE:
+ default:
+ panic("Unexpected backend type.");
+ break;
+ }
+ }
+ PICKUP_GIANT();
+
+ return (0);
+}
+
+static int
+ctl_be_block_open(struct ctl_be_block_softc *softc,
+ struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
+{
+ struct nameidata nd;
+ int flags;
+ int error;
+ int vfs_is_locked;
+
+ /*
+ * XXX KDM allow a read-only option?
+ */
+ flags = FREAD | FWRITE;
+ error = 0;
+
+ if (rootvnode == NULL) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: Root filesystem is not mounted", __func__);
+ return (1);
+ }
+
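+ /*
+ * The caller may not have current, root or jail directories set;
+ * point any missing ones at the root vnode so the namei() done by
+ * vn_open() below can work.
+ */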
+ if (!curthread->td_proc->p_fd->fd_cdir) {
+ curthread->td_proc->p_fd->fd_cdir = rootvnode;
+ VREF(rootvnode);
+ }
+ if (!curthread->td_proc->p_fd->fd_rdir) {
+ curthread->td_proc->p_fd->fd_rdir = rootvnode;
+ VREF(rootvnode);
+ }
+ if (!curthread->td_proc->p_fd->fd_jdir) {
+ curthread->td_proc->p_fd->fd_jdir = rootvnode;
+ VREF(rootvnode);
+ }
+
+ again:
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
+ error = vn_open(&nd, &flags, 0, NULL);
+ if (error) {
+ /*
+ * If the user didn't give us a fully qualified path,
+ * prepending "/dev/" is the only reasonable guess we can
+ * make.  Users who want a file backing store must specify
+ * the full path.
+ */
+ if (be_lun->dev_path[0] != '/') {
+ char *dev_path = "/dev/";
+ char *dev_name;
+
+ /* Retry with "/dev/" prepended to the name. */
+ dev_name = malloc(strlen(be_lun->dev_path)
+ + strlen(dev_path) + 1,
+ M_CTLBLK, M_WAITOK);
+ sprintf(dev_name, "%s%s", dev_path,
+ be_lun->dev_path);
+ free(be_lun->dev_path, M_CTLBLK);
+ be_lun->dev_path = dev_name;
+ goto again;
+ }
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error opening %s", __func__, be_lun->dev_path);
+ return (error);
+ }
+
+ vfs_is_locked = NDHASGIANT(&nd);
+
+ NDFREE(&nd, NDF_ONLY_PNBUF);
+
+ be_lun->vn = nd.ni_vp;
+
+ /* We only support disks and files. */
+ if (vn_isdisk(be_lun->vn, &error)) {
+ error = ctl_be_block_open_dev(be_lun, req);
+ } else if (be_lun->vn->v_type == VREG) {
+ error = ctl_be_block_open_file(be_lun, req);
+ } else {
+ error = EINVAL;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s is not a disk or file", be_lun->dev_path);
+ }
+ VOP_UNLOCK(be_lun->vn, 0);
+ VFS_UNLOCK_GIANT(vfs_is_locked);
+
+ if (error != 0) {
+ ctl_be_block_close(be_lun);
+ return (error);
+ }
+
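+ /* Note: this assumes the block size is a power of 2. */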
+ be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
+ be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;
+
+ return (0);
+}
+
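+/*
+ * Constructor/destructor pair for the per-LUN S/G segment UMA zone.
+ * The constructor is a no-op; the destructor zeroes each buffer when
+ * it is released, so stale data from one I/O is never handed out with
+ * a later allocation.
+ */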
+static int
+ctl_be_block_mem_ctor(void *mem, int size, void *arg, int flags)
+{
+ return (0);
+}
+
+static void
+ctl_be_block_mem_dtor(void *mem, int size, void *arg)
+{
+ bzero(mem, size);
+}
+
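+/*
+ * Create a new LUN: open the backing file or device (for direct
+ * access LUNs), set up the beio task queue and its worker threads,
+ * register the LUN with CTL, and wait for configuration to complete.
+ */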
+static int
+ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
+{
+ struct ctl_be_block_lun *be_lun;
+ struct ctl_lun_create_params *params;
+ struct ctl_be_arg *file_arg;
+ char tmpstr[32];
+ int retval, num_threads;
+ int i;
+
+ params = &req->reqdata.create;
+ retval = 0;
+
+ num_threads = cbb_num_threads;
+
+ file_arg = NULL;
+
+ be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);
+
+ be_lun->softc = softc;
+ STAILQ_INIT(&be_lun->input_queue);
+ STAILQ_INIT(&be_lun->config_write_queue);
+ STAILQ_INIT(&be_lun->datamove_queue);
+ snprintf(be_lun->lunname, sizeof(be_lun->lunname), "cblk%d",
+ softc->num_luns);
+ mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);
+
+ be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
+ ctl_be_block_mem_ctor, ctl_be_block_mem_dtor, NULL, NULL,
+ /*align*/ 0, /*flags*/0);
+
+ if (be_lun->lun_zone == NULL) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error allocating UMA zone", __func__);
+ goto bailout_error;
+ }
+
+ if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
+ be_lun->ctl_be_lun.lun_type = params->device_type;
+ else
+ be_lun->ctl_be_lun.lun_type = T_DIRECT;
+
+ if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
+ for (i = 0; i < req->num_be_args; i++) {
+ if (strcmp(req->kern_be_args[i].name, "file") == 0) {
+ file_arg = &req->kern_be_args[i];
+ break;
+ }
+ }
+
+ if (file_arg == NULL) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: no file argument specified", __func__);
+ goto bailout_error;
+ }
+
+ be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
+ M_WAITOK | M_ZERO);
+
+ strlcpy(be_lun->dev_path, (char *)file_arg->value,
+ file_arg->vallen);
+
+ retval = ctl_be_block_open(softc, be_lun, req);
+ if (retval != 0) {
+ retval = 0;
+ goto bailout_error;
+ }
+
+ /*
+ * Tell the user the size of the file/device.
+ */
+ params->lun_size_bytes = be_lun->size_bytes;
+
+ /*
+ * The maximum LBA is the size - 1.
+ */
+ be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
+ } else {
+ /*
+ * For processor devices, we don't have any size.
+ */
+ be_lun->blocksize = 0;
+ be_lun->size_blocks = 0;
+ be_lun->size_bytes = 0;
+ be_lun->ctl_be_lun.maxlba = 0;
+ params->lun_size_bytes = 0;
+
+ /*
+ * Default to just 1 thread for processor devices.
+ */
+ num_threads = 1;
+ }
+
+ /*
+ * XXX This search loop could be refactored and combined with the
+ * "file" argument loop above.
+ */
+ for (i = 0; i < req->num_be_args; i++) {
+ if (strcmp(req->kern_be_args[i].name, "num_threads") == 0) {
+ struct ctl_be_arg *thread_arg;
+ char num_thread_str[16];
+ int tmp_num_threads;
+
+ thread_arg = &req->kern_be_args[i];
+
+ strlcpy(num_thread_str, (char *)thread_arg->value,
+ min(thread_arg->vallen,
+ sizeof(num_thread_str)));
+
+ tmp_num_threads = strtol(num_thread_str, NULL, 0);
+
+ /*
+ * We don't let the user specify less than one
+ * thread, but hope he's clueful enough not to
+ * specify 1000 threads.
+ */
+ if (tmp_num_threads < 1) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: invalid number of threads %s",
+ __func__, num_thread_str);
+ goto bailout_error;
+ }
+
+ num_threads = tmp_num_threads;
+ }
+ }
+
+ /* OR in so we don't clobber flags (e.g. MULTI_THREAD) set at open. */
+ be_lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
+ be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
+ be_lun->ctl_be_lun.be_lun = be_lun;
+ be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
+ /* Tell the user the blocksize we ended up using */
+ params->blocksize_bytes = be_lun->blocksize;
+ if (params->flags & CTL_LUN_FLAG_ID_REQ) {
+ be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
+ be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
+ } else
+ be_lun->ctl_be_lun.req_lun_id = 0;
+
+ be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
+ be_lun->ctl_be_lun.lun_config_status =
+ ctl_be_block_lun_config_status;
+ be_lun->ctl_be_lun.be = &ctl_be_block_driver;
+
+ if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
+ snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
+ softc->num_luns);
+ strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
+ ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
+ sizeof(tmpstr)));
+
+ /* Tell the user what we used for a serial number */
+ strncpy((char *)params->serial_num, tmpstr,
+ ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
+ } else {
+ strncpy((char *)be_lun->ctl_be_lun.serial_num,
+ params->serial_num,
+ ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
+ sizeof(params->serial_num)));
+ }
+ if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
+ snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
+ strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
+ ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
+ sizeof(tmpstr)));
+
+ /* Tell the user what we used for a device ID */
+ strncpy((char *)params->device_id, tmpstr,
+ ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
+ } else {
+ strncpy((char *)be_lun->ctl_be_lun.device_id,
+ params->device_id,
+ ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
+ sizeof(params->device_id)));
+ }
+
+ TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);
+
+ be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
+ taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
+
+ if (be_lun->io_taskqueue == NULL) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: Unable to create taskqueue", __func__);
+ goto bailout_error;
+ }
+
+ /*
+ * Note that we start the same number of threads by default for
+ * both the file case and the block device case. For the file
+ * case, we need multiple threads to allow concurrency, because the
+ * vnode interface is designed to be a blocking interface. For the
+ * block device case, ZFS zvols at least will block the caller's
+ * context in many instances, and so we need multiple threads to
+ * overcome that problem. Other block devices don't need as many
+ * threads, but they shouldn't cause too many problems.
+ *
+ * If the user wants to just have a single thread for a block
+ * device, he can specify that when the LUN is created, or change
+ * the tunable/sysctl to alter the default number of threads.
+ */
+ retval = taskqueue_start_threads(&be_lun->io_taskqueue,
+ /*num threads*/num_threads,
+ /*priority*/PWAIT,
+ /*thread name*/
+ "%s taskq", be_lun->lunname);
+
+ if (retval != 0)
+ goto bailout_error;
+
+ be_lun->num_threads = num_threads;
+
+ mtx_lock(&softc->lock);
+ softc->num_luns++;
+ STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
+
+ mtx_unlock(&softc->lock);
+
+ retval = ctl_add_lun(&be_lun->ctl_be_lun);
+ if (retval != 0) {
+ mtx_lock(&softc->lock);
+ STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
+ links);
+ softc->num_luns--;
+ mtx_unlock(&softc->lock);
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: ctl_add_lun() returned error %d, see dmesg for "
+ "details", __func__, retval);
+ retval = 0;
+ goto bailout_error;
+ }
+
+ mtx_lock(&softc->lock);
+
+ /*
+ * Tell the config_status routine that we're waiting so it won't
+ * clean up the LUN in the event of an error.
+ */
+ be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
+
+ while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
+ retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
+ if (retval == EINTR)
+ break;
+ }
+ be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
+
+ if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: LUN configuration error, see dmesg for details",
+ __func__);
+ STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
+ links);
+ softc->num_luns--;
+ mtx_unlock(&softc->lock);
+ goto bailout_error;
+ } else {
+ params->req_lun_id = be_lun->ctl_be_lun.lun_id;
+ }
+
+ mtx_unlock(&softc->lock);
+
+ be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
+ be_lun->blocksize,
+ DEVSTAT_ALL_SUPPORTED,
+ be_lun->ctl_be_lun.lun_type
+ | DEVSTAT_TYPE_IF_OTHER,
+ DEVSTAT_PRIORITY_OTHER);
+
+ req->status = CTL_LUN_OK;
+
+ return (retval);
+
+bailout_error:
+ req->status = CTL_LUN_ERROR;
+
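+ /*
+ * XXX The UMA zone and the task queue, if already created, are
+ * not torn down here, so this error path leaks them.
+ */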
+ ctl_be_block_close(be_lun);
+
+ free(be_lun->dev_path, M_CTLBLK);
+ free(be_lun, M_CTLBLK);
+
+ return (retval);
+}
+
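+/*
+ * Remove a LUN: disable and invalidate it in CTL, wait for the
+ * shutdown callback to mark it unconfigured, then tear down the task
+ * queue, backing store, statistics, and memory.
+ */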
+static int
+ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
+{
+ struct ctl_lun_rm_params *params;
+ struct ctl_be_block_lun *be_lun;
+ int retval;
+
+ params = &req->reqdata.rm;
+
+ mtx_lock(&softc->lock);
+
+ be_lun = NULL;
+
+ STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
+ if (be_lun->ctl_be_lun.lun_id == params->lun_id)
+ break;
+ }
+ mtx_unlock(&softc->lock);
+
+ if (be_lun == NULL) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: LUN %u is not managed by the block backend",
+ __func__, params->lun_id);
+ goto bailout_error;
+ }
+
+ retval = ctl_disable_lun(&be_lun->ctl_be_lun);
+
+ if (retval != 0) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error %d returned from ctl_disable_lun() for "
+ "LUN %d", __func__, retval, params->lun_id);
+ goto bailout_error;
+ }
+
+ retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
+ if (retval != 0) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error %d returned from ctl_invalidate_lun() for "
+ "LUN %d", __func__, retval, params->lun_id);
+ goto bailout_error;
+ }
+
+ mtx_lock(&softc->lock);
+
+ be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
+
+ while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
+ retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
+ if (retval == EINTR)
+ break;
+ }
+
+ be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
+
+ if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: interrupted waiting for LUN to be freed",
+ __func__);
+ mtx_unlock(&softc->lock);
+ goto bailout_error;
+ }
+
+ STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);
+
+ softc->num_luns--;
+ mtx_unlock(&softc->lock);
+
+ taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
+
+ taskqueue_free(be_lun->io_taskqueue);
+
+ ctl_be_block_close(be_lun);
+
+ if (be_lun->disk_stats != NULL)
+ devstat_remove_entry(be_lun->disk_stats);
+
+ uma_zdestroy(be_lun->lun_zone);
+
+ free(be_lun->dev_path, M_CTLBLK);
+
+ free(be_lun, M_CTLBLK);
+
+ req->status = CTL_LUN_OK;
+
+ return (0);
+
+bailout_error:
+ req->status = CTL_LUN_ERROR;
+
+ return (0);
+}
+
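+/*
+ * Called by CTL once the LUN has been shut down; mark it unconfigured
+ * and wake up anyone waiting in create or remove.
+ */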
+static void
+ctl_be_block_lun_shutdown(void *be_lun)
+{
+ struct ctl_be_block_lun *lun;
+ struct ctl_be_block_softc *softc;
+
+ lun = (struct ctl_be_block_lun *)be_lun;
+
+ softc = lun->softc;
+
+ mtx_lock(&softc->lock);
+ lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
+ if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
+ wakeup(lun);
+ mtx_unlock(&softc->lock);
+}
+
+static void
+ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
+{
+ struct ctl_be_block_lun *lun;
+ struct ctl_be_block_softc *softc;
+
+ lun = (struct ctl_be_block_lun *)be_lun;
+ softc = lun->softc;
+
+ if (status == CTL_LUN_CONFIG_OK) {
+ mtx_lock(&softc->lock);
+ lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
+ if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
+ wakeup(lun);
+ mtx_unlock(&softc->lock);
+
+ /*
+ * We successfully added the LUN, attempt to enable it.
+ */
+ if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
+ printf("%s: ctl_enable_lun() failed!\n", __func__);
+ if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
+ printf("%s: ctl_invalidate_lun() failed!\n",
+ __func__);
+ }
+ }
+
+ return;
+ }
+
+ mtx_lock(&softc->lock);
+ lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
+ lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
+ wakeup(lun);
+ mtx_unlock(&softc->lock);
+}
+
+static int
+ctl_be_block_config_write(union ctl_io *io)
+{
+ struct ctl_be_block_lun *be_lun;
+ struct ctl_be_lun *ctl_be_lun;
+ int retval;
+
+ retval = 0;
+
+ DPRINTF("entered\n");
+
+ ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
+ CTL_PRIV_BACKEND_LUN].ptr;
+ be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
+
+ switch (io->scsiio.cdb[0]) {
+ case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
+ /*
+ * The upper level CTL code will filter out any CDBs with
+ * the immediate bit set and return the proper error.
+ *
+ * We don't really need to worry about what LBA range the
+ * user asked to be synced out. When they issue a sync
+ * cache command, we'll sync out the whole thing.
+ */
+ mtx_lock(&be_lun->lock);
+ STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
+ links);
+ mtx_unlock(&be_lun->lock);
+ taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
+ break;
+ case START_STOP_UNIT: {
+ struct scsi_start_stop_unit *cdb;
+
+ cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
+
+ if (cdb->how & SSS_START)
+ retval = ctl_start_lun(ctl_be_lun);
+ else {
+ retval = ctl_stop_lun(ctl_be_lun);
+ /*
+ * XXX KDM Copan-specific offline behavior.
+ * Figure out a reasonable way to port this?
+ */
+#ifdef NEEDTOPORT
+ if ((retval == 0)
+ && (cdb->byte2 & SSS_ONOFFLINE))
+ retval = ctl_lun_offline(ctl_be_lun);
+#endif
+ }
+
+ /*
+ * In general, the above routines should not fail. They
+ * just set state for the LUN. So we've got something
+ * pretty wrong here if we can't start or stop the LUN.
+ */
+ if (retval != 0) {
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/ 0xf051);
+ retval = CTL_RETVAL_COMPLETE;
+ } else {
+ ctl_set_success(&io->scsiio);
+ }
+ ctl_config_write_done(io);
+ break;
+ }
+ default:
+ ctl_set_invalid_opcode(&io->scsiio);
+ ctl_config_write_done(io);
+ retval = CTL_RETVAL_COMPLETE;
+ break;
+ }
+
+ return (retval);
+}
+
+static int
+ctl_be_block_config_read(union ctl_io *io)
+{
+ return (0);
+}
+
+static int
+ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
+{
+ struct ctl_be_block_lun *lun;
+ int retval;
+
+ lun = (struct ctl_be_block_lun *)be_lun;
+ retval = 0;
+
+ retval = sbuf_printf(sb, "<num_threads>");
+
+ if (retval != 0)
+ goto bailout;
+
+ retval = sbuf_printf(sb, "%d", lun->num_threads);
+
+ if (retval != 0)
+ goto bailout;
+
+ retval = sbuf_printf(sb, "</num_threads>");
+
+ /*
+ * For processor devices, we don't have a path variable.
+ */
+ if ((retval != 0)
+ || (lun->dev_path == NULL))
+ goto bailout;
+
+ retval = sbuf_printf(sb, "<file>");
+
+ if (retval != 0)
+ goto bailout;
+
+ retval = ctl_sbuf_printf_esc(sb, lun->dev_path);
+
+ if (retval != 0)
+ goto bailout;
+
+ retval = sbuf_printf(sb, "</file>\n");
+
+bailout:
+
+ return (retval);
+}
+
+int
+ctl_be_block_init(void)
+{
+ struct ctl_be_block_softc *softc;
+ int retval;
+
+ softc = &backend_block_softc;
+ retval = 0;
+
+ mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
+ STAILQ_INIT(&softc->beio_free_queue);
+ STAILQ_INIT(&softc->disk_list);
+ STAILQ_INIT(&softc->lun_list);
+ ctl_grow_beio(softc, 200);
+
+ return (retval);
+}
diff --git a/sys/cam/ctl/ctl_backend_block.h b/sys/cam/ctl/ctl_backend_block.h
new file mode 100644
index 0000000..e5781d5
--- /dev/null
+++ b/sys/cam/ctl/ctl_backend_block.h
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.h#1 $
+ * $FreeBSD$
+ */
+/*
+ * CAM Target Layer driver backend interface for block devices.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_BACKEND_BLOCK_H_
+#define _CTL_BACKEND_BLOCK_H_
+
+struct ctl_block_disk {
+ uint32_t version; /* interface version */
+ uint32_t disknum; /* returned device number */
+ STAILQ_ENTRY(ctl_block_disk) links; /* linked list pointer */
+ char disk_name[MAXPATHLEN]; /* name of this device */
+ int allocated; /* disk is allocated to a LUN */
+ uint64_t size_blocks; /* disk size in blocks */
+ uint64_t size_bytes; /* disk size in bytes */
+};
+
+typedef enum {
+ CTL_BLOCK_DEVLIST_MORE,
+ CTL_BLOCK_DEVLIST_DONE
+} ctl_block_devlist_status;
+
+struct ctl_block_devlist {
+ uint32_t version; /* interface version */
+ uint32_t buf_len; /* passed in, buffer length */
+ uint32_t ctl_disk_size; /* size of each devbuf entry, passed in */
+ struct ctl_block_disk *devbuf; /* buffer passed in/filled out*/
+ uint32_t num_bufs; /* number passed out */
+ uint32_t buf_used; /* bytes passed out */
+ uint32_t total_disks; /* number of disks in system */
+ ctl_block_devlist_status status; /* did we get the whole list? */
+};
+
+#define CTL_BLOCK_ADDDEV _IOWR(COPAN_ARRAY_BE_BLOCK, 0x00, struct ctl_block_disk)
+#define CTL_BLOCK_DEVLIST _IOWR(COPAN_ARRAY_BE_BLOCK, 0x01, struct ctl_block_devlist)
+#define CTL_BLOCK_RMDEV _IOW(COPAN_ARRAY_BE_BLOCK, 0x02, struct ctl_block_disk)
+
+#endif /* _CTL_BACKEND_BLOCK_H_ */
diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c
new file mode 100644
index 0000000..0790f28
--- /dev/null
+++ b/sys/cam/ctl/ctl_backend_ramdisk.c
@@ -0,0 +1,835 @@
+/*-
+ * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
+ */
+/*
+ * CAM Target Layer backend for a "fake" ramdisk.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/condvar.h>
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+#include <sys/time.h>
+#include <sys/queue.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/module.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_error.h>
+
+typedef enum {
+ CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
+ CTL_BE_RAMDISK_LUN_CONFIG_ERR = 0x02,
+ CTL_BE_RAMDISK_LUN_WAITING = 0x04
+} ctl_be_ramdisk_lun_flags;
+
+struct ctl_be_ramdisk_lun {
+ uint64_t size_bytes;
+ uint64_t size_blocks;
+ struct ctl_be_ramdisk_softc *softc;
+ ctl_be_ramdisk_lun_flags flags;
+ STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
+ struct ctl_be_lun ctl_be_lun;
+};
+
+struct ctl_be_ramdisk_softc {
+ struct mtx lock;
+ int rd_size;
+#ifdef CTL_RAMDISK_PAGES
+ uint8_t **ramdisk_pages;
+ int num_pages;
+#else
+ uint8_t *ramdisk_buffer;
+#endif
+ int num_luns;
+ STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
+};
+
+static struct ctl_be_ramdisk_softc rd_softc;
+
+int ctl_backend_ramdisk_init(void);
+void ctl_backend_ramdisk_shutdown(void);
+static int ctl_backend_ramdisk_move_done(union ctl_io *io);
+static int ctl_backend_ramdisk_submit(union ctl_io *io);
+static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
+ caddr_t addr, int flag, struct thread *td);
+static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
+ struct ctl_lun_req *req);
+static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
+ struct ctl_lun_req *req, int do_wait);
+static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
+static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
+ ctl_lun_config_status status);
+static int ctl_backend_ramdisk_config_write(union ctl_io *io);
+static int ctl_backend_ramdisk_config_read(union ctl_io *io);
+
+static struct ctl_backend_driver ctl_be_ramdisk_driver =
+{
+ name: "ramdisk",
+ flags: CTL_BE_FLAG_HAS_CONFIG,
+ init: ctl_backend_ramdisk_init,
+ data_submit: ctl_backend_ramdisk_submit,
+ data_move_done: ctl_backend_ramdisk_move_done,
+ config_read: ctl_backend_ramdisk_config_read,
+ config_write: ctl_backend_ramdisk_config_write,
+ ioctl: ctl_backend_ramdisk_ioctl
+};
+
+MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
+CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
+
+int
+ctl_backend_ramdisk_init(void)
+{
+ struct ctl_be_ramdisk_softc *softc;
+#ifdef CTL_RAMDISK_PAGES
+ int i;
+#endif
+
+ softc = &rd_softc;
+
+ memset(softc, 0, sizeof(*softc));
+
+ mtx_init(&softc->lock, "ramdisk", NULL, MTX_DEF);
+
+ STAILQ_INIT(&softc->lun_list);
+ softc->rd_size = 4 * 1024 * 1024;
+#ifdef CTL_RAMDISK_PAGES
+ softc->num_pages = softc->rd_size / PAGE_SIZE;
+ softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
+ softc->num_pages, M_RAMDISK,
+ M_WAITOK);
+ for (i = 0; i < softc->num_pages; i++)
+ softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,
+ M_WAITOK);
+#else
+ softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
+ M_WAITOK);
+#endif
+
+ return (0);
+}
+
+void
+ctl_backend_ramdisk_shutdown(void)
+{
+ struct ctl_be_ramdisk_softc *softc;
+ struct ctl_be_ramdisk_lun *lun, *next_lun;
+#ifdef CTL_RAMDISK_PAGES
+ int i;
+#endif
+
+ softc = &rd_softc;
+
+ mtx_lock(&softc->lock);
+ for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
+ /*
+ * Grab the next LUN. The current LUN may get removed by
+ * ctl_invalidate_lun(), which will call our LUN shutdown
+ * routine, if there is no outstanding I/O for this LUN.
+ */
+ next_lun = STAILQ_NEXT(lun, links);
+
+ /*
+ * Drop our lock here. Since ctl_invalidate_lun() can call
+ * back into us, this could potentially lead to a recursive
+ * lock of the same mutex, which would cause a hang.
+ */
+ mtx_unlock(&softc->lock);
+ ctl_disable_lun(&lun->ctl_be_lun);
+ ctl_invalidate_lun(&lun->ctl_be_lun);
+ mtx_lock(&softc->lock);
+ }
+ mtx_unlock(&softc->lock);
+
+#ifdef CTL_RAMDISK_PAGES
+ for (i = 0; i < softc->num_pages; i++)
+ free(softc->ramdisk_pages[i], M_RAMDISK);
+
+ free(softc->ramdisk_pages, M_RAMDISK);
+#else
+ free(softc->ramdisk_buffer, M_RAMDISK);
+#endif
+
+ if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
+ printf("ctl_backend_ramdisk_shutdown: "
+ "ctl_backend_deregister() failed!\n");
+ }
+}
+
+static int
+ctl_backend_ramdisk_move_done(union ctl_io *io)
+{
+#ifdef CTL_TIME_IO
+ struct bintime cur_bt;
+#endif
+
+ CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
+ if ((io->io_hdr.port_status == 0)
+ && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
+ && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
+ io->io_hdr.status = CTL_SUCCESS;
+ else if ((io->io_hdr.port_status != 0)
+ && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
+ && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
+ /*
+ * For hardware error sense keys, the sense key
+ * specific value is defined to be a retry count,
+ * but we use it to pass back an internal FETD
+ * error code. XXX KDM Hopefully the FETD is only
+ * using 16 bits for an error code, since that's
+ * all the space we have in the sks field.
+ */
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/
+ io->io_hdr.port_status);
+ }
+#ifdef CTL_TIME_IO
+ getbintime(&cur_bt);
+ bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
+ bintime_add(&io->io_hdr.dma_bt, &cur_bt);
+ io->io_hdr.num_dmas++;
+#endif
+
+ if (io->scsiio.kern_sg_entries > 0)
+ free(io->scsiio.kern_data_ptr, M_RAMDISK);
+ ctl_done(io);
+ return (0);
+}
+
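+/*
+ * Note that this backend is "fake": every LUN's I/O is mapped onto
+ * the single shared ramdisk buffer (or page array), so written data
+ * is not actually retained per-LBA.  It is meant for testing, not
+ * real storage.
+ */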
+static int
+ctl_backend_ramdisk_submit(union ctl_io *io)
+{
+ struct ctl_lba_len lbalen;
+#ifdef CTL_RAMDISK_PAGES
+ struct ctl_sg_entry *sg_entries;
+ int len_filled;
+ int i;
+#endif
+ int num_sg_entries, len;
+ struct ctl_be_ramdisk_softc *softc;
+ struct ctl_be_lun *ctl_be_lun;
+ struct ctl_be_ramdisk_lun *be_lun;
+
+ softc = &rd_softc;
+
+ ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
+ CTL_PRIV_BACKEND_LUN].ptr;
+ be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
+
+ memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
+ sizeof(lbalen));
+
+ len = lbalen.len * ctl_be_lun->blocksize;
+
+ /*
+ * Kick out the request if it's bigger than we can handle.
+ */
+ if (len > softc->rd_size) {
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+ ctl_done(io);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * Kick out the request if it's larger than the device size that
+ * the user requested.
+ */
+ if (((lbalen.lba * ctl_be_lun->blocksize) + len) > be_lun->size_bytes) {
+ ctl_set_lba_out_of_range(&io->scsiio);
+ ctl_done(io);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+#ifdef CTL_RAMDISK_PAGES
+ num_sg_entries = len >> PAGE_SHIFT;
+ if ((len & (PAGE_SIZE - 1)) != 0)
+ num_sg_entries++;
+
+ if (num_sg_entries > 1) {
+ io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
+ num_sg_entries, M_RAMDISK,
+ M_WAITOK);
+ if (io->scsiio.kern_data_ptr == NULL) {
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+ ctl_done(io);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+ for (i = 0, len_filled = 0; i < num_sg_entries;
+ i++, len_filled += PAGE_SIZE) {
+ sg_entries[i].addr = softc->ramdisk_pages[i];
+ sg_entries[i].len = ctl_min(PAGE_SIZE,
+ len - len_filled);
+ }
+ } else {
+#endif /* CTL_RAMDISK_PAGES */
+ /*
+ * If this is less than 1 page, don't bother allocating a
+ * scatter/gather list for it. This saves time/overhead.
+ */
+ num_sg_entries = 0;
+#ifdef CTL_RAMDISK_PAGES
+ io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
+#else
+ io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
+#endif
+#ifdef CTL_RAMDISK_PAGES
+ }
+#endif
+
+ io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
+ io->scsiio.kern_data_len = len;
+ io->scsiio.kern_total_len = len;
+ io->scsiio.kern_rel_offset = 0;
+ io->scsiio.kern_data_resid = 0;
+ io->scsiio.kern_sg_entries = num_sg_entries;
+ io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
+#ifdef CTL_TIME_IO
+ getbintime(&io->io_hdr.dma_start_bt);
+#endif
+ ctl_datamove(io);
+
+ return (CTL_RETVAL_COMPLETE);
+}
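+
+/*
+ * Worked example for the scatter/gather setup above (illustrative,
+ * assuming PAGE_SIZE == 4096): a 10000 byte transfer yields
+ * num_sg_entries = 3; the first two entries each map a full ramdisk page
+ * and the last covers the 1808 byte remainder:
+ *
+ *	sg_entries[0].len = 4096;
+ *	sg_entries[1].len = 4096;
+ *	sg_entries[2].len = 1808;	ctl_min(4096, 10000 - 8192)
+ */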
+
+static int
+ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
+ int flag, struct thread *td)
+{
+ struct ctl_be_ramdisk_softc *softc;
+ int retval;
+
+ retval = 0;
+ softc = &rd_softc;
+
+ switch (cmd) {
+ case CTL_LUN_REQ: {
+ struct ctl_lun_req *lun_req;
+
+ lun_req = (struct ctl_lun_req *)addr;
+
+ switch (lun_req->reqtype) {
+ case CTL_LUNREQ_CREATE:
+ retval = ctl_backend_ramdisk_create(softc, lun_req,
+ /*do_wait*/ 1);
+ break;
+ case CTL_LUNREQ_RM:
+ retval = ctl_backend_ramdisk_rm(softc, lun_req);
+ break;
+ default:
+ lun_req->status = CTL_LUN_ERROR;
+ snprintf(lun_req->error_str, sizeof(lun_req->error_str),
+ "%s: invalid LUN request type %d", __func__,
+ lun_req->reqtype);
+ break;
+ }
+ break;
+ }
+ default:
+ retval = ENOTTY;
+ break;
+ }
+
+ return (retval);
+}
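+
+/*
+ * A minimal userland sketch of reaching this handler (illustrative;
+ * normally ctladm(8) builds these requests).  The reqtype, reqdata,
+ * status and error_str fields are used exactly as above; the device path
+ * and the backend name field are assumptions here.  (A real program
+ * needs <fcntl.h>, <err.h>, <string.h>, <sys/ioctl.h> and
+ * cam/ctl/ctl_ioctl.h.)
+ *
+ *	struct ctl_lun_req req;
+ *	int fd;
+ *
+ *	fd = open("/dev/cam/ctl", O_RDWR);	assumed device node
+ *	memset(&req, 0, sizeof(req));
+ *	strlcpy(req.backend, "ramdisk", sizeof(req.backend));
+ *	req.reqtype = CTL_LUNREQ_CREATE;
+ *	req.reqdata.create.lun_size_bytes = 1024 * 1024;
+ *	if (ioctl(fd, CTL_LUN_REQ, &req) == -1)
+ *		err(1, "CTL_LUN_REQ");
+ *	if (req.status != CTL_LUN_OK)
+ *		errx(1, "%s", req.error_str);
+ */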
+
+static int
+ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
+ struct ctl_lun_req *req)
+{
+ struct ctl_be_ramdisk_lun *be_lun;
+ struct ctl_lun_rm_params *params;
+ int retval;
+
+ retval = 0;
+ params = &req->reqdata.rm;
+
+ be_lun = NULL;
+
+ mtx_lock(&softc->lock);
+
+ STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
+ if (be_lun->ctl_be_lun.lun_id == params->lun_id)
+ break;
+ }
+ mtx_unlock(&softc->lock);
+
+ if (be_lun == NULL) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: LUN %u is not managed by the ramdisk backend",
+ __func__, params->lun_id);
+ goto bailout_error;
+ }
+
+ retval = ctl_disable_lun(&be_lun->ctl_be_lun);
+
+ if (retval != 0) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error %d returned from ctl_disable_lun() for "
+ "LUN %d", __func__, retval, params->lun_id);
+ goto bailout_error;
+ }
+
+ /*
+ * Set the waiting flag before we invalidate the LUN. Our shutdown
+ * routine can be called any time after we invalidate the LUN,
+ * and can be called from our context.
+ *
+ * This tells the shutdown routine that we are waiting (or are about
+ * to wait) for the shutdown to complete, so it must not free the
+ * LUN itself.
+ */
+ mtx_lock(&softc->lock);
+ be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
+ mtx_unlock(&softc->lock);
+
+ retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
+ if (retval != 0) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error %d returned from ctl_invalidate_lun() for "
+ "LUN %d", __func__, retval, params->lun_id);
+ goto bailout_error;
+ }
+
+ mtx_lock(&softc->lock);
+
+ while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
+ retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
+ if (retval == EINTR)
+ break;
+ }
+ be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
+
+ /*
+ * We only remove this LUN from the list and free it (below) if
+ * retval == 0. If the user interrupted the wait, we just bail out
+ * without actually freeing the LUN. We let the shutdown routine
+ * free the LUN if that happens.
+ */
+ if (retval == 0) {
+ STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
+ links);
+ softc->num_luns--;
+ }
+
+ mtx_unlock(&softc->lock);
+
+ if (retval == 0)
+ free(be_lun, M_RAMDISK);
+
+ req->status = CTL_LUN_OK;
+
+ return (retval);
+
+bailout_error:
+
+ /*
+ * Don't leave the waiting flag set.
+ */
+ mtx_lock(&softc->lock);
+ be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
+ mtx_unlock(&softc->lock);
+
+ req->status = CTL_LUN_ERROR;
+
+ return (0);
+}
+
+static int
+ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
+ struct ctl_lun_req *req, int do_wait)
+{
+ struct ctl_be_ramdisk_lun *be_lun;
+ struct ctl_lun_create_params *params;
+ uint32_t blocksize;
+ char tmpstr[32];
+ int retval;
+
+ retval = 0;
+ params = &req->reqdata.create;
+ if (params->blocksize_bytes != 0)
+ blocksize = params->blocksize_bytes;
+ else
+ blocksize = 512;
+
+ be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
+ M_WAITOK : M_NOWAIT));
+
+ if (be_lun == NULL) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: error allocating %zd bytes", __func__,
+ sizeof(*be_lun));
+ goto bailout_error;
+ }
+
+ if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
+ be_lun->ctl_be_lun.lun_type = params->device_type;
+ else
+ be_lun->ctl_be_lun.lun_type = T_DIRECT;
+
+ if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
+
+ if (params->lun_size_bytes < blocksize) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: LUN size %ju < blocksize %u", __func__,
+ params->lun_size_bytes, blocksize);
+ goto bailout_error;
+ }
+
+ be_lun->size_blocks = params->lun_size_bytes / blocksize;
+ be_lun->size_bytes = be_lun->size_blocks * blocksize;
+
+ be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
+ } else {
+ be_lun->ctl_be_lun.maxlba = 0;
+ blocksize = 0;
+ be_lun->size_bytes = 0;
+ be_lun->size_blocks = 0;
+ }
+
+ be_lun->ctl_be_lun.blocksize = blocksize;
+
+ /* Tell the user the blocksize we ended up using */
+ params->blocksize_bytes = blocksize;
+
+ /* Tell the user the exact size we ended up using */
+ params->lun_size_bytes = be_lun->size_bytes;
+
+ be_lun->softc = softc;
+
+ be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
+ be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
+ be_lun->ctl_be_lun.be_lun = be_lun;
+
+ if (params->flags & CTL_LUN_FLAG_ID_REQ) {
+ be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
+ be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
+ } else
+ be_lun->ctl_be_lun.req_lun_id = 0;
+
+ be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
+ be_lun->ctl_be_lun.lun_config_status =
+ ctl_backend_ramdisk_lun_config_status;
+ be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
+ if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
+ snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
+ softc->num_luns);
+ strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
+ ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
+ sizeof(tmpstr)));
+
+ /* Tell the user what we used for a serial number */
+ strncpy((char *)params->serial_num, tmpstr,
+ ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
+ } else {
+ strncpy((char *)be_lun->ctl_be_lun.serial_num,
+ params->serial_num,
+ ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
+ sizeof(params->serial_num)));
+ }
+ if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
+ snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
+ strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
+ ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
+ sizeof(tmpstr)));
+
+ /* Tell the user what we used for a device ID */
+ strncpy((char *)params->device_id, tmpstr,
+ ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
+ } else {
+ strncpy((char *)be_lun->ctl_be_lun.device_id,
+ params->device_id,
+ ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
+ sizeof(params->device_id)));
+ }
+
+ mtx_lock(&softc->lock);
+ softc->num_luns++;
+ STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
+
+ mtx_unlock(&softc->lock);
+
+ retval = ctl_add_lun(&be_lun->ctl_be_lun);
+ if (retval != 0) {
+ mtx_lock(&softc->lock);
+ STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
+ links);
+ softc->num_luns--;
+ mtx_unlock(&softc->lock);
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: ctl_add_lun() returned error %d, see dmesg for "
+ "details", __func__, retval);
+ retval = 0;
+ goto bailout_error;
+ }
+
+ if (do_wait == 0)
+ return (retval);
+
+ mtx_lock(&softc->lock);
+
+ /*
+ * Tell the config_status routine that we're waiting so it won't
+ * clean up the LUN in the event of an error.
+ */
+ be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
+
+ while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
+ retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
+ if (retval == EINTR)
+ break;
+ }
+ be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
+
+ if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: LUN configuration error, see dmesg for details",
+ __func__);
+ STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
+ links);
+ softc->num_luns--;
+ mtx_unlock(&softc->lock);
+ goto bailout_error;
+ } else {
+ params->req_lun_id = be_lun->ctl_be_lun.lun_id;
+ }
+ mtx_unlock(&softc->lock);
+
+ req->status = CTL_LUN_OK;
+
+ return (retval);
+
+bailout_error:
+ req->status = CTL_LUN_ERROR;
+ free(be_lun, M_RAMDISK);
+
+ return (retval);
+}
+
+static void
+ctl_backend_ramdisk_lun_shutdown(void *be_lun)
+{
+ struct ctl_be_ramdisk_lun *lun;
+ struct ctl_be_ramdisk_softc *softc;
+ int do_free;
+
+ lun = (struct ctl_be_ramdisk_lun *)be_lun;
+ softc = lun->softc;
+ do_free = 0;
+
+ mtx_lock(&softc->lock);
+
+ lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
+
+ if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
+ wakeup(lun);
+ } else {
+ STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
+ links);
+ softc->num_luns--;
+ do_free = 1;
+ }
+
+ mtx_unlock(&softc->lock);
+
+ if (do_free != 0)
+ free(be_lun, M_RAMDISK);
+}
+
+static void
+ctl_backend_ramdisk_lun_config_status(void *be_lun,
+ ctl_lun_config_status status)
+{
+ struct ctl_be_ramdisk_lun *lun;
+ struct ctl_be_ramdisk_softc *softc;
+
+ lun = (struct ctl_be_ramdisk_lun *)be_lun;
+ softc = lun->softc;
+
+ if (status == CTL_LUN_CONFIG_OK) {
+ mtx_lock(&softc->lock);
+ lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
+ if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
+ wakeup(lun);
+ mtx_unlock(&softc->lock);
+
+ /*
+ * We successfully added the LUN, attempt to enable it.
+ */
+ if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
+ printf("%s: ctl_enable_lun() failed!\n", __func__);
+ if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
+ printf("%s: ctl_invalidate_lun() failed!\n",
+ __func__);
+ }
+ }
+
+ return;
+ }
+
+ mtx_lock(&softc->lock);
+ lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
+
+ /*
+ * If we have a user waiting, let him handle the cleanup. If not,
+ * clean things up here.
+ */
+ if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
+ lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
+ wakeup(lun);
+ } else {
+ STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
+ links);
+ softc->num_luns--;
+ free(lun, M_RAMDISK);
+ }
+ mtx_unlock(&softc->lock);
+}
+
+static int
+ctl_backend_ramdisk_config_write(union ctl_io *io)
+{
+ struct ctl_be_ramdisk_softc *softc;
+ int retval;
+
+ retval = 0;
+ softc = &rd_softc;
+
+ switch (io->scsiio.cdb[0]) {
+ case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
+ /*
+ * The upper level CTL code will filter out any CDBs with
+ * the immediate bit set and return the proper error. It
+ * will also not allow a sync cache command to go to a LUN
+ * that is powered down.
+ *
+ * We don't really need to worry about what LBA range the
+ * user asked to be synced out. When they issue a sync
+ * cache command, we'll sync out the whole thing.
+ *
+ * This is obviously just a stubbed out implementation.
+ * The real implementation will be in the RAIDCore/CTL
+ * interface, and can only really happen when RAIDCore
+ * implements a per-array cache sync.
+ */
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ break;
+ case START_STOP_UNIT: {
+ struct scsi_start_stop_unit *cdb;
+ struct ctl_be_lun *ctl_be_lun;
+ struct ctl_be_ramdisk_lun *be_lun;
+
+ cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
+
+ ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
+ CTL_PRIV_BACKEND_LUN].ptr;
+ be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
+
+ if (cdb->how & SSS_START)
+ retval = ctl_start_lun(ctl_be_lun);
+ else {
+ retval = ctl_stop_lun(ctl_be_lun);
+#ifdef NEEDTOPORT
+ if ((retval == 0)
+ && (cdb->byte2 & SSS_ONOFFLINE))
+ retval = ctl_lun_offline(ctl_be_lun);
+#endif
+ }
+
+ /*
+ * In general, the above routines should not fail. They
+ * just set state for the LUN. So we've got something
+ * pretty wrong here if we can't start or stop the LUN.
+ */
+ if (retval != 0) {
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*retry_count*/ 0xf051);
+ retval = CTL_RETVAL_COMPLETE;
+ } else {
+ ctl_set_success(&io->scsiio);
+ }
+ ctl_config_write_done(io);
+ break;
+ }
+ default:
+ ctl_set_invalid_opcode(&io->scsiio);
+ ctl_config_write_done(io);
+ retval = CTL_RETVAL_COMPLETE;
+ break;
+ }
+
+ return (retval);
+}
+
+static int
+ctl_backend_ramdisk_config_read(union ctl_io *io)
+{
+ /*
+ * XXX KDM need to implement!!
+ */
+ return (0);
+}
diff --git a/sys/cam/ctl/ctl_cmd_table.c b/sys/cam/ctl/ctl_cmd_table.c
new file mode 100644
index 0000000..acd8fec
--- /dev/null
+++ b/sys/cam/ctl/ctl_cmd_table.c
@@ -0,0 +1,984 @@
+/*-
+ * Copyright (c) 2003, 2004, 2005, 2009 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_cmd_table.c#4 $
+ * $FreeBSD$
+ */
+/*
+ * CAM Target Layer command table.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>, Kim Le
+ */
+
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/condvar.h>
+#include <sys/queue.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+
+/*
+ * Whenever support for a new command is added, it should be added to this
+ * table.
+ */
+struct ctl_cmd_entry ctl_cmd_table[] =
+{
+/* 00 TEST UNIT READY */
+{ctl_tur, CTL_SERIDX_TUR, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_NONE |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_TUR},
+
+/* 01 REWIND */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 03 REQUEST SENSE */
+{ctl_request_sense, CTL_SERIDX_RQ_SNS, CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_OK_ON_ALL_LUNS |
+ CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_NO_SENSE |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_OFFLINE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE},
+
+/* 04 FORMAT UNIT */
+{ctl_format, CTL_SERIDX_FORMAT, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE},
+
+/* 05 READ BLOCK LIMITS */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 REASSIGN BLOCKS */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 READ(6) */
+{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A WRITE(6) */
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+
+/* 0B SEEK(6) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F READ REVERSE(6) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 10 WRITE FILEMARKS(6) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 11 SPACE(6) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 12 INQUIRY */
+{ctl_inquiry, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_ALL_LUNS |
+ CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_NO_SENSE |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_OFFLINE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE},
+
+/* 13 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 14 RECOVER BUFFERED DATA */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 15 MODE SELECT(6) */
+{ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE},
+
+/* 16 RESERVE(6) */
+{ctl_scsi_reserve, CTL_SERIDX_RESV, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE},
+
+/* 17 RELEASE(6) */
+{ctl_scsi_release, CTL_SERIDX_REL, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_NONE,
+ CTL_LUN_PAT_NONE},
+
+/* 18 COPY */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 19 ERASE(6) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1A MODE SENSE(6) */
+{ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE},
+
+/* 1B START STOP UNIT */
+{ctl_start_stop, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_OFFLINE |
+ CTL_FLAG_DATA_NONE |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE},
+
+/* 1C RECEIVE DIAGNOSTIC RESULTS */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1D SEND DIAGNOSTIC */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1E PREVENT ALLOW MEDIUM REMOVAL */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 20 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 21 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 22 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 23 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 24 SET WINDOW */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 25 READ CAPACITY(10) */
+{ctl_read_capacity, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READCAP},
+
+/* 26 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 27 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 28 READ(10) */
+{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+
+/* 29 READ GENERATION */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 2A WRITE(10) */
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+
+/* 2B SEEK(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 2C ERASE(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 2D READ UPDATED BLOCK */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 2E WRITE AND VERIFY(10) */
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+
+/* 2F VERIFY(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 30 SEARCH DATA HIGH(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 31 SEARCH DATA EQUAL(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 32 SEARCH DATA LOW(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 33 SET LIMITS(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 34 PRE-FETCH(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 35 SYNCHRONIZE CACHE(10) */
+{ctl_sync_cache, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_NONE,
+ CTL_LUN_PAT_NONE},
+
+/* 36 LOCK UNLOCK CACHE(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 37 READ DEFECT DATA(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 38 MEDIUM SCAN */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 39 COMPARE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 3A COPY AND VERIFY */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 3B WRITE BUFFER */
+{ctl_write_buffer, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_PROC |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE},
+
+/* 3C READ BUFFER */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 3D UPDATE BLOCK */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 3E READ LONG */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 3F WRITE LONG */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 40 CHANGE DEFINITION */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 41 WRITE SAME(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 42 READ SUB-CHANNEL */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 43 READ TOC/PMA/ATIP */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 44 REPORT DENSITY SUPPORT */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 45 PLAY AUDIO(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 46 GET CONFIGURATION */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 47 PLAY AUDIO MSF */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 48 PLAY AUDIO TRACK INDEX */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 49 PLAY TRACK RELATIVE(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 4A GET EVENT STATUS NOTIFICATION */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 4B PAUSE/RESUME */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 4C LOG SELECT */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 4D LOG SENSE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 4E STOP PLAY/SCAN */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 4F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 50 XDWRITE(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 51 XPWRITE(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 52 XDREAD(10) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 53 RESERVE TRACK */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 54 SEND OPC INFORMATION */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 55 MODE SELECT(10) */
+{ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE},
+
+/* 56 RESERVE(10) */
+{ctl_scsi_reserve, CTL_SERIDX_RESV, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE},
+
+/* 57 RELEASE(10) */
+{ctl_scsi_release, CTL_SERIDX_REL, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE},
+
+/* 58 REPAIR TRACK */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 59 READ MASTER CUE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 5A MODE SENSE(10) */
+{ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE},
+
+/* 5B CLOSE TRACK/SESSION */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 5C READ BUFFER CAPACITY */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 5D SEND CUE SHEET */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 5E PERSISTENT RESERVE IN */
+{ctl_persistent_reserve_in, CTL_SERIDX_PRES_IN, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE},
+
+//{ctl_persistent_reserve_in, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
+
+/* 5F PERSISTENT RESERVE OUT */
+{ctl_persistent_reserve_out, CTL_SERIDX_PRES_OUT, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE},
+
+//{ctl_persistent_reserve_out, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
+
+/* 60 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 61 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 62 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 63 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 64 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 65 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 66 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 67 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 68 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 69 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 6A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 6B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 6C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 6D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 6E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 6F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 70 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 71 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 72 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 73 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 74 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 75 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 76 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 77 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 78 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 79 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 7A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 7B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 7C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 7D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 7E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 7F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 80 XDWRITE EXTENDED(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 81 REBUILD(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 82 REGENERATE(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 83 EXTENDED COPY */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 84 RECEIVE COPY RESULTS */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 85 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 86 ACCESS CONTROL IN */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 87 ACCESS CONTROL OUT */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 88 READ(16) */
+{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+
+/* 89 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 8A WRITE(16) */
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+
+/* 8B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 8C READ ATTRIBUTE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 8D WRITE ATTRIBUTE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 8E WRITE AND VERIFY(16) */
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+
+/* 8F VERIFY(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 90 PRE-FETCH(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 91 SYNCHRONIZE CACHE(16) */
+{ctl_sync_cache, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_NONE,
+ CTL_LUN_PAT_NONE},
+
+/* 92 LOCK UNLOCK CACHE(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 93 WRITE SAME(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 94 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 95 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 96 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 97 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 98 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 99 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 9A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 9B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 9C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 9D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 9E SERVICE ACTION IN(16) */
+/* XXX KDM not all service actions will be read capacity!! */
+{ctl_service_action_in, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READCAP},
+
+/* 9F SERVICE ACTION OUT(16) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* A0 REPORT LUNS */
+{ctl_report_luns, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_ALL_LUNS |
+ CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_NO_SENSE |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_OFFLINE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE},
+
+/* A1 BLANK */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* A2 SEND EVENT */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* A3 MAINTENANCE (IN) Service Action - (0A) REPORT TARGET PORT GROUP */
+{ctl_maintenance_in, CTL_SERIDX_MAIN_IN, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE},
+
+/* A4 MAINTENANCE (OUT) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* A5 MOVE MEDIUM */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* A6 EXCHANGE MEDIUM */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* A7 MOVE MEDIUM ATTACHED */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* A8 READ(12) */
+{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+
+/* A9 PLAY TRACK RELATIVE(12) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* AA WRITE(12) */
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+
+/* AB SERVICE ACTION IN(12) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* AC ERASE(12) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* AD READ DVD STRUCTURE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* AE WRITE AND VERIFY(12) */
+{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+
+/* AF VERIFY(12) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B0 SEARCH DATA HIGH(12) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B1 SEARCH DATA EQUAL(12) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B2 SEARCH DATA LOW(12) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B3 SET LIMITS(12) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B4 READ ELEMENT STATUS ATTACHED */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B5 REQUEST VOLUME ELEMENT ADDRESS */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B6 SEND VOLUME TAG */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B7 READ DEFECT DATA(12) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B8 READ ELEMENT STATUS */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* B9 READ CD MSF */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* BA REDUNDANCY GROUP (IN) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* BB REDUNDANCY GROUP (OUT) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* BC SPARE (IN) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* BD SPARE (OUT) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* BE VOLUME SET (IN) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* BF VOLUME SET (OUT) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* C0 - ISC_SEND_MSG_SHORT */
+//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
+{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_NONE,
+ CTL_LUN_PAT_NONE},
+
+/* C1 - ISC_SEND_MSG */
+//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
+{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE},
+
+/* C2 - ISC_WRITE */
+//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
+{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE},
+
+/* C3 - ISC_READ */
+//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
+{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE},
+
+/* C4 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* C5 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* C6 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* C7 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* C8 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* C9 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* CA */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* CB */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* CC */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* CD */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* CE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* CF */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D0 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D1 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D2 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D3 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D4 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D5 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D6 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D7 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D8 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* D9 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* DA */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* DB */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* DC */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* DD */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* DE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* DF */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E0 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E1 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E2 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E3 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E4 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E5 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E6 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E7 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E8 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* E9 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* EA */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* EB */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* EC */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* ED */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* EE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* EF */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F0 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F1 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F2 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F3 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F4 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F5 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F6 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F7 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F8 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* F9 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* FA */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* FB */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* FC */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* FD */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* FE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* FF */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}
+
+};
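+
+/*
+ * Illustrative lookup sketch: the table above has exactly one entry per
+ * possible CDB opcode byte (0x00 through 0xFF), so command dispatch is a
+ * direct index on the first CDB byte, roughly:
+ *
+ *	const struct ctl_cmd_entry *entry;
+ *
+ *	entry = &ctl_cmd_table[ctsio->cdb[0]];
+ *	if (entry->execute == NULL)
+ *		ctl_set_invalid_opcode(ctsio);	opcode not supported
+ *
+ * The handler member name ("execute" here) is an assumption; see the
+ * struct ctl_cmd_entry definition in ctl_private.h for the real layout.
+ */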
diff --git a/sys/cam/ctl/ctl_debug.h b/sys/cam/ctl/ctl_debug.h
new file mode 100644
index 0000000..53f406b
--- /dev/null
+++ b/sys/cam/ctl/ctl_debug.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_debug.h#2 $
+ * $FreeBSD$
+ */
+/*
+ * CAM Target Layer debugging interface.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_DEBUG_H_
+#define _CTL_DEBUG_H_
+
+#ifdef CAM_CTL_DEBUG
+#define CTL_DEBUG_PRINT(X) \
+ do { \
+ printf("ctl_debug: "); \
+ printf X; \
+ } while (0)
+#else /* CAM_CTL_DEBUG */
+#define CTL_DEBUG_PRINT(X)
+#endif /* CAM_CTL_DEBUG */
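+
+/*
+ * Usage note: the macro takes a single parenthesized argument, so calls
+ * must wrap the printf-style argument list in double parentheses, e.g.:
+ *
+ *	CTL_DEBUG_PRINT(("LUN %d: moved %d bytes\n", lun_id, len));
+ *
+ * With CAM_CTL_DEBUG unset the macro expands to nothing, so the
+ * arguments are not evaluated in production kernels.
+ */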
+
+#endif /* _CTL_DEBUG_H_ */
diff --git a/sys/cam/ctl/ctl_error.c b/sys/cam/ctl/ctl_error.c
new file mode 100644
index 0000000..a0e1e97
--- /dev/null
+++ b/sys/cam/ctl/ctl_error.c
@@ -0,0 +1,811 @@
+/*-
+ * Copyright (c) 2003-2009 Silicon Graphics International Corp.
+ * Copyright (c) 2011 Spectra Logic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_error.c#2 $
+ */
+/*
+ * CAM Target Layer error reporting routines.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/stddef.h>
+#include <sys/ctype.h>
+#include <machine/stdarg.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_error.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+
+void
+ctl_set_sense_data_va(struct scsi_sense_data *sense_data, void *lunptr,
+ scsi_sense_data_type sense_format, int current_error,
+ int sense_key, int asc, int ascq, va_list ap)
+{
+ struct ctl_lun *lun;
+
+ lun = (struct ctl_lun *)lunptr;
+
+ /*
+ * Determine whether to return fixed or descriptor format sense
+ * data.
+ */
+ if (sense_format == SSD_TYPE_NONE) {
+ /*
+ * If the format isn't specified, we only return descriptor
+ * sense if the LUN exists and descriptor sense is turned
+ * on for that LUN.
+ */
+ if ((lun != NULL)
+ && (lun->flags & CTL_LUN_SENSE_DESC))
+ sense_format = SSD_TYPE_DESC;
+ else
+ sense_format = SSD_TYPE_FIXED;
+ }
+
+ scsi_set_sense_data_va(sense_data, sense_format, current_error,
+ sense_key, asc, ascq, ap);
+}
+
+void
+ctl_set_sense_data(struct scsi_sense_data *sense_data, void *lunptr,
+ scsi_sense_data_type sense_format, int current_error,
+ int sense_key, int asc, int ascq, ...)
+{
+ va_list ap;
+
+ va_start(ap, ascq);
+ ctl_set_sense_data_va(sense_data, lunptr, sense_format, current_error,
+ sense_key, asc, ascq, ap);
+ va_end(ap);
+}
+
+void
+ctl_set_sense(struct ctl_scsiio *ctsio, int current_error, int sense_key,
+ int asc, int ascq, ...)
+{
+ va_list ap;
+ struct ctl_lun *lun;
+
+ /*
+ * The LUN can't go away until all of the commands have been
+ * completed. Therefore we can safely access the LUN structure and
+ * flags without the lock.
+ */
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ va_start(ap, ascq);
+ ctl_set_sense_data_va(&ctsio->sense_data,
+ lun,
+ SSD_TYPE_NONE,
+ current_error,
+ sense_key,
+ asc,
+ ascq,
+ ap);
+ va_end(ap);
+
+ ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
+ ctsio->sense_len = SSD_FULL_SIZE;
+ ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+}
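+
+/*
+ * Example call (illustrative): raising a NOT READY check condition with
+ * ASC/ASCQ 0x3A/0x00 ("Medium not present") and no additional sense
+ * descriptors.  The variadic element list is always terminated with
+ * SSD_ELEM_NONE:
+ *
+ *	ctl_set_sense(ctsio, 1, SSD_KEY_NOT_READY, 0x3A, 0x00,
+ *		      SSD_ELEM_NONE);
+ */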
+
+/*
+ * Transform fixed sense data into descriptor sense data.
+ *
+ * For simplicity's sake, we assume that both sense structures are
+ * SSD_FULL_SIZE. Otherwise, the logic gets more complicated.
+ */
+void
+ctl_sense_to_desc(struct scsi_sense_data_fixed *sense_src,
+ struct scsi_sense_data_desc *sense_dest)
+{
+ struct scsi_sense_stream stream_sense;
+ int current_error;
+ uint8_t stream_bits;
+
+ bzero(sense_dest, sizeof(*sense_dest));
+
+ if ((sense_src->error_code & SSD_ERRCODE) == SSD_DEFERRED_ERROR)
+ current_error = 0;
+ else
+ current_error = 1;
+
+ bzero(&stream_sense, sizeof(stream_sense));
+
+ /*
+ * Check to see whether any of the tape-specific bits are set. If
+ * so, we'll need a stream sense descriptor.
+ */
+ if (sense_src->flags & (SSD_ILI|SSD_EOM|SSD_FILEMARK))
+ stream_bits = sense_src->flags & ~SSD_KEY;
+ else
+ stream_bits = 0;
+
+ /*
+ * Utilize our sense setting routine to do the transform. If a
+ * value is set in the fixed sense data, set it in the descriptor
+ * data. Otherwise, skip it.
+ */
+ ctl_set_sense_data((struct scsi_sense_data *)sense_dest,
+ /*lun*/ NULL,
+ /*sense_format*/ SSD_TYPE_DESC,
+ current_error,
+ /*sense_key*/ sense_src->flags & SSD_KEY,
+ /*asc*/ sense_src->add_sense_code,
+ /*ascq*/ sense_src->add_sense_code_qual,
+
+ /* Information Bytes */
+ (scsi_4btoul(sense_src->info) != 0) ?
+ SSD_ELEM_INFO : SSD_ELEM_SKIP,
+ sizeof(sense_src->info),
+ sense_src->info,
+
+ /* Command specific bytes */
+ (scsi_4btoul(sense_src->cmd_spec_info) != 0) ?
+ SSD_ELEM_COMMAND : SSD_ELEM_SKIP,
+ sizeof(sense_src->cmd_spec_info),
+ sense_src->cmd_spec_info,
+
+ /* FRU */
+ (sense_src->fru != 0) ?
+ SSD_ELEM_FRU : SSD_ELEM_SKIP,
+ sizeof(sense_src->fru),
+ &sense_src->fru,
+
+ /* Sense Key Specific */
+ (sense_src->sense_key_spec[0] & SSD_SCS_VALID) ?
+ SSD_ELEM_SKS : SSD_ELEM_SKIP,
+ sizeof(sense_src->sense_key_spec),
+ sense_src->sense_key_spec,
+
+ /* Tape bits */
+ (stream_bits != 0) ?
+ SSD_ELEM_STREAM : SSD_ELEM_SKIP,
+ sizeof(stream_bits),
+ &stream_bits,
+
+ SSD_ELEM_NONE);
+}
+
+/*
+ * Transform descriptor format sense data into fixed sense data.
+ *
+ * Some data may be lost in translation, because there are descriptors
+ * that can't be represented as fixed sense data.
+ *
+ * For simplicity's sake, we assume that both sense structures are
+ * SSD_FULL_SIZE. Otherwise, the logic gets more complicated.
+ */
+void
+ctl_sense_to_fixed(struct scsi_sense_data_desc *sense_src,
+ struct scsi_sense_data_fixed *sense_dest)
+{
+ int current_error;
+ uint8_t *info_ptr = NULL, *cmd_ptr = NULL, *fru_ptr = NULL;
+ uint8_t *sks_ptr = NULL, *stream_ptr = NULL;
+ int info_size = 0, cmd_size = 0, fru_size = 0;
+ int sks_size = 0, stream_size = 0;
+ int pos;
+
+ if ((sense_src->error_code & SSD_ERRCODE) == SSD_DESC_CURRENT_ERROR)
+ current_error = 1;
+ else
+ current_error = 0;
+
+ for (pos = 0; pos < (int)(sense_src->extra_len - 1);) {
+ struct scsi_sense_desc_header *header;
+
+ header = (struct scsi_sense_desc_header *)
+ &sense_src->sense_desc[pos];
+
+ /*
+ * See if this record goes past the end of the sense data.
+ * It shouldn't, but check just in case.
+ */
+ if ((pos + header->length + sizeof(*header)) >
+ sense_src->extra_len)
+ break;
+
+ switch (sense_src->sense_desc[pos]) {
+ case SSD_DESC_INFO: {
+ struct scsi_sense_info *info;
+
+ info = (struct scsi_sense_info *)header;
+
+ info_ptr = info->info;
+ info_size = sizeof(info->info);
+
+ pos += info->length +
+ sizeof(struct scsi_sense_desc_header);
+ break;
+ }
+ case SSD_DESC_COMMAND: {
+ struct scsi_sense_command *cmd;
+
+ cmd = (struct scsi_sense_command *)header;
+ cmd_ptr = cmd->command_info;
+ cmd_size = sizeof(cmd->command_info);
+
+ pos += cmd->length +
+ sizeof(struct scsi_sense_desc_header);
+ break;
+ }
+ case SSD_DESC_FRU: {
+ struct scsi_sense_fru *fru;
+
+ fru = (struct scsi_sense_fru *)header;
+ fru_ptr = &fru->fru;
+ fru_size = sizeof(fru->fru);
+ pos += fru->length +
+ sizeof(struct scsi_sense_desc_header);
+ break;
+ }
+ case SSD_DESC_SKS: {
+ struct scsi_sense_sks *sks;
+
+ sks = (struct scsi_sense_sks *)header;
+ sks_ptr = sks->sense_key_spec;
+ sks_size = sizeof(sks->sense_key_spec);
+
+ pos += sks->length +
+ sizeof(struct scsi_sense_desc_header);
+ break;
+ }
+ case SSD_DESC_STREAM: {
+ struct scsi_sense_stream *stream_sense;
+
+ stream_sense = (struct scsi_sense_stream *)header;
+ stream_ptr = &stream_sense->byte3;
+ stream_size = sizeof(stream_sense->byte3);
+ pos += stream_sense->length +
+ sizeof(struct scsi_sense_desc_header);
+ break;
+ }
+ default:
+ /*
+ * We don't recognize this particular sense
+ * descriptor type, so just skip it.
+ */
+ pos += sizeof(*header) + header->length;
+ break;
+ }
+ }
+
+ ctl_set_sense_data((struct scsi_sense_data *)sense_dest,
+ /*lun*/ NULL,
+ /*sense_format*/ SSD_TYPE_FIXED,
+ current_error,
+ /*sense_key*/ sense_src->sense_key & SSD_KEY,
+ /*asc*/ sense_src->add_sense_code,
+ /*ascq*/ sense_src->add_sense_code_qual,
+
+ /* Information Bytes */
+ (info_ptr != NULL) ? SSD_ELEM_INFO : SSD_ELEM_SKIP,
+ info_size,
+ info_ptr,
+
+ /* Command specific bytes */
+ (cmd_ptr != NULL) ? SSD_ELEM_COMMAND : SSD_ELEM_SKIP,
+ cmd_size,
+ cmd_ptr,
+
+ /* FRU */
+ (fru_ptr != NULL) ? SSD_ELEM_FRU : SSD_ELEM_SKIP,
+ fru_size,
+ fru_ptr,
+
+ /* Sense Key Specific */
+ (sks_ptr != NULL) ? SSD_ELEM_SKS : SSD_ELEM_SKIP,
+ sks_size,
+ sks_ptr,
+
+ /* Tape bits */
+ (stream_ptr != NULL) ? SSD_ELEM_STREAM : SSD_ELEM_SKIP,
+ stream_size,
+ stream_ptr,
+
+ SSD_ELEM_NONE);
+}
+
+ctl_sense_format
+ctl_get_sense_format(struct scsi_sense_data *sense_data)
+{
+ switch (sense_data->error_code & SSD_ERRCODE) {
+ case SSD_DESC_CURRENT_ERROR:
+ case SSD_DESC_DEFERRED_ERROR:
+ return (SSD_TYPE_DESC);
+ case SSD_CURRENT_ERROR:
+ case SSD_DEFERRED_ERROR:
+ default:
+ return (SSD_TYPE_FIXED);
+ }
+}
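+
+/*
+ * For reference, the response codes distinguished above are the standard
+ * SPC values: 0x70 (current) and 0x71 (deferred) indicate fixed format
+ * sense data, while 0x72 (current) and 0x73 (deferred) indicate
+ * descriptor format.  Anything unrecognized is treated as fixed format.
+ */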
+
+void
+ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq)
+{
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_UNIT_ATTENTION,
+ asc,
+ ascq,
+ SSD_ELEM_NONE);
+}
+
+ctl_ua_type
+ctl_build_ua(ctl_ua_type ua_type, struct scsi_sense_data *sense,
+ ctl_sense_format sense_format)
+{
+ ctl_ua_type ua_to_build;
+ int i, asc, ascq;
+
+ if (ua_type == CTL_UA_NONE)
+ return (ua_type);
+
+ ua_to_build = CTL_UA_NONE;
+
+ for (i = 0; i < (sizeof(ua_type) * 8); i++) {
+ if (ua_type & (1 << i)) {
+ ua_to_build = 1 << i;
+ break;
+ }
+ }
+
+ switch (ua_to_build) {
+ case CTL_UA_POWERON:
+ /* 29h/01h POWER ON OCCURRED */
+ asc = 0x29;
+ ascq = 0x01;
+ break;
+ case CTL_UA_BUS_RESET:
+ /* 29h/02h SCSI BUS RESET OCCURRED */
+ asc = 0x29;
+ ascq = 0x02;
+ break;
+ case CTL_UA_TARG_RESET:
+ /* 29h/03h BUS DEVICE RESET FUNCTION OCCURRED*/
+ asc = 0x29;
+ ascq = 0x03;
+ break;
+ case CTL_UA_LUN_RESET:
+ /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
+ /*
+ * Since we don't have a specific ASC/ASCQ pair for a LUN
+ * reset, just return the generic reset code.
+ */
+ asc = 0x29;
+ ascq = 0x00;
+ break;
+ case CTL_UA_LUN_CHANGE:
+ /* 3Fh/0Eh REPORTED LUNS DATA HAS CHANGED */
+ asc = 0x3F;
+ ascq = 0x0E;
+ break;
+ case CTL_UA_MODE_CHANGE:
+ /* 2Ah/01h MODE PARAMETERS CHANGED */
+ asc = 0x2A;
+ ascq = 0x01;
+ break;
+ case CTL_UA_LOG_CHANGE:
+ /* 2Ah/02h LOG PARAMETERS CHANGED */
+ asc = 0x2A;
+ ascq = 0x02;
+ break;
+ case CTL_UA_LVD:
+ /* 29h/06h TRANSCEIVER MODE CHANGED TO LVD */
+ asc = 0x29;
+ ascq = 0x06;
+ break;
+ case CTL_UA_SE:
+ /* 29h/05h TRANSCEIVER MODE CHANGED TO SINGLE-ENDED */
+ asc = 0x29;
+ ascq = 0x05;
+ break;
+ case CTL_UA_RES_PREEMPT:
+ /* 2Ah/03h RESERVATIONS PREEMPTED */
+ asc = 0x2A;
+ ascq = 0x03;
+ break;
+ case CTL_UA_RES_RELEASE:
+ /* 2Ah/04h RESERVATIONS RELEASED */
+ asc = 0x2A;
+ ascq = 0x04;
+ break;
+ case CTL_UA_REG_PREEMPT:
+ /* 2Ah/05h REGISTRATIONS PREEMPTED */
+ asc = 0x2A;
+ ascq = 0x05;
+ break;
+ case CTL_UA_ASYM_ACC_CHANGE:
+ /* 2Ah/06n ASYMMETRIC ACCESS STATE CHANGED */
+ asc = 0x2A;
+ ascq = 0x06;
+ break;
+ default:
+ ua_to_build = CTL_UA_NONE;
+ return (ua_to_build);
+ break; /* NOTREACHED */
+ }
+
+ ctl_set_sense_data(sense,
+ /*lun*/ NULL,
+ sense_format,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_UNIT_ATTENTION,
+ asc,
+ ascq,
+ SSD_ELEM_NONE);
+
+ return (ua_to_build);
+}
+
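+/*
+ * Note that only one unit attention is reported per call: the loop
+ * above picks the lowest-numbered bit set in ua_type (e.g. with both
+ * CTL_UA_POWERON and CTL_UA_MODE_CHANGE pending, whichever has the
+ * lower bit value wins), and the return value tells the caller which
+ * UA was actually built.
+ */
+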
+void
+ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio)
+{
+ /* OVERLAPPED COMMANDS ATTEMPTED */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x4E,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag)
+{
+ /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x4D,
+ /*ascq*/ tag,
+ SSD_ELEM_NONE);
+}
+
+/*
+ * Tell the user that there was a problem with the command or data he sent.
+ */
+void
+ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
+ int field, int bit_valid, int bit)
+{
+ uint8_t sks[3];
+ int asc;
+
+ if (command != 0) {
+ /* "Invalid field in CDB" */
+ asc = 0x24;
+ } else {
+ /* "Invalid field in parameter list" */
+ asc = 0x26;
+ }
+
+ if (sks_valid) {
+ sks[0] = SSD_SCS_VALID;
+ if (command)
+ sks[0] |= SSD_FIELDPTR_CMD;
+ scsi_ulto2b(field, &sks[1]);
+
+ if (bit_valid)
+ sks[0] |= SSD_BITPTR_VALID | bit;
+ }
+
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ asc,
+ /*ascq*/ 0x00,
+ /*type*/ (sks_valid != 0) ? SSD_ELEM_SKS : SSD_ELEM_SKIP,
+ /*size*/ sizeof(sks),
+ /*data*/ sks,
+ SSD_ELEM_NONE);
+}
+
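+/*
+ * Example: a command handler rejecting bit 3 of CDB byte 2 would call
+ * (sks_valid 1, command 1, field 2, bit_valid 1, bit 3):
+ *
+ *	ctl_set_invalid_field(ctsio, 1, 1, 2, 1, 3);
+ */
+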
+void
+ctl_set_invalid_opcode(struct ctl_scsiio *ctsio)
+{
+	uint8_t sks[3];
+
+ sks[0] = SSD_SCS_VALID | SSD_FIELDPTR_CMD;
+ scsi_ulto2b(0, &sks[1]);
+
+ /* "Invalid command operation code" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x20,
+ /*ascq*/ 0x00,
+ /*type*/ SSD_ELEM_SKS,
+ /*size*/ sizeof(sks),
+ /*data*/ sks,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_param_len_error(struct ctl_scsiio *ctsio)
+{
+ /* "Parameter list length error" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x1a,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_already_locked(struct ctl_scsiio *ctsio)
+{
+ /* Vendor unique "Somebody already is locked" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x81,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_unsupported_lun(struct ctl_scsiio *ctsio)
+{
+ /* "Logical unit not supported" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x25,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_internal_failure(struct ctl_scsiio *ctsio, int sks_valid,
+ uint16_t retry_count)
+{
+ uint8_t sks[3];
+
+ if (sks_valid) {
+ sks[0] = SSD_SCS_VALID;
+ sks[1] = (retry_count >> 8) & 0xff;
+ sks[2] = retry_count & 0xff;
+ }
+
+ /* "Internal target failure" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
+ /*asc*/ 0x44,
+ /*ascq*/ 0x00,
+ /*type*/ (sks_valid != 0) ? SSD_ELEM_SKS : SSD_ELEM_SKIP,
+ /*size*/ sizeof(sks),
+ /*data*/ sks,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_medium_error(struct ctl_scsiio *ctsio)
+{
+ if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
+ /* "Unrecovered read error" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_MEDIUM_ERROR,
+ /*asc*/ 0x11,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+ } else {
+ /* "Write error - auto reallocation failed" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_MEDIUM_ERROR,
+ /*asc*/ 0x0C,
+ /*ascq*/ 0x02,
+ SSD_ELEM_NONE);
+ }
+}
+
+void
+ctl_set_aborted(struct ctl_scsiio *ctsio)
+{
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ABORTED_COMMAND,
+ /*asc*/ 0x45,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_lba_out_of_range(struct ctl_scsiio *ctsio)
+{
+ /* "Logical block address out of range" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x21,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_lun_stopped(struct ctl_scsiio *ctsio)
+{
+ /* "Logical unit not ready, initializing cmd. required" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_NOT_READY,
+ /*asc*/ 0x04,
+ /*ascq*/ 0x02,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_lun_not_ready(struct ctl_scsiio *ctsio)
+{
+ /* "Logical unit not ready, manual intervention required" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_NOT_READY,
+ /*asc*/ 0x04,
+ /*ascq*/ 0x05,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_illegal_pr_release(struct ctl_scsiio *ctsio)
+{
+ /* "Invalid release of persistent reservation" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x26,
+ /*ascq*/ 0x04,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_lun_standby(struct ctl_scsiio *ctsio)
+{
+ /* "Logical unit not ready, target port in standby state" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_NOT_READY,
+ /*asc*/ 0x04,
+ /*ascq*/ 0x0b,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_medium_format_corrupted(struct ctl_scsiio *ctsio)
+{
+ /* "Medium format corrupted" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_MEDIUM_ERROR,
+ /*asc*/ 0x31,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_medium_magazine_inaccessible(struct ctl_scsiio *ctsio)
+{
+ /* "Medium magazine not accessible" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_NOT_READY,
+ /*asc*/ 0x3b,
+ /*ascq*/ 0x11,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_data_phase_error(struct ctl_scsiio *ctsio)
+{
+ /* "Data phase error" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_NOT_READY,
+ /*asc*/ 0x4b,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+}
+
+void
+ctl_set_reservation_conflict(struct ctl_scsiio *ctsio)
+{
+ struct scsi_sense_data *sense;
+
+ sense = &ctsio->sense_data;
+ memset(sense, 0, sizeof(*sense));
+ ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
+ ctsio->sense_len = 0;
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+}
+
+void
+ctl_set_queue_full(struct ctl_scsiio *ctsio)
+{
+ struct scsi_sense_data *sense;
+
+ sense = &ctsio->sense_data;
+ memset(sense, 0, sizeof(*sense));
+ ctsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
+ ctsio->sense_len = 0;
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+}
+
+void
+ctl_set_busy(struct ctl_scsiio *ctsio)
+{
+ struct scsi_sense_data *sense;
+
+ sense = &ctsio->sense_data;
+ memset(sense, 0, sizeof(*sense));
+ ctsio->scsi_status = SCSI_STATUS_BUSY;
+ ctsio->sense_len = 0;
+ ctsio->io_hdr.status = CTL_SCSI_ERROR;
+}
+
+void
+ctl_set_success(struct ctl_scsiio *ctsio)
+{
+ struct scsi_sense_data *sense;
+
+ sense = &ctsio->sense_data;
+ memset(sense, 0, sizeof(*sense));
+ ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->sense_len = 0;
+ ctsio->io_hdr.status = CTL_SUCCESS;
+}
diff --git a/sys/cam/ctl/ctl_error.h b/sys/cam/ctl/ctl_error.h
new file mode 100644
index 0000000..6b97463
--- /dev/null
+++ b/sys/cam/ctl/ctl_error.h
@@ -0,0 +1,92 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_error.h#1 $
+ * $FreeBSD$
+ */
+/*
+ * Function definitions for various error reporting routines used both
+ * within CTL and various CTL clients.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_ERROR_H_
+#define _CTL_ERROR_H_
+
+#include <machine/stdarg.h>
+
+typedef enum {
+ CTL_SENSE_NOT_SPECIFIED,
+ CTL_SENSE_FIXED,
+ CTL_SENSE_DESCRIPTOR
+} ctl_sense_format;
+
+void ctl_set_sense_data_va(struct scsi_sense_data *sense_data, void *lun,
+ scsi_sense_data_type sense_format, int current_error,
+ int sense_key, int asc, int ascq, va_list ap);
+void ctl_set_sense_data(struct scsi_sense_data *sense_data, void *lun,
+ scsi_sense_data_type sense_format, int current_error,
+ int sense_key, int asc, int ascq, ...);
+void ctl_set_sense(struct ctl_scsiio *ctsio, int current_error, int sense_key,
+ int asc, int ascq, ...);
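+/*
+ * The variable arguments to the three routines above are a sequence of
+ * (element type, size, data pointer) triples terminated by
+ * SSD_ELEM_NONE.  An element may be suppressed by passing SSD_ELEM_SKIP
+ * as its type; the callers in ctl_error.c still supply the size and
+ * data slots in that case.  For example:
+ *
+ *	ctl_set_sense(ctsio, 1, SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00,
+ *		      SSD_ELEM_SKS, sizeof(sks), sks,
+ *		      SSD_ELEM_NONE);
+ */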
+void ctl_sense_to_desc(struct scsi_sense_data_fixed *sense_src,
+ struct scsi_sense_data_desc *sense_dest);
+void ctl_sense_to_fixed(struct scsi_sense_data_desc *sense_src,
+ struct scsi_sense_data_fixed *sense_dest);
+ctl_sense_format ctl_get_sense_format(struct scsi_sense_data *sense_data);
+void ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq);
+ctl_ua_type ctl_build_ua(ctl_ua_type ua_type, struct scsi_sense_data *sense,
+ ctl_sense_format sense_format);
+void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio);
+void ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag);
+void ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
+ int field, int bit_valid, int bit);
+void ctl_set_invalid_opcode(struct ctl_scsiio *ctsio);
+void ctl_set_param_len_error(struct ctl_scsiio *ctsio);
+void ctl_set_already_locked(struct ctl_scsiio *ctsio);
+void ctl_set_unsupported_lun(struct ctl_scsiio *ctsio);
+void ctl_set_lun_standby(struct ctl_scsiio *ctsio);
+void ctl_set_internal_failure(struct ctl_scsiio *ctsio, int sks_valid,
+ uint16_t retry_count);
+void ctl_set_medium_error(struct ctl_scsiio *ctsio);
+void ctl_set_aborted(struct ctl_scsiio *ctsio);
+void ctl_set_lba_out_of_range(struct ctl_scsiio *ctsio);
+void ctl_set_lun_stopped(struct ctl_scsiio *ctsio);
+void ctl_set_lun_not_ready(struct ctl_scsiio *ctsio);
+void ctl_set_illegal_pr_release(struct ctl_scsiio *ctsio);
+void ctl_set_medium_format_corrupted(struct ctl_scsiio *ctsio);
+void ctl_set_medium_magazine_inaccessible(struct ctl_scsiio *ctsio);
+void ctl_set_data_phase_error(struct ctl_scsiio *ctsio);
+void ctl_set_reservation_conflict(struct ctl_scsiio *ctsio);
+void ctl_set_queue_full(struct ctl_scsiio *ctsio);
+void ctl_set_busy(struct ctl_scsiio *ctsio);
+void ctl_set_success(struct ctl_scsiio *ctsio);
+
+#endif /* _CTL_ERROR_H_ */
diff --git a/sys/cam/ctl/ctl_frontend.c b/sys/cam/ctl/ctl_frontend.c
new file mode 100644
index 0000000..ec0ec32
--- /dev/null
+++ b/sys/cam/ctl/ctl_frontend.c
@@ -0,0 +1,187 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.c#4 $
+ */
+/*
+ * CAM Target Layer front end interface code
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/endian.h>
+#include <sys/queue.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_backend.h>
+/* XXX KDM move defines from ctl_ioctl.h to somewhere else */
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+
+extern struct ctl_softc *control_softc;
+
+int
+ctl_frontend_register(struct ctl_frontend *fe, int master_shelf)
+{
+ struct ctl_io_pool *pool;
+ int port_num;
+ int retval;
+
+ retval = 0;
+
+ KASSERT(control_softc != NULL, ("CTL is not initialized"));
+
+ mtx_lock(&control_softc->ctl_lock);
+ port_num = ctl_ffz(&control_softc->ctl_port_mask, CTL_MAX_PORTS);
+ if ((port_num == -1)
+ || (ctl_set_mask(&control_softc->ctl_port_mask, port_num) == -1)) {
+ fe->targ_port = -1;
+ mtx_unlock(&control_softc->ctl_lock);
+ return (1);
+ }
+ control_softc->num_frontends++;
+
+ mtx_unlock(&control_softc->ctl_lock);
+ /*
+ * We add 20 to whatever the caller requests, so he doesn't get
+ * burned by queueing things back to the pending sense queue. In
+ * theory, there should probably only be one outstanding item, at
+ * most, on the pending sense queue for a LUN. We'll clear the
+ * pending sense queue on the next command, whether or not it is
+ * a REQUEST SENSE.
+ */
+ retval = ctl_pool_create(control_softc,
+ (fe->port_type != CTL_PORT_IOCTL) ?
+ CTL_POOL_FETD : CTL_POOL_IOCTL,
+ fe->num_requested_ctl_io + 20, &pool);
+ if (retval != 0) {
+ fe->targ_port = -1;
+ mtx_lock(&control_softc->ctl_lock);
+ ctl_clear_mask(&control_softc->ctl_port_mask, port_num);
+ mtx_unlock(&control_softc->ctl_lock);
+ return (retval);
+ }
+
+ mtx_lock(&control_softc->ctl_lock);
+
+ /* For now assume master shelf */
+	fe->targ_port = port_num + (master_shelf != 0 ? 0 : CTL_MAX_PORTS);
+ fe->max_initiators = CTL_MAX_INIT_PER_PORT;
+ STAILQ_INSERT_TAIL(&control_softc->fe_list, fe, links);
+ ctl_pool_acquire(pool);
+ control_softc->ctl_ports[port_num] = fe;
+
+ mtx_unlock(&control_softc->ctl_lock);
+
+ fe->ctl_pool_ref = pool;
+
+ return (retval);
+}
+
+int
+ctl_frontend_deregister(struct ctl_frontend *fe)
+{
+ struct ctl_io_pool *pool;
+ int port_num;
+ int retval;
+
+ retval = 0;
+
+ pool = (struct ctl_io_pool *)fe->ctl_pool_ref;
+
+ if (fe->targ_port == -1) {
+ retval = 1;
+ goto bailout;
+ }
+
+ mtx_lock(&control_softc->ctl_lock);
+
+ ctl_pool_invalidate(pool);
+ ctl_pool_release(pool);
+
+ STAILQ_REMOVE(&control_softc->fe_list, fe, ctl_frontend, links);
+ control_softc->num_frontends--;
+ port_num = (fe->targ_port < CTL_MAX_PORTS) ? fe->targ_port :
+ fe->targ_port - CTL_MAX_PORTS;
+ ctl_clear_mask(&control_softc->ctl_port_mask, port_num);
+ control_softc->ctl_ports[port_num] = NULL;
+ mtx_unlock(&control_softc->ctl_lock);
+bailout:
+ return (retval);
+}
+
+void
+ctl_frontend_set_wwns(struct ctl_frontend *fe, int wwnn_valid, uint64_t wwnn,
+ int wwpn_valid, uint64_t wwpn)
+{
+ if (wwnn_valid)
+ fe->wwnn = wwnn;
+
+ if (wwpn_valid)
+ fe->wwpn = wwpn;
+}
+
+void
+ctl_frontend_online(struct ctl_frontend *fe)
+{
+ fe->port_online(fe->onoff_arg);
+ /* XXX KDM need a lock here? */
+ fe->status |= CTL_PORT_STATUS_ONLINE;
+}
+
+void
+ctl_frontend_offline(struct ctl_frontend *fe)
+{
+ fe->port_offline(fe->onoff_arg);
+ /* XXX KDM need a lock here? */
+ fe->status &= ~CTL_PORT_STATUS_ONLINE;
+}
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_frontend.h b/sys/cam/ctl/ctl_frontend.h
new file mode 100644
index 0000000..b286476
--- /dev/null
+++ b/sys/cam/ctl/ctl_frontend.h
@@ -0,0 +1,295 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.h#2 $
+ * $FreeBSD$
+ */
+/*
+ * CAM Target Layer front end registration hooks
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_FRONTEND_H_
+#define _CTL_FRONTEND_H_
+
+typedef enum {
+ CTL_PORT_STATUS_NONE = 0x00,
+ CTL_PORT_STATUS_ONLINE = 0x01,
+ CTL_PORT_STATUS_TARG_ONLINE = 0x02,
+ CTL_PORT_STATUS_LUN_ONLINE = 0x04
+} ctl_port_status;
+
+typedef void (*port_func_t)(void *onoff_arg);
+typedef int (*targ_func_t)(void *arg, struct ctl_id targ_id);
+typedef int (*lun_func_t)(void *arg, struct ctl_id targ_id, int lun_id);
+
+/*
+ * The ctl_frontend structure is the registration mechanism between a FETD
+ * (Front End Target Driver) and the CTL layer. Here is a description of
+ * the fields:
+ *
+ * port_type: This field tells CTL what kind of front end it is
+ * dealing with. This field serves two purposes.
+ * The first is to let CTL know whether the frontend
+ * in question is inside the main CTL module (i.e.
+ * the ioctl front end), and therefore its module
+ * reference count shouldn't be incremented. The
+ * CTL ioctl front end should continue to use the
+ * CTL_PORT_IOCTL argument as long as it is part of
+ * the main CTL module. The second is to let CTL
+ * know what kind of front end it is dealing with, so
+ * it can return the proper inquiry data for that
+ * particular port.
+ *
+ * num_requested_ctl_io: This is the number of ctl_io structures that the
+ * front end needs for its pool. This should
+ * generally be the maximum number of outstanding
+ * transactions that the FETD can handle. The CTL
+ * layer will add a few to this to account for
+ * ctl_io buffers queued for pending sense data.
+ * (Pending sense only gets queued if the FETD
+ * doesn't support autosense. e.g. non-packetized
+ * parallel SCSI doesn't support autosense.)
+ *
+ * port_name: A string describing the FETD. e.g. "LSI 1030T U320"
+ * or whatever you want to use to describe the driver.
+ *
+ *
+ * particular port within the driver/hardware. This
+ * number is hardware/driver specific.
+ * virtual_port: This is the virtual port number of this
+ *			particular port.  This is for things like NPIV.
+ *
+ * port_online(): This function is called, with onoff_arg as its
+ * argument, by the CTL layer when it wants the FETD
+ * to start responding to selections on the specified
+ * target ID. (targ_target)
+ *
+ * port_offline(): This function is called, with onoff_arg as its
+ * argument, by the CTL layer when it wants the FETD
+ * to stop responding to selection on the specified
+ * target ID. (targ_target)
+ *
+ * onoff_arg: This is supplied as an argument to port_online()
+ * and port_offline(). This is specified by the
+ * FETD.
+ *
+ * targ_enable(): This function is called, with targ_lun_arg and a
+ * target ID as its arguments, by CTL when it wants
+ * the FETD to enable a particular target. targ_enable()
+ * will always be called for a particular target ID
+ * before any LUN is enabled for that target. If the
+ * FETD does not support enabling targets, but rather
+ * LUNs, it should ignore this call and return 0. If
+ * the FETD does support enabling targets, it should
+ * return 0 for success and non-zero if it cannot
+ * enable the given target.
+ *
+ * TODO: Add the ability to specify a WWID here.
+ *
+ * targ_disable(): This function is called, with targ_lun_arg and a
+ * target ID as its arguments, by CTL when it wants
+ * the FETD to disable a particular target.
+ * targ_disable() will always be called for a
+ * particular target ID after all LUNs are disabled
+ * on that particular target. If the FETD does not
+ * support enabling targets, it should ignore this
+ * call and return 0. If the FETD does support
+ * enabling targets, it should return 0 for success,
+ * and non-zero if it cannot disable the given target.
+ *
+ * lun_enable(): This function is called, with targ_lun_arg, a target
+ * ID and a LUN ID as its arguments, by CTL when it
+ * wants the FETD to enable a particular LUN. If the
+ * FETD doesn't really know about LUNs, it should
+ * just ignore this call and return 0. If the FETD
+ * cannot enable the requested LUN for some reason, the
+ * FETD should return non-zero status.
+ *
+ * lun_disable(): This function is called, with targ_lun_arg, a target
+ * ID and LUN ID as its arguments, by CTL when it
+ * wants the FETD to disable a particular LUN. If the
+ * FETD doesn't really know about LUNs, it should just
+ * ignore this call and return 0. If the FETD cannot
+ * disable the requested LUN for some reason, the
+ * FETD should return non-zero status.
+ *
+ * targ_lun_arg: This is supplied as an argument to the targ/lun
+ * enable/disable() functions. This is specified by
+ * the FETD.
+ *
+ * fe_datamove(): This function is called one or more times per I/O
+ * by the CTL layer to tell the FETD to initiate a
+ * DMA to or from the data buffer(s) specified by
+ * the passed-in ctl_io structure.
+ *
+ * fe_done(): This function is called by the CTL layer when a
+ * particular SCSI I/O or task management command has
+ * completed. For SCSI I/O requests (CTL_IO_SCSI),
+ * sense data is always supplied if the status is
+ * CTL_SCSI_ERROR and the SCSI status byte is
+ * SCSI_STATUS_CHECK_COND. If the FETD doesn't
+ * support autosense, the sense should be queued
+ * back to the CTL layer via ctl_queue_sense().
+ *
+ * fe_dump(): This function, if it exists, is called by CTL
+ * to request a dump of any debugging information or
+ * state to the console.
+ *
+ * max_targets: The maximum number of targets that we can create
+ * per-port.
+ *
+ * max_target_id: The highest target ID that we can use.
+ *
+ * targ_port: The CTL layer assigns a "port number" to every
+ *			FETD.  This port number should be passed back
+ *			in the header of every ctl_io that is queued to
+ * the CTL layer. This enables us to determine
+ * which bus the command came in on.
+ *
+ * ctl_pool_ref: Memory pool reference used by the FETD in calls to
+ * ctl_alloc_io().
+ *
+ * max_initiators: Maximum number of initiators that the FETD is
+ * allowed to have. Initiators should be numbered
+ * from 0 to max_initiators - 1. This value will
+ * typically be 16, and thus not a problem for
+ * parallel SCSI. This may present issues for Fibre
+ * Channel.
+ *
+ * wwnn World Wide Node Name to be used by the FETD.
+ * Note that this is set *after* registration. It
+ * will be set prior to the online function getting
+ * called.
+ *
+ * wwpn World Wide Port Name to be used by the FETD.
+ * Note that this is set *after* registration. It
+ * will be set prior to the online function getting
+ * called.
+ *
+ * status: Used by CTL to keep track of per-FETD state.
+ *
+ * links: Linked list pointers, used by CTL. The FETD
+ * shouldn't touch this field.
+ */
+struct ctl_frontend {
+ ctl_port_type port_type; /* passed to CTL */
+ int num_requested_ctl_io; /* passed to CTL */
+ char *port_name; /* passed to CTL */
+ int physical_port; /* passed to CTL */
+ int virtual_port; /* passed to CTL */
+ port_func_t port_online; /* passed to CTL */
+ port_func_t port_offline; /* passed to CTL */
+ void *onoff_arg; /* passed to CTL */
+ targ_func_t targ_enable; /* passed to CTL */
+ targ_func_t targ_disable; /* passed to CTL */
+ lun_func_t lun_enable; /* passed to CTL */
+ lun_func_t lun_disable; /* passed to CTL */
+ void *targ_lun_arg; /* passed to CTL */
+ void (*fe_datamove)(union ctl_io *io); /* passed to CTL */
+ void (*fe_done)(union ctl_io *io); /* passed to CTL */
+ void (*fe_dump)(void); /* passed to CTL */
+ int max_targets; /* passed to CTL */
+ int max_target_id; /* passed to CTL */
+ int32_t targ_port; /* passed back to FETD */
+ void *ctl_pool_ref; /* passed back to FETD */
+ uint32_t max_initiators; /* passed back to FETD */
+ uint64_t wwnn; /* set by CTL before online */
+ uint64_t wwpn; /* set by CTL before online */
+ ctl_port_status status; /* used by CTL */
+ STAILQ_ENTRY(ctl_frontend) links; /* used by CTL */
+};
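+
+/*
+ * A minimal registration sketch (hypothetical FETD; CTL_PORT_FC is
+ * assumed to be the ctl_port_type value for a Fibre Channel port):
+ *
+ *	static struct ctl_frontend example_fe;
+ *
+ *	example_fe.port_type = CTL_PORT_FC;
+ *	example_fe.num_requested_ctl_io = 256;
+ *	example_fe.port_name = "example0";
+ *	example_fe.port_online = example_online;
+ *	example_fe.port_offline = example_offline;
+ *	example_fe.onoff_arg = &example_softc;
+ *	example_fe.fe_datamove = example_datamove;
+ *	example_fe.fe_done = example_done;
+ *	if (ctl_frontend_register(&example_fe, 1) != 0)
+ *		return (ENOMEM);
+ *
+ * On success, example_fe.targ_port holds the assigned port number.
+ */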
+
+/*
+ * This may block until resources are allocated. Called at FETD module load
+ * time. Returns 0 for success, non-zero for failure.
+ */
+int ctl_frontend_register(struct ctl_frontend *fe, int master_SC);
+
+/*
+ * Called at FETD module unload time.
+ * Returns 0 for success, non-zero for failure.
+ */
+int ctl_frontend_deregister(struct ctl_frontend *fe);
+
+/*
+ * Called to set the WWNN and WWPN for a particular frontend.
+ */
+void ctl_frontend_set_wwns(struct ctl_frontend *fe, int wwnn_valid,
+ uint64_t wwnn, int wwpn_valid, uint64_t wwpn);
+
+/*
+ * Called to bring a particular frontend online.
+ */
+void ctl_frontend_online(struct ctl_frontend *fe);
+
+/*
+ * Called to take a particular frontend offline.
+ */
+void ctl_frontend_offline(struct ctl_frontend *fe);
+
+/*
+ * This routine queues I/O and task management requests from the FETD to the
+ * CTL layer. Returns immediately. Returns 0 for success, non-zero for
+ * failure.
+ */
+int ctl_queue(union ctl_io *io);
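+
+/*
+ * Typical submission sequence from a FETD (sketch; values elided with
+ * "..." are supplied by the driver):
+ *
+ *	io = ctl_alloc_io(fe->ctl_pool_ref);
+ *	ctl_zero_io(io);
+ *	io->io_hdr.io_type = CTL_IO_SCSI;
+ *	io->io_hdr.nexus.targ_port = fe->targ_port;
+ *	io->io_hdr.nexus.initid.id = ...;
+ *	io->io_hdr.nexus.targ_target.id = ...;
+ *	io->io_hdr.nexus.targ_lun = ...;
+ *	io->scsiio.tag_num = ...;
+ *	io->scsiio.cdb_len = cdb_len;
+ *	bcopy(cdb, io->scsiio.cdb, cdb_len);
+ *	if (ctl_queue(io) != CTL_RETVAL_COMPLETE)
+ *		ctl_free_io(io);
+ */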
+
+/*
+ * This routine is used if the front end interface doesn't support
+ * autosense (e.g. non-packetized parallel SCSI). This will queue the
+ * scsiio structure back to a per-lun pending sense queue. This MUST be
+ * called BEFORE any request sense can get queued to the CTL layer -- I
+ * need it in the queue in order to service the request. The scsiio
+ * structure passed in here will be freed by the CTL layer when sense is
+ * retrieved by the initiator. Returns 0 for success, non-zero for failure.
+ */
+int ctl_queue_sense(union ctl_io *io);
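+
+/*
+ * Sketch of the non-autosense flow: in its fe_done handler, a FETD
+ * without autosense keeps the scsiio (rather than freeing it) and
+ * hands it back, e.g.:
+ *
+ *	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR &&
+ *	    io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)
+ *		ctl_queue_sense(io);
+ */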
+
+/*
+ * This routine adds an initiator to CTL's port database. The WWPN should
+ * be the FC WWPN, if available. The targ_port field should be the same as
+ * the targ_port passed back from CTL in the ctl_frontend structure above.
+ * The iid field should be the same as the iid passed in the nexus of each
+ * ctl_io from this initiator.
+ */
+int ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid);
+
+/*
+ * This routine will remove an initiator from CTL's port database. The
+ * targ_port field should be the same as the targ_port passed back in the
+ * ctl_frontend structure above. The iid field should be the same as the
+ * iid passed in the nexus of each ctl_io from this initiator.
+ */
+int ctl_remove_initiator(int32_t targ_port, uint32_t iid);
+
+#endif /* _CTL_FRONTEND_H_ */
diff --git a/sys/cam/ctl/ctl_frontend_cam_sim.c b/sys/cam/ctl/ctl_frontend_cam_sim.c
new file mode 100644
index 0000000..bfd1e60
--- /dev/null
+++ b/sys/cam/ctl/ctl_frontend_cam_sim.c
@@ -0,0 +1,866 @@
+/*-
+ * Copyright (c) 2009 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_cam_sim.c#4 $
+ */
+/*
+ * CTL frontend to CAM SIM interface. This allows access to CTL LUNs via
+ * the da(4) and pass(4) drivers from inside the system.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/queue.h>
+#include <sys/bus.h>
+#include <sys/sysctl.h>
+#include <machine/bus.h>
+#include <sys/sbuf.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_xpt.h>
+#include <cam/cam_periph.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_mem_pool.h>
+#include <cam/ctl/ctl_debug.h>
+
+#define io_ptr spriv_ptr1
+
+struct cfcs_io {
+ union ccb *ccb;
+};
+
+struct cfcs_softc {
+ struct ctl_frontend fe;
+ char port_name[32];
+ struct cam_sim *sim;
+ struct cam_devq *devq;
+ struct cam_path *path;
+ struct mtx lock;
+ char lock_desc[32];
+ uint64_t wwnn;
+ uint64_t wwpn;
+ uint32_t cur_tag_num;
+ int online;
+};
+
+/*
+ * We can't handle CCBs with these flags. For the most part, we just don't
+ * handle physical addresses yet. That would require mapping things in
+ * order to do the copy.
+ */
+#define CFCS_BAD_CCB_FLAGS (CAM_DATA_PHYS | CAM_SG_LIST_PHYS | \
+ CAM_MSG_BUF_PHYS | CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR |\
+ CAM_SENSE_PHYS)
+
+int cfcs_init(void);
+void cfcs_shutdown(void);
+static void cfcs_poll(struct cam_sim *sim);
+static void cfcs_online(void *arg);
+static void cfcs_offline(void *arg);
+static int cfcs_targ_enable(void *arg, struct ctl_id targ_id);
+static int cfcs_targ_disable(void *arg, struct ctl_id targ_id);
+static int cfcs_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
+static int cfcs_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
+static void cfcs_datamove(union ctl_io *io);
+static void cfcs_done(union ctl_io *io);
+void cfcs_action(struct cam_sim *sim, union ccb *ccb);
+static void cfcs_async(void *callback_arg, uint32_t code,
+ struct cam_path *path, void *arg);
+
+struct cfcs_softc cfcs_softc;
+/*
+ * This is primarily intended to allow for error injection to test the CAM
+ * sense data and sense residual handling code. This sets the maximum
+ * amount of SCSI sense data that we will report to CAM.
+ */
+static int cfcs_max_sense = sizeof(struct scsi_sense_data);
+
+SYSINIT(cfcs_init, SI_SUB_CONFIGURE, SI_ORDER_FOURTH, cfcs_init, NULL);
+SYSCTL_NODE(_kern_cam, OID_AUTO, ctl2cam, CTLFLAG_RD, 0,
+ "CAM Target Layer SIM frontend");
+SYSCTL_INT(_kern_cam_ctl2cam, OID_AUTO, max_sense, CTLFLAG_RW,
+ &cfcs_max_sense, 0, "Maximum sense data size");
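+
+/*
+ * Example: "sysctl kern.cam.ctl2cam.max_sense=32" limits the sense data
+ * reported to CAM to 32 bytes, forcing a sense residual for testing.
+ */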
+
+int
+cfcs_init(void)
+{
+ struct cfcs_softc *softc;
+ struct ccb_setasync csa;
+ struct ctl_frontend *fe;
+#ifdef NEEDTOPORT
+ char wwnn[8];
+#endif
+ int retval;
+
+ softc = &cfcs_softc;
+ retval = 0;
+ bzero(softc, sizeof(*softc));
+ sprintf(softc->lock_desc, "ctl2cam");
+ mtx_init(&softc->lock, softc->lock_desc, NULL, MTX_DEF);
+ fe = &softc->fe;
+
+ fe->port_type = CTL_PORT_INTERNAL;
+ /* XXX KDM what should the real number be here? */
+ fe->num_requested_ctl_io = 4096;
+ snprintf(softc->port_name, sizeof(softc->port_name), "ctl2cam");
+ fe->port_name = softc->port_name;
+ fe->port_online = cfcs_online;
+ fe->port_offline = cfcs_offline;
+ fe->onoff_arg = softc;
+ fe->targ_enable = cfcs_targ_enable;
+ fe->targ_disable = cfcs_targ_disable;
+ fe->lun_enable = cfcs_lun_enable;
+ fe->lun_disable = cfcs_lun_disable;
+ fe->targ_lun_arg = softc;
+ fe->fe_datamove = cfcs_datamove;
+ fe->fe_done = cfcs_done;
+
+ /* XXX KDM what should we report here? */
+ /* XXX These should probably be fetched from CTL. */
+ fe->max_targets = 1;
+ fe->max_target_id = 15;
+
+ retval = ctl_frontend_register(fe, /*master_SC*/ 1);
+ if (retval != 0) {
+ printf("%s: ctl_frontend_register() failed with error %d!\n",
+ __func__, retval);
+ retval = 1;
+ goto bailout;
+ }
+
+ /*
+ * Get the WWNN out of the database, and create a WWPN as well.
+ */
+#ifdef NEEDTOPORT
+ ddb_GetWWNN((char *)wwnn);
+ softc->wwnn = be64dec(wwnn);
+ softc->wwpn = softc->wwnn + (softc->fe.targ_port & 0xff);
+#endif
+
+ /*
+ * If the CTL frontend didn't tell us what our WWNN/WWPN is, go
+ * ahead and set something random.
+ */
+ if (fe->wwnn == 0) {
+ uint64_t random_bits;
+
+ arc4rand(&random_bits, sizeof(random_bits), 0);
+ softc->wwnn = (random_bits & 0x0000000fffffff00ULL) |
+ /* Company ID */ 0x5000000000000000ULL |
+ /* NL-Port */ 0x0300;
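+		/*
+		 * The 0x5 in the top nibble selects an NAA format 5
+		 * (IEEE Registered) name; only the low-order bits above
+		 * come from the random data.
+		 */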
+ softc->wwpn = softc->wwnn + fe->targ_port + 1;
+ fe->wwnn = softc->wwnn;
+ fe->wwpn = softc->wwpn;
+ } else {
+ softc->wwnn = fe->wwnn;
+ softc->wwpn = fe->wwpn;
+ }
+
+ softc->devq = cam_simq_alloc(fe->num_requested_ctl_io);
+ if (softc->devq == NULL) {
+ printf("%s: error allocating devq\n", __func__);
+ retval = ENOMEM;
+ goto bailout;
+ }
+
+ softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name,
+ softc, /*unit*/ 0, &softc->lock, 1,
+ fe->num_requested_ctl_io, softc->devq);
+ if (softc->sim == NULL) {
+ printf("%s: error allocating SIM\n", __func__);
+ retval = ENOMEM;
+ goto bailout;
+ }
+
+ mtx_lock(&softc->lock);
+ if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) {
+ printf("%s: error registering SIM\n", __func__);
+ retval = ENOMEM;
+ goto bailout;
+ }
+
+ if (xpt_create_path(&softc->path, /*periph*/NULL,
+ cam_sim_path(softc->sim),
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ printf("%s: error creating path\n", __func__);
+ xpt_bus_deregister(cam_sim_path(softc->sim));
+ retval = 1;
+ goto bailout;
+ }
+
+ mtx_unlock(&softc->lock);
+
+ xpt_setup_ccb(&csa.ccb_h, softc->path, /*priority*/ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_LOST_DEVICE;
+ csa.callback = cfcs_async;
+ csa.callback_arg = softc->sim;
+ xpt_action((union ccb *)&csa);
+
+ return (retval);
+
+bailout:
+	if (softc->sim) {
+		/*
+		 * The failure paths that reach here with a SIM allocated
+		 * still hold softc->lock (the SIM lock), which
+		 * cam_sim_free() requires; the earlier failure paths
+		 * never acquired it, so only unlock in this branch.
+		 */
+		cam_sim_free(softc->sim, /*free_devq*/ TRUE);
+		mtx_unlock(&softc->lock);
+	} else if (softc->devq)
+		cam_simq_free(softc->devq);
+
+	return (retval);
+}
+
+static void
+cfcs_poll(struct cam_sim *sim)
+{
+
+}
+
+void
+cfcs_shutdown(void)
+{
+
+}
+
+static void
+cfcs_online(void *arg)
+{
+ struct cfcs_softc *softc;
+ union ccb *ccb;
+
+ softc = (struct cfcs_softc *)arg;
+
+ mtx_lock(&softc->lock);
+ softc->online = 1;
+ mtx_unlock(&softc->lock);
+
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ printf("%s: unable to allocate CCB for rescan\n", __func__);
+ return;
+ }
+
+ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
+ cam_sim_path(softc->sim), CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ printf("%s: can't allocate path for rescan\n", __func__);
+ xpt_free_ccb(ccb);
+ return;
+ }
+ xpt_rescan(ccb);
+}
+
+static void
+cfcs_offline(void *arg)
+{
+ struct cfcs_softc *softc;
+ union ccb *ccb;
+
+ softc = (struct cfcs_softc *)arg;
+
+ mtx_lock(&softc->lock);
+ softc->online = 0;
+ mtx_unlock(&softc->lock);
+
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ printf("%s: unable to allocate CCB for rescan\n", __func__);
+ return;
+ }
+
+ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
+ cam_sim_path(softc->sim), CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ printf("%s: can't allocate path for rescan\n", __func__);
+ xpt_free_ccb(ccb);
+ return;
+ }
+ xpt_rescan(ccb);
+}
+
+static int
+cfcs_targ_enable(void *arg, struct ctl_id targ_id)
+{
+ return (0);
+}
+
+static int
+cfcs_targ_disable(void *arg, struct ctl_id targ_id)
+{
+ return (0);
+}
+
+static int
+cfcs_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
+{
+ return (0);
+}
+static int
+cfcs_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
+{
+ return (0);
+}
+
+/*
+ * This function is very similar to ctl_ioctl_do_datamove(). Is there a
+ * way to combine the functionality?
+ *
+ * XXX KDM may need to move this into a thread. We're doing a bcopy in the
+ * caller's context, which will usually be the backend. That may not be a
+ * good thing.
+ */
+static void
+cfcs_datamove(union ctl_io *io)
+{
+ union ccb *ccb;
+ bus_dma_segment_t cam_sg_entry, *cam_sglist;
+ struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
+ int cam_sg_count, ctl_sg_count, cam_sg_start;
+ int cam_sg_offset;
+ int len_to_copy, len_copied;
+ int ctl_watermark, cam_watermark;
+ int i, j;
+
+ cam_sg_offset = 0;
+ cam_sg_start = 0;
+
+ ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+
+ /*
+ * Note that we have a check in cfcs_action() to make sure that any
+ * CCBs with "bad" flags are returned with CAM_REQ_INVALID. This
+ * is just to make sure no one removes that check without updating
+ * this code to provide the additional functionality necessary to
+ * support those modes of operation.
+ */
+ KASSERT(((ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) == 0), ("invalid "
+ "CAM flags %#x", (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS)));
+
+ /*
+ * Simplify things on both sides by putting single buffers into a
+ * single entry S/G list.
+ */
+ if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
+ if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) {
+ /* We should filter this out on entry */
+ panic("%s: physical S/G list, should not get here",
+ __func__);
+ } else {
+ int len_seen;
+
+ cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
+ cam_sg_count = ccb->csio.sglist_cnt;
+
+ for (i = 0, len_seen = 0; i < cam_sg_count; i++) {
+ if ((len_seen + cam_sglist[i].ds_len) >=
+ io->scsiio.kern_rel_offset) {
+ cam_sg_start = i;
+ cam_sg_offset =
+ io->scsiio.kern_rel_offset -
+ len_seen;
+ break;
+ }
+ len_seen += cam_sglist[i].ds_len;
+ }
+ }
+ } else {
+ cam_sglist = &cam_sg_entry;
+ cam_sglist[0].ds_len = ccb->csio.dxfer_len;
+ cam_sglist[0].ds_addr = (bus_addr_t)ccb->csio.data_ptr;
+ cam_sg_count = 1;
+ cam_sg_start = 0;
+ cam_sg_offset = io->scsiio.kern_rel_offset;
+ }
+
+ if (io->scsiio.kern_sg_entries > 0) {
+ ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+ ctl_sg_count = io->scsiio.kern_sg_entries;
+ } else {
+ ctl_sglist = &ctl_sg_entry;
+ ctl_sglist->addr = io->scsiio.kern_data_ptr;
+ ctl_sglist->len = io->scsiio.kern_data_len;
+ ctl_sg_count = 1;
+ }
+
+ ctl_watermark = 0;
+ cam_watermark = cam_sg_offset;
+ len_copied = 0;
+ for (i = cam_sg_start, j = 0;
+ i < cam_sg_count && j < ctl_sg_count;) {
+ uint8_t *cam_ptr, *ctl_ptr;
+
+ len_to_copy = ctl_min(cam_sglist[i].ds_len - cam_watermark,
+ ctl_sglist[j].len - ctl_watermark);
+
+ cam_ptr = (uint8_t *)cam_sglist[i].ds_addr;
+ cam_ptr = cam_ptr + cam_watermark;
+ if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
+ /*
+ * XXX KDM fix this!
+ */
+ panic("need to implement bus address support");
+#if 0
+ kern_ptr = bus_to_virt(kern_sglist[j].addr);
+#endif
+ } else
+ ctl_ptr = (uint8_t *)ctl_sglist[j].addr;
+ ctl_ptr = ctl_ptr + ctl_watermark;
+
+ ctl_watermark += len_to_copy;
+ cam_watermark += len_to_copy;
+
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_IN) {
+ CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n",
+ __func__, len_to_copy));
+			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
+					 ctl_ptr, cam_ptr));
+ bcopy(ctl_ptr, cam_ptr, len_to_copy);
+ } else {
+ CTL_DEBUG_PRINT(("%s: copying %d bytes from CAM\n",
+ __func__, len_to_copy));
+			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
+					 cam_ptr, ctl_ptr));
+ bcopy(cam_ptr, ctl_ptr, len_to_copy);
+ }
+
+ len_copied += len_to_copy;
+
+ if (cam_sglist[i].ds_len == cam_watermark) {
+ i++;
+ cam_watermark = 0;
+ }
+
+ if (ctl_sglist[j].len == ctl_watermark) {
+ j++;
+ ctl_watermark = 0;
+ }
+ }
+
+ io->scsiio.ext_data_filled += len_copied;
+
+ io->scsiio.be_move_done(io);
+}
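+
+/*
+ * Worked example of the watermark loop above: with CAM segments of
+ * 4096/4096 bytes and CTL segments of 2048/6144 bytes, the loop copies
+ * 2048 bytes (finishing CTL[0]), 2048 more (finishing CAM[0]), and
+ * then 4096 (finishing CAM[1] and CTL[1]); each watermark records the
+ * partial progress within a segment that has not yet been consumed.
+ */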
+
+static void
+cfcs_done(union ctl_io *io)
+{
+ union ccb *ccb;
+ struct cfcs_softc *softc;
+ struct cam_sim *sim;
+
+ ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+
+ sim = xpt_path_sim(ccb->ccb_h.path);
+ softc = (struct cfcs_softc *)cam_sim_softc(sim);
+
+ /*
+ * At this point we should have status. If we don't, that's a bug.
+ */
+ KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE),
+ ("invalid CTL status %#x", io->io_hdr.status));
+
+ /*
+ * Translate CTL status to CAM status.
+ */
+ switch (io->io_hdr.status & CTL_STATUS_MASK) {
+ case CTL_SUCCESS:
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case CTL_SCSI_ERROR:
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
+ ccb->csio.scsi_status = io->scsiio.scsi_status;
+ bcopy(&io->scsiio.sense_data, &ccb->csio.sense_data,
+ min(io->scsiio.sense_len, ccb->csio.sense_len));
+ if (ccb->csio.sense_len > io->scsiio.sense_len)
+ ccb->csio.sense_resid = ccb->csio.sense_len -
+ io->scsiio.sense_len;
+ else
+ ccb->csio.sense_resid = 0;
+ if ((ccb->csio.sense_len - ccb->csio.sense_resid) >
+ cfcs_max_sense) {
+ ccb->csio.sense_resid = ccb->csio.sense_len -
+ cfcs_max_sense;
+ }
+ break;
+ case CTL_CMD_ABORTED:
+ ccb->ccb_h.status = CAM_REQ_ABORTED;
+ break;
+ case CTL_ERROR:
+ default:
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ break;
+ }
+
+ mtx_lock(sim->mtx);
+ xpt_done(ccb);
+ mtx_unlock(sim->mtx);
+
+ ctl_free_io(io);
+}
+
+void
+cfcs_action(struct cam_sim *sim, union ccb *ccb)
+{
+ struct cfcs_softc *softc;
+ int err;
+
+ softc = (struct cfcs_softc *)cam_sim_softc(sim);
+ mtx_assert(&softc->lock, MA_OWNED);
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO: {
+ union ctl_io *io;
+ struct ccb_scsiio *csio;
+
+ csio = &ccb->csio;
+
+ /*
+ * Catch CCB flags, like physical address flags, that
+ * indicate situations we currently can't handle.
+ */
+ if (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ printf("%s: bad CCB flags %#x (all flags %#x)\n",
+ __func__, ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS,
+ ccb->ccb_h.flags);
+ xpt_done(ccb);
+ return;
+ }
+
+ /*
+ * If we aren't online, there are no devices to see.
+ */
+ if (softc->online == 0) {
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ xpt_done(ccb);
+ return;
+ }
+
+ io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ if (io == NULL) {
+ printf("%s: can't allocate ctl_io\n", __func__);
+ ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
+ xpt_freeze_devq(ccb->ccb_h.path, 1);
+ xpt_done(ccb);
+ return;
+ }
+ ctl_zero_io(io);
+ /* Save pointers on both sides */
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
+ ccb->ccb_h.io_ptr = io;
+
+ /*
+		 * Only SCSI I/O comes down this path; resets, etc. come
+ * down via the XPT_RESET_BUS/LUN CCBs below.
+ */
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ io->io_hdr.nexus.initid.id = 1;
+ io->io_hdr.nexus.targ_port = softc->fe.targ_port;
+ /*
+ * XXX KDM how do we handle target IDs?
+ */
+ io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
+ io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
+ /*
+ * This tag scheme isn't the best, since we could in theory
+ * have a very long-lived I/O and tag collision, especially
+ * in a high I/O environment. But it should work well
+ * enough for now. Since we're using unsigned ints,
+ * they'll just wrap around.
+ */
+ io->scsiio.tag_num = softc->cur_tag_num++;
+ csio->tag_id = io->scsiio.tag_num;
+ switch (csio->tag_action) {
+ case CAM_TAG_ACTION_NONE:
+ io->scsiio.tag_type = CTL_TAG_UNTAGGED;
+ break;
+ case MSG_SIMPLE_TASK:
+ io->scsiio.tag_type = CTL_TAG_SIMPLE;
+ break;
+ case MSG_HEAD_OF_QUEUE_TASK:
+ io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
+ break;
+ case MSG_ORDERED_TASK:
+ io->scsiio.tag_type = CTL_TAG_ORDERED;
+ break;
+ case MSG_ACA_TASK:
+ io->scsiio.tag_type = CTL_TAG_ACA;
+ break;
+ default:
+ io->scsiio.tag_type = CTL_TAG_UNTAGGED;
+ printf("%s: unhandled tag type %#x!!\n", __func__,
+ csio->tag_action);
+ break;
+ }
+ if (csio->cdb_len > sizeof(io->scsiio.cdb)) {
+			printf("%s: WARNING: CDB len %d > ctl_io space %zu\n",
+ __func__, csio->cdb_len, sizeof(io->scsiio.cdb));
+ }
+ io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb));
+ bcopy(csio->cdb_io.cdb_bytes, io->scsiio.cdb,
+ io->scsiio.cdb_len);
+
+ err = ctl_queue(io);
+ if (err != CTL_RETVAL_COMPLETE) {
+ printf("%s: func %d: error %d returned by "
+ "ctl_queue()!\n", __func__,
+ ccb->ccb_h.func_code, err);
+ ctl_free_io(io);
+ } else {
+ ccb->ccb_h.status |= CAM_SIM_QUEUED;
+ }
+ break;
+ }
+ case XPT_ABORT: {
+ union ctl_io *io;
+ union ccb *abort_ccb;
+
+ abort_ccb = ccb->cab.abort_ccb;
+
+		if (abort_ccb->ccb_h.func_code != XPT_SCSI_IO) {
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+			xpt_done(ccb);
+			return;
+		}
+
+ /*
+ * If we aren't online, there are no devices to talk to.
+ */
+ if (softc->online == 0) {
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ xpt_done(ccb);
+ return;
+ }
+
+ io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ if (io == NULL) {
+ ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
+ xpt_freeze_devq(ccb->ccb_h.path, 1);
+ xpt_done(ccb);
+ return;
+ }
+
+ ctl_zero_io(io);
+ /* Save pointers on both sides */
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
+ ccb->ccb_h.io_ptr = io;
+
+ io->io_hdr.io_type = CTL_IO_TASK;
+ io->io_hdr.nexus.initid.id = 1;
+ io->io_hdr.nexus.targ_port = softc->fe.targ_port;
+ io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
+ io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
+ io->taskio.task_action = CTL_TASK_ABORT_TASK;
+ io->taskio.tag_num = abort_ccb->csio.tag_id;
+ switch (abort_ccb->csio.tag_action) {
+ case CAM_TAG_ACTION_NONE:
+ io->taskio.tag_type = CTL_TAG_UNTAGGED;
+ break;
+ case MSG_SIMPLE_TASK:
+ io->taskio.tag_type = CTL_TAG_SIMPLE;
+ break;
+ case MSG_HEAD_OF_QUEUE_TASK:
+ io->taskio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
+ break;
+ case MSG_ORDERED_TASK:
+ io->taskio.tag_type = CTL_TAG_ORDERED;
+ break;
+ case MSG_ACA_TASK:
+ io->taskio.tag_type = CTL_TAG_ACA;
+ break;
+ default:
+ io->taskio.tag_type = CTL_TAG_UNTAGGED;
+ printf("%s: unhandled tag type %#x!!\n", __func__,
+ abort_ccb->csio.tag_action);
+ break;
+ }
+ err = ctl_queue(io);
+ if (err != CTL_RETVAL_COMPLETE) {
+ printf("%s func %d: error %d returned by "
+ "ctl_queue()!\n", __func__,
+ ccb->ccb_h.func_code, err);
+ ctl_free_io(io);
+ }
+ break;
+ }
+ case XPT_GET_TRAN_SETTINGS: {
+ struct ccb_trans_settings *cts;
+ struct ccb_trans_settings_scsi *scsi;
+ struct ccb_trans_settings_fc *fc;
+
+ cts = &ccb->cts;
+ scsi = &cts->proto_specific.scsi;
+ fc = &cts->xport_specific.fc;
+
+ cts->protocol = PROTO_SCSI;
+ cts->protocol_version = SCSI_REV_SPC2;
+ cts->transport = XPORT_FC;
+ cts->transport_version = 0;
+
+ scsi->valid = CTS_SCSI_VALID_TQ;
+ scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
+ fc->valid = CTS_FC_VALID_SPEED;
+ fc->bitrate = 800000;
+ fc->wwnn = softc->wwnn;
+ fc->wwpn = softc->wwpn;
+ fc->port = softc->fe.targ_port;
+ fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN |
+ CTS_FC_VALID_PORT;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ case XPT_SET_TRAN_SETTINGS:
+ /* XXX KDM should we actually do something here? */
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case XPT_RESET_BUS:
+ case XPT_RESET_DEV: {
+ union ctl_io *io;
+
+ /*
+ * If we aren't online, there are no devices to talk to.
+ */
+ if (softc->online == 0) {
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ xpt_done(ccb);
+ return;
+ }
+
+ io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ if (io == NULL) {
+ ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
+ xpt_freeze_devq(ccb->ccb_h.path, 1);
+ xpt_done(ccb);
+ return;
+ }
+
+ ctl_zero_io(io);
+ /* Save pointers on both sides */
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
+ ccb->ccb_h.io_ptr = io;
+
+ io->io_hdr.io_type = CTL_IO_TASK;
+ io->io_hdr.nexus.initid.id = 0;
+ io->io_hdr.nexus.targ_port = softc->fe.targ_port;
+ io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
+ io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
+ if (ccb->ccb_h.func_code == XPT_RESET_BUS)
+ io->taskio.task_action = CTL_TASK_BUS_RESET;
+ else
+ io->taskio.task_action = CTL_TASK_LUN_RESET;
+
+ err = ctl_queue(io);
+ if (err != CTL_RETVAL_COMPLETE) {
+ printf("%s func %d: error %d returned by "
+ "ctl_queue()!\n", __func__,
+ ccb->ccb_h.func_code, err);
+ ctl_free_io(io);
+ }
+ break;
+ }
+ case XPT_CALC_GEOMETRY:
+ cam_calc_geometry(&ccb->ccg, 1);
+ xpt_done(ccb);
+ break;
+ case XPT_PATH_INQ: {
+ struct ccb_pathinq *cpi;
+
+ cpi = &ccb->cpi;
+
+ cpi->version_num = 0;
+ cpi->hba_inquiry = PI_TAG_ABLE;
+ cpi->target_sprt = 0;
+ cpi->hba_misc = 0;
+ cpi->hba_eng_cnt = 0;
+ cpi->max_target = 1;
+ cpi->max_lun = 1024;
+ /* Do we really have a limit? */
+ cpi->maxio = 1024 * 1024;
+ cpi->async_flags = 0;
+ cpi->hpath_id = 0;
+ cpi->initiator_id = 0;
+
+ strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strncpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN);
+ strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ cpi->unit_number = 0;
+ cpi->bus_id = 0;
+ cpi->base_transfer_speed = 800000;
+ cpi->protocol = PROTO_SCSI;
+ cpi->protocol_version = SCSI_REV_SPC2;
+ /*
+ * Pretend to be Fibre Channel.
+ */
+ cpi->transport = XPORT_FC;
+ cpi->transport_version = 0;
+ cpi->xport_specific.fc.wwnn = softc->wwnn;
+ cpi->xport_specific.fc.wwpn = softc->wwpn;
+ cpi->xport_specific.fc.port = softc->fe.targ_port;
+ cpi->xport_specific.fc.bitrate = 8 * 1000 * 1000;
+ cpi->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ default:
+ ccb->ccb_h.status = CAM_PROVIDE_FAIL;
+ printf("%s: unsupported CCB type %#x\n", __func__,
+ ccb->ccb_h.func_code);
+ xpt_done(ccb);
+ break;
+ }
+}
+
+static void
+cfcs_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
+{
+
+}
diff --git a/sys/cam/ctl/ctl_frontend_internal.c b/sys/cam/ctl/ctl_frontend_internal.c
new file mode 100644
index 0000000..6653ed1
--- /dev/null
+++ b/sys/cam/ctl/ctl_frontend_internal.c
@@ -0,0 +1,1782 @@
+/*-
+ * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.c#5 $
+ */
+/*
+ * CTL kernel internal frontend target driver. This allows kernel-level
+ * clients to send commands into CTL.
+ *
+ * This has elements of a FETD (e.g. it has to set tag numbers, initiator,
+ * port, target, and LUN) and elements of an initiator (LUN discovery and
+ * probing, error recovery, command initiation). Even though this has some
+ * initiator-type elements, this is not intended to be a full-fledged
+ * initiator layer. It is only intended to send a limited number of
+ * commands to a well known target layer.
+ *
+ * To be able to fulfill the role of a full initiator layer, it would need
+ * a whole lot more functionality.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/queue.h>
+#include <sys/sbuf.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_mem_pool.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_error.h>
+
+/*
+ * Task structure:
+ * - overall metatask, different potential metatask types (e.g. forced
+ * shutdown, gentle shutdown)
+ * - forced shutdown metatask:
+ * - states: report luns, pending, done?
+ * - list of luns pending, with the relevant I/O for that lun attached.
+ * This would allow moving ahead on LUNs with no errors, and going
+ * into error recovery on LUNs with problems. Per-LUN states might
+ * include inquiry, stop/offline, done.
+ *
+ * Use LUN enable for LUN list instead of getting it manually? We'd still
+ * need inquiry data for each LUN.
+ *
+ * How to handle processor LUN w.r.t. found/stopped counts?
+ */
+#ifdef oldapi
+typedef enum {
+ CFI_TASK_NONE,
+ CFI_TASK_SHUTDOWN,
+ CFI_TASK_STARTUP
+} cfi_tasktype;
+
+struct cfi_task_startstop {
+ int total_luns;
+ int luns_complete;
+ int luns_failed;
+ cfi_cb_t callback;
+ void *callback_arg;
+ /* XXX KDM add more fields here */
+};
+
+union cfi_taskinfo {
+ struct cfi_task_startstop startstop;
+};
+
+struct cfi_metatask {
+ cfi_tasktype tasktype;
+ cfi_mt_status status;
+ union cfi_taskinfo taskinfo;
+ struct ctl_mem_element *element;
+ void *cfi_context;
+ STAILQ_ENTRY(cfi_metatask) links;
+};
+#endif
+
+typedef enum {
+ CFI_ERR_RETRY = 0x000,
+ CFI_ERR_FAIL = 0x001,
+ CFI_ERR_LUN_RESET = 0x002,
+ CFI_ERR_MASK = 0x0ff,
+ CFI_ERR_NO_DECREMENT = 0x100
+} cfi_error_action;
+
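+/*
+ * The low byte of a cfi_error_action is the action itself;
+ * CFI_ERR_NO_DECREMENT may be ORed in to indicate that the error
+ * (e.g. a unit attention) should not be charged against the I/O's
+ * retry count.
+ */
+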
+typedef enum {
+ CFI_ERR_SOFT,
+ CFI_ERR_HARD
+} cfi_error_policy;
+
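+/*
+ * LUN probe states.  A newly enabled LUN starts in CFI_LUN_INQUIRY.
+ * Non-disk LUNs go straight to CFI_LUN_READY once the inquiry data is
+ * in hand; direct access LUNs continue with READ CAPACITY, escalating
+ * to READ CAPACITY(16) when the 10-byte command reports the maximum
+ * 32-bit LBA.
+ */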
+typedef enum {
+ CFI_LUN_INQUIRY,
+ CFI_LUN_READCAPACITY,
+ CFI_LUN_READCAPACITY_16,
+ CFI_LUN_READY
+} cfi_lun_state;
+
+struct cfi_lun {
+ struct ctl_id target_id;
+ int lun_id;
+ struct scsi_inquiry_data inq_data;
+ uint64_t num_blocks;
+ uint32_t blocksize;
+ int blocksize_powerof2;
+ uint32_t cur_tag_num;
+ cfi_lun_state state;
+ struct ctl_mem_element *element;
+ struct cfi_softc *softc;
+ STAILQ_HEAD(, cfi_lun_io) io_list;
+ STAILQ_ENTRY(cfi_lun) links;
+};
+
+struct cfi_lun_io {
+ struct cfi_lun *lun;
+ struct cfi_metatask *metatask;
+ cfi_error_policy policy;
+ void (*done_function)(union ctl_io *io);
+ union ctl_io *ctl_io;
+ struct cfi_lun_io *orig_lun_io;
+ STAILQ_ENTRY(cfi_lun_io) links;
+};
+
+typedef enum {
+ CFI_NONE = 0x00,
+ CFI_ONLINE = 0x01,
+} cfi_flags;
+
+struct cfi_softc {
+ struct ctl_frontend fe;
+ char fe_name[40];
+ struct mtx lock;
+ cfi_flags flags;
+ STAILQ_HEAD(, cfi_lun) lun_list;
+ STAILQ_HEAD(, cfi_metatask) metatask_list;
+ struct ctl_mem_pool lun_pool;
+ struct ctl_mem_pool metatask_pool;
+};
+
+MALLOC_DEFINE(M_CTL_CFI, "ctlcfi", "CTL CFI");
+
+static struct cfi_softc fetd_internal_softc;
+
+void cfi_init(void);
+void cfi_shutdown(void) __unused;
+static void cfi_online(void *arg);
+static void cfi_offline(void *arg);
+static int cfi_targ_enable(void *arg, struct ctl_id targ_id);
+static int cfi_targ_disable(void *arg, struct ctl_id targ_id);
+static int cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
+static int cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
+static void cfi_datamove(union ctl_io *io);
+static cfi_error_action cfi_checkcond_parse(union ctl_io *io,
+ struct cfi_lun_io *lun_io);
+static cfi_error_action cfi_error_parse(union ctl_io *io,
+ struct cfi_lun_io *lun_io);
+static void cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
+ struct cfi_metatask *metatask, cfi_error_policy policy,
+ int retries, struct cfi_lun_io *orig_lun_io,
+ void (*done_function)(union ctl_io *io));
+static void cfi_done(union ctl_io *io);
+static void cfi_lun_probe_done(union ctl_io *io);
+static void cfi_lun_probe(struct cfi_lun *lun, int have_lock);
+static void cfi_metatask_done(struct cfi_softc *softc,
+ struct cfi_metatask *metatask);
+static void cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask,
+ union ctl_io *io);
+static void cfi_metatask_io_done(union ctl_io *io);
+static void cfi_err_recovery_done(union ctl_io *io);
+static void cfi_lun_io_done(union ctl_io *io);
+
+SYSINIT(cfi_init, SI_SUB_CONFIGURE, SI_ORDER_FOURTH, cfi_init, NULL);
+
+void
+cfi_init(void)
+{
+ struct cfi_softc *softc;
+ struct ctl_frontend *fe;
+ int retval;
+
+ softc = &fetd_internal_softc;
+
+ fe = &softc->fe;
+
+ retval = 0;
+
+ if (sizeof(struct cfi_lun_io) > CTL_PORT_PRIV_SIZE) {
+ printf("%s: size of struct cfi_lun_io %zd > "
+ "CTL_PORT_PRIV_SIZE %d\n", __func__,
+ sizeof(struct cfi_lun_io),
+ CTL_PORT_PRIV_SIZE);
+ }
+	memset(softc, 0, sizeof(*softc));
+
+ mtx_init(&softc->lock, "CTL frontend mutex", NULL, MTX_DEF);
+ softc->flags |= CTL_FLAG_MASTER_SHELF;
+
+ STAILQ_INIT(&softc->lun_list);
+ STAILQ_INIT(&softc->metatask_list);
+ sprintf(softc->fe_name, "CTL internal");
+ fe->port_type = CTL_PORT_INTERNAL;
+ fe->num_requested_ctl_io = 100;
+ fe->port_name = softc->fe_name;
+ fe->port_online = cfi_online;
+ fe->port_offline = cfi_offline;
+ fe->onoff_arg = softc;
+ fe->targ_enable = cfi_targ_enable;
+ fe->targ_disable = cfi_targ_disable;
+ fe->lun_enable = cfi_lun_enable;
+ fe->lun_disable = cfi_lun_disable;
+ fe->targ_lun_arg = softc;
+ fe->fe_datamove = cfi_datamove;
+ fe->fe_done = cfi_done;
+ fe->max_targets = 15;
+ fe->max_target_id = 15;
+
+	if (ctl_frontend_register(fe,
+	    (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
+ printf("%s: internal frontend registration failed\n", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (ctl_init_mem_pool(&softc->lun_pool,
+ sizeof(struct cfi_lun),
+ CTL_MEM_POOL_PERM_GROW, /*grow_inc*/ 3,
+ /* initial_pool_size */ CTL_MAX_LUNS) != 0) {
+ printf("%s: can't initialize LUN memory pool\n", __func__);
+ retval = 1;
+ goto bailout_error;
+ }
+
+ if (ctl_init_mem_pool(&softc->metatask_pool,
+ sizeof(struct cfi_metatask),
+ CTL_MEM_POOL_PERM_GROW, /*grow_inc*/ 3,
+ /*initial_pool_size*/ 10) != 0) {
+ printf("%s: can't initialize metatask memory pool\n", __func__);
+ retval = 2;
+ goto bailout_error;
+ }
+bailout:
+
+ return;
+
+bailout_error:
+
+ switch (retval) {
+ case 3:
+ ctl_shrink_mem_pool(&softc->metatask_pool);
+ /* FALLTHROUGH */
+ case 2:
+ ctl_shrink_mem_pool(&softc->lun_pool);
+ /* FALLTHROUGH */
+ case 1:
+ ctl_frontend_deregister(fe);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+cfi_shutdown(void)
+{
+ struct cfi_softc *softc;
+
+ softc = &fetd_internal_softc;
+
+ /*
+ * XXX KDM need to clear out any I/O pending on each LUN.
+ */
+ if (ctl_frontend_deregister(&softc->fe) != 0)
+ printf("%s: ctl_frontend_deregister() failed\n", __func__);
+
+ if (ctl_shrink_mem_pool(&softc->lun_pool) != 0)
+ printf("%s: error shrinking LUN pool\n", __func__);
+
+ if (ctl_shrink_mem_pool(&softc->metatask_pool) != 0)
+		printf("%s: error shrinking metatask pool\n", __func__);
+}
+
+static void
+cfi_online(void *arg)
+{
+ struct cfi_softc *softc;
+ struct cfi_lun *lun;
+
+ softc = (struct cfi_softc *)arg;
+
+ softc->flags |= CFI_ONLINE;
+
+ /*
+ * Go through and kick off the probe for each lun. Should we check
+ * the LUN flags here to determine whether or not to probe it?
+ */
+ mtx_lock(&softc->lock);
+ STAILQ_FOREACH(lun, &softc->lun_list, links)
+ cfi_lun_probe(lun, /*have_lock*/ 1);
+ mtx_unlock(&softc->lock);
+}
+
+static void
+cfi_offline(void *arg)
+{
+ struct cfi_softc *softc;
+
+ softc = (struct cfi_softc *)arg;
+
+ softc->flags &= ~CFI_ONLINE;
+}
+
+static int
+cfi_targ_enable(void *arg, struct ctl_id targ_id)
+{
+ return (0);
+}
+
+static int
+cfi_targ_disable(void *arg, struct ctl_id targ_id)
+{
+ return (0);
+}
+
+static int
+cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
+{
+ struct ctl_mem_element *element;
+ struct cfi_softc *softc;
+ struct cfi_lun *lun;
+ int found;
+
+ softc = (struct cfi_softc *)arg;
+
+ found = 0;
+ mtx_lock(&softc->lock);
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ if ((lun->target_id.id == target_id.id)
+ && (lun->lun_id == lun_id)) {
+ found = 1;
+ break;
+ }
+ }
+ mtx_unlock(&softc->lock);
+
+ /*
+ * If we already have this target/LUN, there is no reason to add
+ * it to our lists again.
+ */
+ if (found != 0)
+ return (0);
+
+ element = ctl_alloc_mem_element(&softc->lun_pool, /*can_wait*/ 0);
+
+ if (element == NULL) {
+ printf("%s: unable to allocate LUN structure\n", __func__);
+ return (1);
+ }
+
+ lun = (struct cfi_lun *)element->bytes;
+
+ lun->element = element;
+ lun->target_id = target_id;
+ lun->lun_id = lun_id;
+ lun->cur_tag_num = 0;
+ lun->state = CFI_LUN_INQUIRY;
+ lun->softc = softc;
+ STAILQ_INIT(&lun->io_list);
+
+ mtx_lock(&softc->lock);
+ STAILQ_INSERT_TAIL(&softc->lun_list, lun, links);
+ mtx_unlock(&softc->lock);
+
+ cfi_lun_probe(lun, /*have_lock*/ 0);
+
+ return (0);
+}
+
+static int
+cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
+{
+ struct cfi_softc *softc;
+ struct cfi_lun *lun;
+ int found;
+
+ softc = (struct cfi_softc *)arg;
+
+ found = 0;
+
+ /*
+ * XXX KDM need to do an invalidate and then a free when any
+ * pending I/O has completed. Or do we? CTL won't free a LUN
+ * while any I/O is pending. So we won't get this notification
+	 * until all I/O we have pending on the LUN has completed.
+ */
+ mtx_lock(&softc->lock);
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ if ((lun->target_id.id == target_id.id)
+ && (lun->lun_id == lun_id)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found != 0)
+ STAILQ_REMOVE(&softc->lun_list, lun, cfi_lun, links);
+
+ mtx_unlock(&softc->lock);
+
+ if (found == 0) {
+ printf("%s: can't find target %ju lun %d\n", __func__,
+ (uintmax_t)target_id.id, lun_id);
+ return (1);
+ }
+
+ ctl_free_mem_element(lun->element);
+
+ return (0);
+}
+
+/*
+ * XXX KDM run this inside a thread, or inside the caller's context?
+ */
+static void
+cfi_datamove(union ctl_io *io)
+{
+ struct ctl_sg_entry *ext_sglist, *kern_sglist;
+ struct ctl_sg_entry ext_entry, kern_entry;
+ int ext_sglen, ext_sg_entries, kern_sg_entries;
+ int ext_sg_start, ext_offset;
+ int len_to_copy, len_copied;
+ int kern_watermark, ext_watermark;
+ int ext_sglist_malloced;
+ struct ctl_scsiio *ctsio;
+ int i, j;
+
+ ext_sglist_malloced = 0;
+ ext_sg_start = 0;
+ ext_offset = 0;
+ ext_sglist = NULL;
+
+ CTL_DEBUG_PRINT(("%s\n", __func__));
+
+ ctsio = &io->scsiio;
+
+ /*
+	 * If there is no external data pointer, we're probably doing a
+	 * BBR read and don't actually need to transfer the data; this
+	 * effectively bit-buckets the data.
+ */
+ if (ctsio->ext_data_ptr == NULL)
+ goto bailout;
+
+ /*
+ * To simplify things here, if we have a single buffer, stick it in
+	 * an S/G entry and just make it a single-entry S/G list.
+ */
+ if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
+ int len_seen;
+
+ ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
+
+ /*
+ * XXX KDM GFP_KERNEL, don't know what the caller's context
+ * is. Need to figure that out.
+ */
+ ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL_CFI,
+ M_WAITOK);
+ if (ext_sglist == NULL) {
+ ctl_set_internal_failure(ctsio,
+ /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+ return;
+ }
+ ext_sglist_malloced = 1;
+		/*
+		 * memcpy() returns its destination pointer, not a status,
+		 * so there is no error to check for here.
+		 */
+		memcpy(ext_sglist, ctsio->ext_data_ptr, ext_sglen);
+ ext_sg_entries = ctsio->ext_sg_entries;
+ len_seen = 0;
+ for (i = 0; i < ext_sg_entries; i++) {
+ if ((len_seen + ext_sglist[i].len) >=
+ ctsio->ext_data_filled) {
+ ext_sg_start = i;
+ ext_offset = ctsio->ext_data_filled - len_seen;
+ break;
+ }
+ len_seen += ext_sglist[i].len;
+ }
+ } else {
+ ext_sglist = &ext_entry;
+ ext_sglist->addr = ctsio->ext_data_ptr;
+ ext_sglist->len = ctsio->ext_data_len;
+ ext_sg_entries = 1;
+ ext_sg_start = 0;
+ ext_offset = ctsio->ext_data_filled;
+ }
+
+ if (ctsio->kern_sg_entries > 0) {
+ kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
+ kern_sg_entries = ctsio->kern_sg_entries;
+ } else {
+ kern_sglist = &kern_entry;
+ kern_sglist->addr = ctsio->kern_data_ptr;
+ kern_sglist->len = ctsio->kern_data_len;
+ kern_sg_entries = 1;
+ }
+
+
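+	/*
+	 * Walk the external and kernel S/G lists in parallel.  The
+	 * watermarks track how far into the current entry on each side
+	 * we have copied; when an entry is exhausted, we advance to the
+	 * next one and reset its watermark.  ext_watermark starts at
+	 * ext_offset so that a partially filled external buffer resumes
+	 * where the previous datamove call left off.
+	 */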
+ kern_watermark = 0;
+ ext_watermark = ext_offset;
+ len_copied = 0;
+ for (i = ext_sg_start, j = 0;
+ i < ext_sg_entries && j < kern_sg_entries;) {
+ uint8_t *ext_ptr, *kern_ptr;
+
+ len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
+ kern_sglist[j].len - kern_watermark);
+
+ ext_ptr = (uint8_t *)ext_sglist[i].addr;
+ ext_ptr = ext_ptr + ext_watermark;
+ if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
+ /*
+ * XXX KDM fix this!
+ */
+ panic("need to implement bus address support");
+#if 0
+ kern_ptr = bus_to_virt(kern_sglist[j].addr);
+#endif
+ } else
+ kern_ptr = (uint8_t *)kern_sglist[j].addr;
+ kern_ptr = kern_ptr + kern_watermark;
+
+ kern_watermark += len_to_copy;
+ ext_watermark += len_to_copy;
+
+ if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_IN) {
+ CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
+ __func__, len_to_copy));
+ CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
+ kern_ptr, ext_ptr));
+ memcpy(ext_ptr, kern_ptr, len_to_copy);
+ } else {
+ CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
+ __func__, len_to_copy));
+ CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
+ ext_ptr, kern_ptr));
+ memcpy(kern_ptr, ext_ptr, len_to_copy);
+ }
+
+ len_copied += len_to_copy;
+
+ if (ext_sglist[i].len == ext_watermark) {
+ i++;
+ ext_watermark = 0;
+ }
+
+ if (kern_sglist[j].len == kern_watermark) {
+ j++;
+ kern_watermark = 0;
+ }
+ }
+
+ ctsio->ext_data_filled += len_copied;
+
+ CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
+ __func__, ext_sg_entries, kern_sg_entries));
+ CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
+ __func__, ctsio->ext_data_len, ctsio->kern_data_len));
+
+
+ /* XXX KDM set residual?? */
+bailout:
+
+ if (ext_sglist_malloced != 0)
+ free(ext_sglist, M_CTL_CFI);
+
+ io->scsiio.be_move_done(io);
+
+ return;
+}
+
+/*
+ * For any sort of check condition, busy, etc., we just retry. We do not
+ * decrement the retry count for unit attention type errors. These are
+ * normal, and we want to save the retry count for "real" errors. Otherwise,
+ * we could end up with situations where a command will succeed in some
+ * situations and fail in others, depending on whether a unit attention is
+ * pending. Also, some of our error recovery actions, most notably the
+ * LUN reset action, will cause a unit attention.
+ *
+ * We can add more detail here later if necessary.
+ */
+static cfi_error_action
+cfi_checkcond_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
+{
+ cfi_error_action error_action;
+ int error_code, sense_key, asc, ascq;
+
+ /*
+ * Default to retrying the command.
+ */
+ error_action = CFI_ERR_RETRY;
+
+ scsi_extract_sense_len(&io->scsiio.sense_data,
+ io->scsiio.sense_len,
+ &error_code,
+ &sense_key,
+ &asc,
+ &ascq,
+ /*show_errors*/ 1);
+
+ switch (error_code) {
+ case SSD_DEFERRED_ERROR:
+ case SSD_DESC_DEFERRED_ERROR:
+ error_action |= CFI_ERR_NO_DECREMENT;
+ break;
+ case SSD_CURRENT_ERROR:
+ case SSD_DESC_CURRENT_ERROR:
+ default: {
+ switch (sense_key) {
+ case SSD_KEY_UNIT_ATTENTION:
+ error_action |= CFI_ERR_NO_DECREMENT;
+ break;
+ case SSD_KEY_HARDWARE_ERROR:
+ /*
+ * This is our generic "something bad happened"
+ * error code. It often isn't recoverable.
+ */
+ if ((asc == 0x44) && (ascq == 0x00))
+ error_action = CFI_ERR_FAIL;
+ break;
+ case SSD_KEY_NOT_READY:
+ /*
+ * If the LUN is powered down, there likely isn't
+ * much point in retrying right now.
+ */
+ if ((asc == 0x04) && (ascq == 0x02))
+ error_action = CFI_ERR_FAIL;
+ /*
+ * If the LUN is offline, there probably isn't much
+ * point in retrying, either.
+ */
+ if ((asc == 0x04) && (ascq == 0x03))
+ error_action = CFI_ERR_FAIL;
+ break;
+ }
+ }
+ }
+
+ return (error_action);
+}
+
+static cfi_error_action
+cfi_error_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
+{
+ cfi_error_action error_action;
+
+ error_action = CFI_ERR_RETRY;
+
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ switch (io->io_hdr.status & CTL_STATUS_MASK) {
+ case CTL_SCSI_ERROR:
+ switch (io->scsiio.scsi_status) {
+ case SCSI_STATUS_RESERV_CONFLICT:
+ /*
+ * For a reservation conflict, we'll usually
+ * want the hard error recovery policy, so
+ * we'll reset the LUN.
+ */
+ if (lun_io->policy == CFI_ERR_HARD)
+ error_action =
+ CFI_ERR_LUN_RESET;
+ else
+ error_action =
+ CFI_ERR_RETRY;
+ break;
+ case SCSI_STATUS_CHECK_COND:
+ default:
+ error_action = cfi_checkcond_parse(io, lun_io);
+ break;
+ }
+ break;
+ default:
+ error_action = CFI_ERR_RETRY;
+ break;
+ }
+ break;
+ case CTL_IO_TASK:
+ /*
+ * In theory task management commands shouldn't fail...
+ */
+ error_action = CFI_ERR_RETRY;
+ break;
+ default:
+ printf("%s: invalid ctl_io type %d\n", __func__,
+ io->io_hdr.io_type);
+ panic("%s: invalid ctl_io type %d\n", __func__,
+ io->io_hdr.io_type);
+ break;
+ }
+
+ return (error_action);
+}
+
+static void
+cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
+ struct cfi_metatask *metatask, cfi_error_policy policy, int retries,
+ struct cfi_lun_io *orig_lun_io,
+ void (*done_function)(union ctl_io *io))
+{
+ struct cfi_lun_io *lun_io;
+
+ io->io_hdr.nexus.initid.id = 7;
+ io->io_hdr.nexus.targ_port = lun->softc->fe.targ_port;
+ io->io_hdr.nexus.targ_target.id = lun->target_id.id;
+ io->io_hdr.nexus.targ_lun = lun->lun_id;
+ io->io_hdr.retries = retries;
+ lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = lun_io;
+ lun_io->lun = lun;
+ lun_io->metatask = metatask;
+ lun_io->ctl_io = io;
+ lun_io->policy = policy;
+ lun_io->orig_lun_io = orig_lun_io;
+ lun_io->done_function = done_function;
+ /*
+ * We only set the tag number for SCSI I/Os. For task management
+ * commands, the tag number is only really needed for aborts, so
+ * the caller can set it if necessary.
+ */
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ io->scsiio.tag_num = lun->cur_tag_num++;
+ break;
+ case CTL_IO_TASK:
+ default:
+ break;
+ }
+}
+
+static void
+cfi_done(union ctl_io *io)
+{
+ struct cfi_lun_io *lun_io;
+ struct cfi_softc *softc;
+ struct cfi_lun *lun;
+
+ lun_io = (struct cfi_lun_io *)
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+
+ lun = lun_io->lun;
+ softc = lun->softc;
+
+ /*
+ * Very minimal retry logic. We basically retry if we got an error
+ * back, and the retry count is greater than 0. If we ever want
+ * more sophisticated initiator type behavior, the CAM error
+ * recovery code in ../common might be helpful.
+ */
+ if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+ && (io->io_hdr.retries > 0)) {
+ ctl_io_status old_status;
+ cfi_error_action error_action;
+
+ error_action = cfi_error_parse(io, lun_io);
+
+ switch (error_action & CFI_ERR_MASK) {
+ case CFI_ERR_FAIL:
+ goto done;
+ break; /* NOTREACHED */
+ case CFI_ERR_LUN_RESET: {
+ union ctl_io *new_io;
+ struct cfi_lun_io *new_lun_io;
+
+ new_io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ if (new_io == NULL) {
+ printf("%s: unable to allocate ctl_io for "
+ "error recovery\n", __func__);
+ goto done;
+ }
+ ctl_zero_io(new_io);
+
+ new_io->io_hdr.io_type = CTL_IO_TASK;
+ new_io->taskio.task_action = CTL_TASK_LUN_RESET;
+
+ cfi_init_io(new_io,
+ /*lun*/ lun_io->lun,
+ /*metatask*/ NULL,
+ /*policy*/ CFI_ERR_SOFT,
+ /*retries*/ 0,
+ /*orig_lun_io*/lun_io,
+ /*done_function*/ cfi_err_recovery_done);
+
+
+ new_lun_io = (struct cfi_lun_io *)
+ new_io->io_hdr.port_priv;
+
+ mtx_lock(&lun->softc->lock);
+ STAILQ_INSERT_TAIL(&lun->io_list, new_lun_io, links);
+ mtx_unlock(&lun->softc->lock);
+
+ io = new_io;
+ break;
+ }
+ case CFI_ERR_RETRY:
+ default:
+ if ((error_action & CFI_ERR_NO_DECREMENT) == 0)
+ io->io_hdr.retries--;
+ break;
+ }
+
+ old_status = io->io_hdr.status;
+ io->io_hdr.status = CTL_STATUS_NONE;
+#if 0
+ io->io_hdr.flags &= ~CTL_FLAG_ALREADY_DONE;
+#endif
+ io->io_hdr.flags &= ~CTL_FLAG_ABORT;
+ io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+
+ if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
+ printf("%s: error returned from ctl_queue()!\n",
+ __func__);
+ io->io_hdr.status = old_status;
+ } else
+ return;
+ }
+done:
+ lun_io->done_function(io);
+}
+
+static void
+cfi_lun_probe_done(union ctl_io *io)
+{
+ struct cfi_lun *lun;
+ struct cfi_lun_io *lun_io;
+
+ lun_io = (struct cfi_lun_io *)
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+ lun = lun_io->lun;
+
+ switch (lun->state) {
+ case CFI_LUN_INQUIRY: {
+ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
+ printf("%s: LUN %d probe failed because inquiry "
+ "failed\n", __func__, lun->lun_id);
+ ctl_io_error_print(io, NULL);
+ } else {
+
+ if (SID_TYPE(&lun->inq_data) != T_DIRECT) {
+ char path_str[40];
+
+ lun->state = CFI_LUN_READY;
+ ctl_scsi_path_string(io, path_str,
+ sizeof(path_str));
+ printf("%s", path_str);
+ scsi_print_inquiry(&lun->inq_data);
+ } else {
+ lun->state = CFI_LUN_READCAPACITY;
+ cfi_lun_probe(lun, /*have_lock*/ 0);
+ }
+ }
+ mtx_lock(&lun->softc->lock);
+ STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
+ mtx_unlock(&lun->softc->lock);
+ ctl_free_io(io);
+ break;
+ }
+ case CFI_LUN_READCAPACITY:
+ case CFI_LUN_READCAPACITY_16: {
+ uint64_t maxlba;
+ uint32_t blocksize;
+
+ maxlba = 0;
+ blocksize = 0;
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
+ printf("%s: LUN %d probe failed because READ CAPACITY "
+ "failed\n", __func__, lun->lun_id);
+ ctl_io_error_print(io, NULL);
+ } else {
+
+ if (lun->state == CFI_LUN_READCAPACITY) {
+ struct scsi_read_capacity_data *rdcap;
+
+ rdcap = (struct scsi_read_capacity_data *)
+ io->scsiio.ext_data_ptr;
+
+ maxlba = scsi_4btoul(rdcap->addr);
+ blocksize = scsi_4btoul(rdcap->length);
+ if (blocksize == 0) {
+ printf("%s: LUN %d has invalid "
+ "blocksize 0, probe aborted\n",
+ __func__, lun->lun_id);
+ } else if (maxlba == 0xffffffff) {
+ lun->state = CFI_LUN_READCAPACITY_16;
+ cfi_lun_probe(lun, /*have_lock*/ 0);
+ } else
+ lun->state = CFI_LUN_READY;
+ } else {
+ struct scsi_read_capacity_data_long *rdcap_long;
+
+ rdcap_long = (struct
+ scsi_read_capacity_data_long *)
+ io->scsiio.ext_data_ptr;
+ maxlba = scsi_8btou64(rdcap_long->addr);
+ blocksize = scsi_4btoul(rdcap_long->length);
+
+ if (blocksize == 0) {
+ printf("%s: LUN %d has invalid "
+ "blocksize 0, probe aborted\n",
+ __func__, lun->lun_id);
+ } else
+ lun->state = CFI_LUN_READY;
+ }
+ }
+
+ if (lun->state == CFI_LUN_READY) {
+ char path_str[40];
+
+ lun->num_blocks = maxlba + 1;
+ lun->blocksize = blocksize;
+
+			/*
+			 * If ((blocksize - 1) & blocksize) is zero, the
+			 * blocksize is a power of 2, and the loop below
+			 * extracts its base-2 logarithm.  We already
+			 * checked for a blocksize of 0 above.
+			 */
+ if (((blocksize - 1) & blocksize) == 0) {
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ if ((blocksize & (1 << i)) != 0) {
+ lun->blocksize_powerof2 = i;
+ break;
+ }
+ }
+ }
+			ctl_scsi_path_string(io, path_str, sizeof(path_str));
+ printf("%s", path_str);
+ scsi_print_inquiry(&lun->inq_data);
+ printf("%s %ju blocks, blocksize %d\n", path_str,
+ (uintmax_t)maxlba + 1, blocksize);
+ }
+ mtx_lock(&lun->softc->lock);
+ STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
+ mtx_unlock(&lun->softc->lock);
+ free(io->scsiio.ext_data_ptr, M_CTL_CFI);
+ ctl_free_io(io);
+ break;
+ }
+ case CFI_LUN_READY:
+ default:
+ mtx_lock(&lun->softc->lock);
+ /* How did we get here?? */
+ STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
+ mtx_unlock(&lun->softc->lock);
+ ctl_free_io(io);
+ break;
+ }
+}
+
+static void
+cfi_lun_probe(struct cfi_lun *lun, int have_lock)
+{
+
+ if (have_lock == 0)
+ mtx_lock(&lun->softc->lock);
+ if ((lun->softc->flags & CFI_ONLINE) == 0) {
+ if (have_lock == 0)
+ mtx_unlock(&lun->softc->lock);
+ return;
+ }
+ if (have_lock == 0)
+ mtx_unlock(&lun->softc->lock);
+
+ switch (lun->state) {
+ case CFI_LUN_INQUIRY: {
+ struct cfi_lun_io *lun_io;
+ union ctl_io *io;
+
+ io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
+ if (io == NULL) {
+ printf("%s: unable to alloc ctl_io for target %ju "
+ "lun %d probe\n", __func__,
+ (uintmax_t)lun->target_id.id, lun->lun_id);
+ return;
+ }
+ ctl_scsi_inquiry(io,
+ /*data_ptr*/(uint8_t *)&lun->inq_data,
+ /*data_len*/ sizeof(lun->inq_data),
+ /*byte2*/ 0,
+ /*page_code*/ 0,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ cfi_init_io(io,
+ /*lun*/ lun,
+ /*metatask*/ NULL,
+ /*policy*/ CFI_ERR_SOFT,
+ /*retries*/ 5,
+ /*orig_lun_io*/ NULL,
+ /*done_function*/
+ cfi_lun_probe_done);
+
+ lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
+
+ if (have_lock == 0)
+ mtx_lock(&lun->softc->lock);
+ STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
+ if (have_lock == 0)
+ mtx_unlock(&lun->softc->lock);
+
+ if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
+ printf("%s: error returned from ctl_queue()!\n",
+ __func__);
+			if (have_lock == 0)
+				mtx_lock(&lun->softc->lock);
+			STAILQ_REMOVE(&lun->io_list, lun_io,
+				      cfi_lun_io, links);
+			if (have_lock == 0)
+				mtx_unlock(&lun->softc->lock);
+ ctl_free_io(io);
+ }
+ break;
+ }
+ case CFI_LUN_READCAPACITY:
+ case CFI_LUN_READCAPACITY_16: {
+ struct cfi_lun_io *lun_io;
+ uint8_t *dataptr;
+ union ctl_io *io;
+
+ io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
+ if (io == NULL) {
+ printf("%s: unable to alloc ctl_io for target %ju "
+ "lun %d probe\n", __func__,
+ (uintmax_t)lun->target_id.id, lun->lun_id);
+ return;
+ }
+
+ dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
+ M_CTL_CFI, M_NOWAIT);
+ if (dataptr == NULL) {
+ printf("%s: unable to allocate SCSI read capacity "
+ "buffer for target %ju lun %d\n", __func__,
+ (uintmax_t)lun->target_id.id, lun->lun_id);
+			ctl_free_io(io);
+			return;
+ }
+ if (lun->state == CFI_LUN_READCAPACITY) {
+ ctl_scsi_read_capacity(io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/
+ sizeof(struct scsi_read_capacity_data_long),
+ /*addr*/ 0,
+ /*reladr*/ 0,
+ /*pmi*/ 0,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+ } else {
+ ctl_scsi_read_capacity_16(io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/
+ sizeof(struct scsi_read_capacity_data_long),
+ /*addr*/ 0,
+ /*reladr*/ 0,
+ /*pmi*/ 0,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+ }
+ cfi_init_io(io,
+ /*lun*/ lun,
+ /*metatask*/ NULL,
+ /*policy*/ CFI_ERR_SOFT,
+ /*retries*/ 7,
+ /*orig_lun_io*/ NULL,
+ /*done_function*/ cfi_lun_probe_done);
+
+ lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
+
+ if (have_lock == 0)
+ mtx_lock(&lun->softc->lock);
+ STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
+ if (have_lock == 0)
+ mtx_unlock(&lun->softc->lock);
+
+ if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
+ printf("%s: error returned from ctl_queue()!\n",
+ __func__);
+			if (have_lock == 0)
+				mtx_lock(&lun->softc->lock);
+			STAILQ_REMOVE(&lun->io_list, lun_io,
+				      cfi_lun_io, links);
+			if (have_lock == 0)
+				mtx_unlock(&lun->softc->lock);
+ free(dataptr, M_CTL_CFI);
+ ctl_free_io(io);
+ }
+ break;
+ }
+ case CFI_LUN_READY:
+ default:
+ /* Why were we called? */
+ break;
+ }
+}
+
+static void
+cfi_metatask_done(struct cfi_softc *softc, struct cfi_metatask *metatask)
+{
+ mtx_lock(&softc->lock);
+ STAILQ_REMOVE(&softc->metatask_list, metatask, cfi_metatask, links);
+ mtx_unlock(&softc->lock);
+
+ /*
+ * Return status to the caller. Caller allocated storage, and is
+ * responsible for calling cfi_free_metatask to release it once
+ * they've seen the status.
+ */
+ metatask->callback(metatask->callback_arg, metatask);
+}
+
+static void
+cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask, union ctl_io *io)
+{
+ int error_code, sense_key, asc, ascq;
+
+ if (metatask->tasktype != CFI_TASK_BBRREAD)
+ return;
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ metatask->status = CFI_MT_SUCCESS;
+ metatask->taskinfo.bbrread.status = CFI_BBR_SUCCESS;
+ return;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
+ return;
+ }
+
+ metatask->taskinfo.bbrread.scsi_status = io->scsiio.scsi_status;
+ memcpy(&metatask->taskinfo.bbrread.sense_data, &io->scsiio.sense_data,
+ ctl_min(sizeof(metatask->taskinfo.bbrread.sense_data),
+ sizeof(io->scsiio.sense_data)));
+
+ if (io->scsiio.scsi_status == SCSI_STATUS_RESERV_CONFLICT) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status = CFI_BBR_RESERV_CONFLICT;
+ return;
+ }
+
+ if (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
+ return;
+ }
+
+ scsi_extract_sense_len(&io->scsiio.sense_data,
+ io->scsiio.sense_len,
+ &error_code,
+ &sense_key,
+ &asc,
+ &ascq,
+ /*show_errors*/ 1);
+
+ switch (error_code) {
+ case SSD_DEFERRED_ERROR:
+ case SSD_DESC_DEFERRED_ERROR:
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
+ break;
+ case SSD_CURRENT_ERROR:
+ case SSD_DESC_CURRENT_ERROR:
+ default: {
+ struct scsi_sense_data *sense;
+
+ sense = &io->scsiio.sense_data;
+
+ if ((asc == 0x04) && (ascq == 0x02)) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status = CFI_BBR_LUN_STOPPED;
+ } else if ((asc == 0x04) && (ascq == 0x03)) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status =
+ CFI_BBR_LUN_OFFLINE_CTL;
+ } else if ((asc == 0x44) && (ascq == 0x00)) {
+#ifdef NEEDTOPORT
+ if (sense->sense_key_spec[0] & SSD_SCS_VALID) {
+ uint16_t retry_count;
+
+ retry_count = sense->sense_key_spec[1] << 8 |
+ sense->sense_key_spec[2];
+ if (((retry_count & 0xf000) == CSC_RAIDCORE)
+ && ((retry_count & 0x0f00) == CSC_SHELF_SW)
+ && ((retry_count & 0xff) ==
+ RC_STS_DEVICE_OFFLINE)) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status =
+ CFI_BBR_LUN_OFFLINE_RC;
+ } else {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status =
+ CFI_BBR_SCSI_ERROR;
+ }
+ } else {
+#endif /* NEEDTOPORT */
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status =
+ CFI_BBR_SCSI_ERROR;
+#ifdef NEEDTOPORT
+ }
+#endif
+ } else {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
+ }
+ break;
+ }
+ }
+}
+
+static void
+cfi_metatask_io_done(union ctl_io *io)
+{
+ struct cfi_lun_io *lun_io;
+ struct cfi_metatask *metatask;
+ struct cfi_softc *softc;
+ struct cfi_lun *lun;
+
+ lun_io = (struct cfi_lun_io *)
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+
+ lun = lun_io->lun;
+ softc = lun->softc;
+
+ metatask = lun_io->metatask;
+
+ switch (metatask->tasktype) {
+ case CFI_TASK_STARTUP:
+ case CFI_TASK_SHUTDOWN: {
+ int failed, done, is_start;
+
+ failed = 0;
+ done = 0;
+ if (metatask->tasktype == CFI_TASK_STARTUP)
+ is_start = 1;
+ else
+ is_start = 0;
+
+ mtx_lock(&softc->lock);
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
+ metatask->taskinfo.startstop.luns_complete++;
+ else {
+ metatask->taskinfo.startstop.luns_failed++;
+ failed = 1;
+ }
+ if ((metatask->taskinfo.startstop.luns_complete +
+ metatask->taskinfo.startstop.luns_failed) >=
+ metatask->taskinfo.startstop.total_luns)
+ done = 1;
+
+ mtx_unlock(&softc->lock);
+
+ if (failed != 0) {
+ printf("%s: LUN %d %s request failed\n", __func__,
+ lun_io->lun->lun_id, (is_start == 1) ? "start" :
+ "stop");
+ ctl_io_error_print(io, &lun_io->lun->inq_data);
+ }
+ if (done != 0) {
+ if (metatask->taskinfo.startstop.luns_failed > 0)
+ metatask->status = CFI_MT_ERROR;
+ else
+ metatask->status = CFI_MT_SUCCESS;
+ cfi_metatask_done(softc, metatask);
+ }
+ mtx_lock(&softc->lock);
+ STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
+ mtx_unlock(&softc->lock);
+
+ ctl_free_io(io);
+ break;
+ }
+ case CFI_TASK_BBRREAD: {
+ /*
+ * Translate the SCSI error into an enumeration.
+ */
+ cfi_metatask_bbr_errorparse(metatask, io);
+
+ mtx_lock(&softc->lock);
+ STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
+ mtx_unlock(&softc->lock);
+
+ ctl_free_io(io);
+
+ cfi_metatask_done(softc, metatask);
+ break;
+ }
+ default:
+ /*
+ * This shouldn't happen.
+ */
+ mtx_lock(&softc->lock);
+ STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
+ mtx_unlock(&softc->lock);
+
+ ctl_free_io(io);
+ break;
+ }
+}
+
+static void
+cfi_err_recovery_done(union ctl_io *io)
+{
+ struct cfi_lun_io *lun_io, *orig_lun_io;
+ struct cfi_lun *lun;
+ union ctl_io *orig_io;
+
+ lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
+ orig_lun_io = lun_io->orig_lun_io;
+ orig_io = orig_lun_io->ctl_io;
+ lun = lun_io->lun;
+
+	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
+ printf("%s: error recovery action failed. Original "
+ "error:\n", __func__);
+
+ ctl_io_error_print(orig_lun_io->ctl_io, &lun->inq_data);
+
+ printf("%s: error from error recovery action:\n", __func__);
+
+ ctl_io_error_print(io, &lun->inq_data);
+
+ printf("%s: trying original command again...\n", __func__);
+ }
+
+ mtx_lock(&lun->softc->lock);
+ STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
+ mtx_unlock(&lun->softc->lock);
+ ctl_free_io(io);
+
+ orig_io->io_hdr.retries--;
+ orig_io->io_hdr.status = CTL_STATUS_NONE;
+
+ if (ctl_queue(orig_io) != CTL_RETVAL_COMPLETE) {
+ printf("%s: error returned from ctl_queue()!\n", __func__);
+		mtx_lock(&lun->softc->lock);
+		STAILQ_REMOVE(&lun->io_list, orig_lun_io,
+			      cfi_lun_io, links);
+		mtx_unlock(&lun->softc->lock);
+ ctl_free_io(orig_io);
+ }
+}
+
+static void
+cfi_lun_io_done(union ctl_io *io)
+{
+ struct cfi_lun *lun;
+ struct cfi_lun_io *lun_io;
+
+ lun_io = (struct cfi_lun_io *)
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+ lun = lun_io->lun;
+
+ if (lun_io->metatask == NULL) {
+ printf("%s: I/O has no metatask pointer, discarding\n",
+ __func__);
+		mtx_lock(&lun->softc->lock);
+		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
+		mtx_unlock(&lun->softc->lock);
+ ctl_free_io(io);
+ return;
+ }
+ cfi_metatask_io_done(io);
+}
+
+void
+cfi_action(struct cfi_metatask *metatask)
+{
+ struct cfi_softc *softc;
+
+ softc = &fetd_internal_softc;
+
+ mtx_lock(&softc->lock);
+
+ STAILQ_INSERT_TAIL(&softc->metatask_list, metatask, links);
+
+ if ((softc->flags & CFI_ONLINE) == 0) {
+ mtx_unlock(&softc->lock);
+ metatask->status = CFI_MT_PORT_OFFLINE;
+ cfi_metatask_done(softc, metatask);
+ return;
+ } else
+ mtx_unlock(&softc->lock);
+
+ switch (metatask->tasktype) {
+ case CFI_TASK_STARTUP:
+ case CFI_TASK_SHUTDOWN: {
+ union ctl_io *io;
+ int da_luns, ios_allocated, do_start;
+ struct cfi_lun *lun;
+ STAILQ_HEAD(, ctl_io_hdr) tmp_io_list;
+
+ da_luns = 0;
+ ios_allocated = 0;
+ STAILQ_INIT(&tmp_io_list);
+
+ if (metatask->tasktype == CFI_TASK_STARTUP)
+ do_start = 1;
+ else
+ do_start = 0;
+
+ mtx_lock(&softc->lock);
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ if (lun->state != CFI_LUN_READY)
+ continue;
+
+ if (SID_TYPE(&lun->inq_data) != T_DIRECT)
+ continue;
+ da_luns++;
+ io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ if (io != NULL) {
+ ios_allocated++;
+ STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
+ links);
+ }
+ }
+
+ if (ios_allocated < da_luns) {
+ printf("%s: error allocating ctl_io for %s\n",
+ __func__, (do_start == 1) ? "startup" :
+ "shutdown");
+ da_luns = ios_allocated;
+ }
+
+ metatask->taskinfo.startstop.total_luns = da_luns;
+
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ struct cfi_lun_io *lun_io;
+
+ if (lun->state != CFI_LUN_READY)
+ continue;
+
+ if (SID_TYPE(&lun->inq_data) != T_DIRECT)
+ continue;
+
+ io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
+ if (io == NULL)
+ break;
+
+ STAILQ_REMOVE(&tmp_io_list, &io->io_hdr, ctl_io_hdr,
+ links);
+
+ ctl_scsi_start_stop(io,
+ /*start*/ do_start,
+ /*load_eject*/ 0,
+ /*immediate*/ 0,
+ /*power_conditions*/
+ SSS_PC_START_VALID,
+ /*onoffline*/ 1,
+ /*ctl_tag_type*/ CTL_TAG_ORDERED,
+ /*control*/ 0);
+
+ cfi_init_io(io,
+ /*lun*/ lun,
+ /*metatask*/ metatask,
+ /*policy*/ CFI_ERR_HARD,
+ /*retries*/ 3,
+ /*orig_lun_io*/ NULL,
+ /*done_function*/ cfi_lun_io_done);
+
+ lun_io = (struct cfi_lun_io *) io->io_hdr.port_priv;
+
+ STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
+
+ if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
+ printf("%s: error returned from ctl_queue()!\n",
+ __func__);
+ STAILQ_REMOVE(&lun->io_list, lun_io,
+ cfi_lun_io, links);
+ ctl_free_io(io);
+ metatask->taskinfo.startstop.total_luns--;
+ }
+ }
+
+ if (STAILQ_FIRST(&tmp_io_list) != NULL) {
+ printf("%s: error: tmp_io_list != NULL\n", __func__);
+ for (io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
+ io != NULL;
+ io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list)) {
+ STAILQ_REMOVE(&tmp_io_list, &io->io_hdr,
+ ctl_io_hdr, links);
+ ctl_free_io(io);
+ }
+ }
+ mtx_unlock(&softc->lock);
+
+ break;
+ }
+ case CFI_TASK_BBRREAD: {
+ union ctl_io *io;
+ struct cfi_lun *lun;
+ struct cfi_lun_io *lun_io;
+ cfi_bbrread_status status;
+ int req_lun_num;
+ uint32_t num_blocks;
+
+ status = CFI_BBR_SUCCESS;
+
+ req_lun_num = metatask->taskinfo.bbrread.lun_num;
+
+ mtx_lock(&softc->lock);
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ if (lun->lun_id != req_lun_num)
+ continue;
+ if (lun->state != CFI_LUN_READY) {
+ status = CFI_BBR_LUN_UNCONFIG;
+ break;
+ } else
+ break;
+ }
+
+ if (lun == NULL)
+ status = CFI_BBR_NO_LUN;
+
+ if (status != CFI_BBR_SUCCESS) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status = status;
+ mtx_unlock(&softc->lock);
+ cfi_metatask_done(softc, metatask);
+ break;
+ }
+
+ /*
+ * Convert the number of bytes given into blocks and check
+ * that the number of bytes is a multiple of the blocksize.
+ * CTL will verify that the LBA is okay.
+ */
+ if (lun->blocksize_powerof2 != 0) {
+ if ((metatask->taskinfo.bbrread.len &
+ (lun->blocksize - 1)) != 0) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status =
+ CFI_BBR_BAD_LEN;
+				mtx_unlock(&softc->lock);
+				cfi_metatask_done(softc, metatask);
+ break;
+ }
+
+ num_blocks = metatask->taskinfo.bbrread.len >>
+ lun->blocksize_powerof2;
+ } else {
+			/*
+			 * XXX KDM this modulo operation requires a full
+			 * integer divide, which can be expensive for a
+			 * non-power-of-2 blocksize.
+			 */
+ if ((metatask->taskinfo.bbrread.len %
+ lun->blocksize) != 0) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status =
+ CFI_BBR_BAD_LEN;
+				mtx_unlock(&softc->lock);
+				cfi_metatask_done(softc, metatask);
+ break;
+ }
+
+			/*
+			 * XXX KDM same here: a full integer divide per
+			 * request for a non-power-of-2 blocksize.
+			 */
+ num_blocks = metatask->taskinfo.bbrread.len /
+ lun->blocksize;
+
+ }
+
+ io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ if (io == NULL) {
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
+ mtx_unlock(&softc->lock);
+ cfi_metatask_done(softc, metatask);
+ break;
+ }
+
+ /*
+ * XXX KDM need to do a read capacity to get the blocksize
+ * for this device.
+ */
+ ctl_scsi_read_write(io,
+ /*data_ptr*/ NULL,
+ /*data_len*/ metatask->taskinfo.bbrread.len,
+ /*read_op*/ 1,
+ /*byte2*/ 0,
+ /*minimum_cdb_size*/ 0,
+ /*lba*/ metatask->taskinfo.bbrread.lba,
+ /*num_blocks*/ num_blocks,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ cfi_init_io(io,
+ /*lun*/ lun,
+ /*metatask*/ metatask,
+ /*policy*/ CFI_ERR_SOFT,
+ /*retries*/ 3,
+ /*orig_lun_io*/ NULL,
+ /*done_function*/ cfi_lun_io_done);
+
+ lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
+
+ STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
+
+ if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
+ printf("%s: error returned from ctl_queue()!\n",
+ __func__);
+ STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
+ ctl_free_io(io);
+ metatask->status = CFI_MT_ERROR;
+ metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
+ mtx_unlock(&softc->lock);
+ cfi_metatask_done(softc, metatask);
+ break;
+ }
+
+ mtx_unlock(&softc->lock);
+ break;
+ }
+ default:
+ panic("invalid metatask type %d", metatask->tasktype);
+ break; /* NOTREACHED */
+ }
+}
+
+#ifdef oldapi
+void
+cfi_shutdown_shelf(cfi_cb_t callback, void *callback_arg)
+{
+ struct ctl_mem_element *element;
+ struct cfi_softc *softc;
+ struct cfi_metatask *metatask;
+
+ softc = &fetd_internal_softc;
+
+ element = ctl_alloc_mem_element(&softc->metatask_pool, /*can_wait*/ 0);
+ if (element == NULL) {
+ callback(callback_arg,
+ /*status*/ CFI_MT_ERROR,
+ /*sluns_found*/ 0,
+ /*sluns_complete*/ 0,
+ /*sluns_failed*/ 0);
+ return;
+ }
+
+ metatask = (struct cfi_metatask *)element->bytes;
+
+ memset(metatask, 0, sizeof(*metatask));
+ metatask->tasktype = CFI_TASK_SHUTDOWN;
+ metatask->status = CFI_MT_NONE;
+ metatask->taskinfo.startstop.callback = callback;
+ metatask->taskinfo.startstop.callback_arg = callback_arg;
+ metatask->element = element;
+
+ cfi_action(softc, metatask);
+
+ /*
+ * - send a report luns to lun 0, get LUN list.
+ * - send an inquiry to each lun
+ * - send a stop/offline to each direct access LUN
+ * - if we get a reservation conflict, reset the LUN and then
+ * retry sending the stop/offline
+ * - return status back to the caller
+ */
+}
+
+void
+cfi_start_shelf(cfi_cb_t callback, void *callback_arg)
+{
+ struct ctl_mem_element *element;
+ struct cfi_softc *softc;
+ struct cfi_metatask *metatask;
+
+ softc = &fetd_internal_softc;
+
+ element = ctl_alloc_mem_element(&softc->metatask_pool, /*can_wait*/ 0);
+ if (element == NULL) {
+ callback(callback_arg,
+ /*status*/ CFI_MT_ERROR,
+ /*sluns_found*/ 0,
+ /*sluns_complete*/ 0,
+ /*sluns_failed*/ 0);
+ return;
+ }
+
+ metatask = (struct cfi_metatask *)element->bytes;
+
+ memset(metatask, 0, sizeof(*metatask));
+ metatask->tasktype = CFI_TASK_STARTUP;
+ metatask->status = CFI_MT_NONE;
+ metatask->taskinfo.startstop.callback = callback;
+ metatask->taskinfo.startstop.callback_arg = callback_arg;
+ metatask->element = element;
+
+ cfi_action(softc, metatask);
+
+ /*
+ * - send a report luns to lun 0, get LUN list.
+ * - send an inquiry to each lun
+	 * - send a start/online to each direct access LUN
+	 * - if we get a reservation conflict, reset the LUN and then
+	 *    retry sending the start/online
+ * - return status back to the caller
+ */
+}
+
+#endif
+
+struct cfi_metatask *
+cfi_alloc_metatask(int can_wait)
+{
+ struct ctl_mem_element *element;
+ struct cfi_metatask *metatask;
+ struct cfi_softc *softc;
+
+ softc = &fetd_internal_softc;
+
+ element = ctl_alloc_mem_element(&softc->metatask_pool, can_wait);
+ if (element == NULL)
+ return (NULL);
+
+ metatask = (struct cfi_metatask *)element->bytes;
+ memset(metatask, 0, sizeof(*metatask));
+ metatask->status = CFI_MT_NONE;
+ metatask->element = element;
+
+ return (metatask);
+}
+
+void
+cfi_free_metatask(struct cfi_metatask *metatask)
+{
+ ctl_free_mem_element(metatask->element);
+}
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_frontend_internal.h b/sys/cam/ctl/ctl_frontend_internal.h
new file mode 100644
index 0000000..cb00dc6
--- /dev/null
+++ b/sys/cam/ctl/ctl_frontend_internal.h
@@ -0,0 +1,154 @@
+/*-
+ * Copyright (c) 2004 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.h#1 $
+ * $FreeBSD$
+ */
+/*
+ * CTL kernel internal frontend target driver. This allows kernel-level
+ * clients to send commands into CTL.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_FRONTEND_INTERNAL_H_
+#define _CTL_FRONTEND_INTERNAL_H_
+
+/*
+ * These are general metatask error codes. If the error code is CFI_MT_ERROR,
+ * check any metatask-specific status codes for more detail on the problem.
+ */
+typedef enum {
+ CFI_MT_NONE,
+ CFI_MT_PORT_OFFLINE,
+ CFI_MT_ERROR,
+ CFI_MT_SUCCESS
+} cfi_mt_status;
+
+typedef enum {
+ CFI_TASK_NONE,
+ CFI_TASK_SHUTDOWN,
+ CFI_TASK_STARTUP,
+ CFI_TASK_BBRREAD
+} cfi_tasktype;
+
+struct cfi_task_startstop {
+ int total_luns;
+ int luns_complete;
+ int luns_failed;
+};
+
+/*
+ * Error code description:
+ * CFI_BBR_SUCCESS - the read was successful
+ * CFI_BBR_LUN_UNCONFIG - CFI probe for this lun hasn't completed
+ * CFI_BBR_NO_LUN - this lun doesn't exist, as far as CFI knows
+ * CFI_BBR_NO_MEM - memory allocation error
+ * CFI_BBR_BAD_LEN - data length isn't a multiple of the blocksize
+ * CFI_BBR_RESERV_CONFLICT - another initiator has this lun reserved, so
+ * we can't issue I/O at all.
+ * CFI_BBR_LUN_STOPPED - the lun is powered off.
+ * CFI_BBR_LUN_OFFLINE_CTL - the lun is offline from a CTL standpoint
+ * CFI_BBR_LUN_OFFLINE_RC - the lun is offline from a RAIDCore standpoint.
+ * This is bad, because it basically means we've
+ * had a double failure on the LUN.
+ * CFI_BBR_SCSI_ERROR - generic SCSI error, see status byte and sense
+ * data for more resolution if you want it.
+ * CFI_BBR_ERROR - the catch-all error code.
+ */
+typedef enum {
+ CFI_BBR_SUCCESS,
+ CFI_BBR_LUN_UNCONFIG,
+ CFI_BBR_NO_LUN,
+ CFI_BBR_NO_MEM,
+ CFI_BBR_BAD_LEN,
+ CFI_BBR_RESERV_CONFLICT,
+ CFI_BBR_LUN_STOPPED,
+ CFI_BBR_LUN_OFFLINE_CTL,
+ CFI_BBR_LUN_OFFLINE_RC,
+ CFI_BBR_SCSI_ERROR,
+ CFI_BBR_ERROR,
+} cfi_bbrread_status;
+
+struct cfi_task_bbrread {
+ int lun_num; /* lun number */
+ uint64_t lba; /* logical block address */
+ int len; /* length in bytes */
+ cfi_bbrread_status status; /* BBR status */
+ uint8_t scsi_status; /* SCSI status */
+ struct scsi_sense_data sense_data; /* SCSI sense data */
+};
+
+union cfi_taskinfo {
+ struct cfi_task_startstop startstop;
+ struct cfi_task_bbrread bbrread;
+};
+
+struct cfi_metatask;
+
+typedef void (*cfi_cb_t)(void *arg, struct cfi_metatask *metatask);
+
+struct cfi_metatask {
+ cfi_tasktype tasktype; /* passed to CFI */
+ cfi_mt_status status; /* returned from CFI */
+ union cfi_taskinfo taskinfo; /* returned from CFI */
+ struct ctl_mem_element *element; /* used by CFI, don't touch*/
+ cfi_cb_t callback; /* passed to CFI */
+ void *callback_arg; /* passed to CFI */
+ STAILQ_ENTRY(cfi_metatask) links; /* used by CFI, don't touch*/
+};
+
+#ifdef _KERNEL
+
+MALLOC_DECLARE(M_CTL_CFI);
+
+/*
+ * This is the API for sending meta commands (commands that are sent to more
+ * than one LUN) to the internal frontend:
+ * - Allocate a metatask using cfi_alloc_metatask(). can_wait == 0 means
+ * that you're calling from an interrupt context. can_wait == 1 means
+ * that you're calling from a thread context and don't mind waiting to
+ * allocate memory.
+ * - Setup the task type, callback and callback argument.
+ * - Call cfi_action().
+ * - When the callback comes, note the status and any per-command status
+ * (see the taskinfo union) and then free the metatask with
+ * cfi_free_metatask().
+ */
+struct cfi_metatask *cfi_alloc_metatask(int can_wait);
+void cfi_free_metatask(struct cfi_metatask *metatask);
+void cfi_action(struct cfi_metatask *metatask);
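+
+/*
+ * A minimal usage sketch (my_done and my_bbr_read are hypothetical
+ * caller-side names, not part of this API):
+ *
+ *	static void
+ *	my_done(void *arg, struct cfi_metatask *metatask)
+ *	{
+ *		if (metatask->status != CFI_MT_SUCCESS)
+ *			printf("BBR read failed, BBR status %d\n",
+ *			       metatask->taskinfo.bbrread.status);
+ *		cfi_free_metatask(metatask);
+ *	}
+ *
+ *	static void
+ *	my_bbr_read(int lun_num, uint64_t lba, int len)
+ *	{
+ *		struct cfi_metatask *metatask;
+ *
+ *		metatask = cfi_alloc_metatask(1);
+ *		if (metatask == NULL)
+ *			return;
+ *		metatask->tasktype = CFI_TASK_BBRREAD;
+ *		metatask->callback = my_done;
+ *		metatask->callback_arg = NULL;
+ *		metatask->taskinfo.bbrread.lun_num = lun_num;
+ *		metatask->taskinfo.bbrread.lba = lba;
+ *		metatask->taskinfo.bbrread.len = len;
+ *		cfi_action(metatask);
+ *	}
+ */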
+
+#endif /* _KERNEL */
+
+#endif /* _CTL_FRONTEND_INTERNAL_H_ */
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_ha.h b/sys/cam/ctl/ctl_ha.h
new file mode 100644
index 0000000..6293c7c
--- /dev/null
+++ b/sys/cam/ctl/ctl_ha.h
@@ -0,0 +1,270 @@
+/*-
+ * Copyright (c) 2003-2009 Silicon Graphics International Corp.
+ * Copyright (c) 2011 Spectra Logic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ha.h#1 $
+ * $FreeBSD$
+ */
+
+#ifndef _CTL_HA_H_
+#define _CTL_HA_H_
+
+/*
+ * CTL High Availability Modes:
+ *
+ * CTL_HA_MODE_SER_ONLY: Commands are serialized to the other side. Write
+ * mirroring and read re-direction are assumed to
+ * happen in the back end.
+ * CTL_HA_MODE_XFER: Commands are serialized and data is transferred
+ * for write mirroring and read re-direction.
+ */
+
+typedef enum {
+ CTL_HA_MODE_SER_ONLY,
+ CTL_HA_MODE_XFER
+} ctl_ha_mode;
+
+
+/*
+ * This is a stubbed out High Availability interface. It assumes two nodes
+ * staying in sync.
+ *
+ * The reason this interface is here, and stubbed out, is that CTL was
+ * originally written with support for Copan's (now SGI) high availability
+ * framework. That framework was not released by SGI, and would not have
+ * been generally applicable to FreeBSD anyway.
+ *
+ * The idea here is to show the kind of API that would need to be in place
+ * in a HA framework to work with CTL's HA hooks. This API is very close
+ * to the Copan/SGI API, so that the code using it could stay in place
+ * as-is.
+ *
+ * So, in summary, this is a shell without real substance, and much more
+ * work would be needed to actually make HA work. The implementation
+ * inside CTL will also need to change to fit the eventual implementation.
+ * The additional pieces we would need are:
+ *
+ * - HA "Supervisor" framework that can startup the components of the
+ * system, and initiate failover (i.e. active/active to single mode)
+ * and failback (single to active/active mode) state transitions.
+ * This framework would be able to recognize when an event happens
+ * that requires it to initiate state transitions in the components it
+ * manages.
+ *
+ * - HA communication framework. This framework should have the following
+ * features:
+ * - Separate channels for separate system components. The CTL
+ * instance on one node should communicate with the CTL instance
+ * on another node.
+ * - Short message passing. These messages would be fixed length, so
+ * they could be preallocated and easily passed between the nodes.
+ *    i.e. conceptually like an Ethernet packet.
+ * - DMA/large buffer capability. This would require some negotiation
+ * with the other node to define the destination. It could
+ * allow for "push" (i.e. initiated by the requesting node) DMA or
+ * "pull" (i.e. initiated by the target controller) DMA or both.
+ * - Communication channel status change notification.
+ * - HA capability in other portions of the storage stack. Having two CTL
+ * instances communicate is just one part of an overall HA solution.
+ * State needs to be synchronized at multiple levels of the system in
+ * order for failover to actually work. For instance, if CTL is using a
+ * file on a ZFS filesystem as its backing store, the ZFS array state
+ * should be synchronized with the other node, so that the other node
+ * can immediately take over if the node that is primary for a particular
+ * array fails.
+ */
+
+/*
+ * Communication channel IDs for various system components. This is to
+ * make sure one CTL instance talks with another, one ZFS instance talks
+ * with another, etc.
+ */
+typedef enum {
+ CTL_HA_CHAN_NONE,
+ CTL_HA_CHAN_CTL,
+ CTL_HA_CHAN_ZFS,
+ CTL_HA_CHAN_MAX
+} ctl_ha_channel;
+
+/*
+ * HA communication event notification. These are events generated by the
+ * HA communication subsystem.
+ *
+ * CTL_HA_EVT_MSG_RECV:		Message received from the other node.
+ * CTL_HA_EVT_MSG_SENT: Message sent to the other node.
+ * CTL_HA_EVT_DISCONNECT: Communication channel disconnected.
+ * CTL_HA_EVT_DMA_SENT: DMA successfully sent to other node (push).
+ * CTL_HA_EVT_DMA_RECEIVED: DMA successfully received by other node (pull).
+ */
+typedef enum {
+ CTL_HA_EVT_NONE,
+ CTL_HA_EVT_MSG_RECV,
+ CTL_HA_EVT_MSG_SENT,
+ CTL_HA_EVT_DISCONNECT,
+ CTL_HA_EVT_DMA_SENT,
+ CTL_HA_EVT_DMA_RECEIVED,
+ CTL_HA_EVT_MAX
+} ctl_ha_event;
+
+typedef enum {
+ CTL_HA_STATUS_WAIT,
+ CTL_HA_STATUS_SUCCESS,
+ CTL_HA_STATUS_ERROR,
+ CTL_HA_STATUS_INVALID,
+ CTL_HA_STATUS_DISCONNECT,
+ CTL_HA_STATUS_BUSY,
+ CTL_HA_STATUS_MAX
+} ctl_ha_status;
+
+typedef enum {
+ CTL_HA_DATA_CTL,
+ CTL_HA_DATA_ZFS,
+ CTL_HA_DATA_MAX
+} ctl_ha_dtid;
+
+typedef enum {
+ CTL_HA_DT_CMD_READ,
+ CTL_HA_DT_CMD_WRITE,
+} ctl_ha_dt_cmd;
+
+struct ctl_ha_dt_req;
+
+typedef void (*ctl_ha_dt_cb)(struct ctl_ha_dt_req *);
+
+struct ctl_ha_dt_req {
+ ctl_ha_dt_cmd command;
+ void *context;
+ ctl_ha_dt_cb callback;
+ ctl_ha_dtid id;
+ int ret;
+ uint32_t size;
+ uint8_t *local;
+ uint8_t *remote;
+};
+
+typedef void (*ctl_evt_handler)(ctl_ha_channel channel, ctl_ha_event event,
+ int param);
+void ctl_ha_register_evthandler(ctl_ha_channel channel,
+ ctl_evt_handler handler);
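+
+/*
+ * A component typically registers one handler per channel and switches
+ * on the event type.  A sketch (my_evt_handler is a hypothetical name):
+ *
+ *	static void
+ *	my_evt_handler(ctl_ha_channel channel, ctl_ha_event event,
+ *		       int param)
+ *	{
+ *		switch (event) {
+ *		case CTL_HA_EVT_MSG_RECV:
+ *			printf("message pending on channel %d\n", channel);
+ *			break;
+ *		case CTL_HA_EVT_DISCONNECT:
+ *			printf("channel %d disconnected\n", channel);
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *	}
+ *
+ *	ctl_ha_register_evthandler(CTL_HA_CHAN_CTL, my_evt_handler);
+ */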
+
+static inline ctl_ha_status
+ctl_ha_msg_create(ctl_ha_channel channel, ctl_evt_handler handler)
+{
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+/*
+ * Receive a message of the specified size.
+ */
+static inline ctl_ha_status
+ctl_ha_msg_recv(ctl_ha_channel channel, void *buffer, unsigned int size,
+ int wait)
+{
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+/*
+ * Send a message of the specified size.
+ */
+static inline ctl_ha_status
+ctl_ha_msg_send(ctl_ha_channel channel, void *buffer, unsigned int size,
+ int wait)
+{
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+/*
+ * Allocate a data transfer request structure.
+ */
+static inline struct ctl_ha_dt_req *
+ctl_dt_req_alloc(void)
+{
+ return (NULL);
+}
+
+/*
+ * Free a data transfer request structure.
+ */
+static inline void
+ctl_dt_req_free(struct ctl_ha_dt_req *req)
+{
+ return;
+}
+
+/*
+ * Issue a DMA request for a single buffer.
+ */
+static inline ctl_ha_status
+ctl_dt_single(struct ctl_ha_dt_req *req)
+{
+ return (CTL_HA_STATUS_WAIT);
+}
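+
+/*
+ * A sketch of issuing a single-buffer DMA to the peer node, assuming
+ * that a CTL_HA_STATUS_WAIT return means the request is in flight and
+ * the callback will fire on completion.  my_dt_done, local_buf,
+ * remote_buf and len are hypothetical names:
+ *
+ *	struct ctl_ha_dt_req *req;
+ *
+ *	req = ctl_dt_req_alloc();
+ *	if (req == NULL)
+ *		return;
+ *	req->command = CTL_HA_DT_CMD_WRITE;
+ *	req->id = CTL_HA_DATA_CTL;
+ *	req->size = len;
+ *	req->local = local_buf;
+ *	req->remote = remote_buf;
+ *	req->callback = my_dt_done;
+ *	req->context = NULL;
+ *	if (ctl_dt_single(req) != CTL_HA_STATUS_WAIT)
+ *		ctl_dt_req_free(req);
+ */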
+
+/*
+ * SINGLE: One node
+ * HA: Two nodes (Active/Active implied)
+ * SLAVE/MASTER: The component can set these flags to indicate which side
+ * is in control. It has no effect on the HA framework.
+ */
+typedef enum {
+ CTL_HA_STATE_UNKNOWN = 0x00,
+ CTL_HA_STATE_SINGLE = 0x01,
+ CTL_HA_STATE_HA = 0x02,
+ CTL_HA_STATE_MASK = 0x0F,
+ CTL_HA_STATE_SLAVE = 0x10,
+ CTL_HA_STATE_MASTER = 0x20
+} ctl_ha_state;
+
+typedef enum {
+ CTL_HA_COMP_STATUS_OK,
+ CTL_HA_COMP_STATUS_FAILED,
+ CTL_HA_COMP_STATUS_ERROR
+} ctl_ha_comp_status;
+
+struct ctl_ha_component;
+
+typedef ctl_ha_comp_status (*ctl_hacmp_init_t)(struct ctl_ha_component *);
+typedef ctl_ha_comp_status (*ctl_hacmp_start_t)(struct ctl_ha_component *,
+ ctl_ha_state);
+
+struct ctl_ha_component {
+ char *name;
+ ctl_ha_state state;
+ ctl_ha_comp_status status;
+ ctl_hacmp_init_t init;
+ ctl_hacmp_start_t start;
+ ctl_hacmp_init_t quiesce;
+};
+
+#define CTL_HA_STATE_IS_SINGLE(state) ((state & CTL_HA_STATE_MASK) == \
+ CTL_HA_STATE_SINGLE)
+#define CTL_HA_STATE_IS_HA(state) ((state & CTL_HA_STATE_MASK) == \
+ CTL_HA_STATE_HA)
+
+#endif /* _CTL_HA_H_ */
diff --git a/sys/cam/ctl/ctl_io.h b/sys/cam/ctl/ctl_io.h
new file mode 100644
index 0000000..aa00a06
--- /dev/null
+++ b/sys/cam/ctl/ctl_io.h
@@ -0,0 +1,474 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_io.h#5 $
+ * $FreeBSD$
+ */
+/*
+ * CAM Target Layer data movement structures/interface.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_IO_H_
+#define _CTL_IO_H_
+
+#ifdef _CTL_C
+#define EXTERN(__var,__val) __var = __val
+#else
+#define EXTERN(__var,__val) extern __var
+#endif
+
+#define CTL_MAX_CDBLEN 32
+/*
+ * Uncomment this next line to enable printing out times for I/Os
+ * that take longer than CTL_TIME_IO_SECS seconds to get to the datamove
+ * and/or done stage.
+ */
+#define CTL_TIME_IO
+#ifdef CTL_TIME_IO
+#define CTL_TIME_IO_DEFAULT_SECS 90
+EXTERN(int ctl_time_io_secs, CTL_TIME_IO_DEFAULT_SECS);
+#endif
+
+/*
+ * Uncomment these next two lines to enable the CTL I/O delay feature. You
+ * can delay I/O at two different points -- datamove and done. This is
+ * useful for diagnosing abort conditions (for hosts that send an abort on a
+ * timeout), and for determining how long a host's timeout is.
+ */
+#define CTL_IO_DELAY
+#define CTL_TIMER_BYTES sizeof(struct callout)
+
+typedef enum {
+ CTL_STATUS_NONE, /* No status */
+ CTL_SUCCESS, /* Transaction completed successfully */
+ CTL_CMD_TIMEOUT, /* Command timed out, shouldn't happen here */
+ CTL_SEL_TIMEOUT, /* Selection timeout, shouldn't happen here */
+ CTL_ERROR, /* General CTL error XXX expand on this? */
+ CTL_SCSI_ERROR, /* SCSI error, look at status byte/sense data */
+ CTL_CMD_ABORTED, /* Command aborted, don't return status */
+ CTL_STATUS_MASK = 0xfff,/* Mask off any status flags */
+ CTL_AUTOSENSE = 0x1000 /* Autosense performed */
+} ctl_io_status;
+
+/*
+ * WARNING: Keep the data in/out/none flags where they are. They're used
+ * in conjunction with ctl_cmd_flags. See comment above ctl_cmd_flags
+ * definition in ctl_private.h.
+ */
+typedef enum {
+ CTL_FLAG_NONE = 0x00000000, /* no flags */
+ CTL_FLAG_DATA_IN = 0x00000001, /* DATA IN */
+ CTL_FLAG_DATA_OUT = 0x00000002, /* DATA OUT */
+ CTL_FLAG_DATA_NONE = 0x00000003, /* no data */
+ CTL_FLAG_DATA_MASK = 0x00000003,
+ CTL_FLAG_KDPTR_SGLIST = 0x00000008, /* kern_data_ptr is S/G list*/
+ CTL_FLAG_EDPTR_SGLIST = 0x00000010, /* ext_data_ptr is S/G list */
+ CTL_FLAG_DO_AUTOSENSE = 0x00000020, /* grab sense info */
+ CTL_FLAG_USER_REQ = 0x00000040, /* request came from userland */
+ CTL_FLAG_CONTROL_DEV = 0x00000080, /* processor device */
+ CTL_FLAG_ALLOCATED = 0x00000100, /* data space allocated */
+ CTL_FLAG_BLOCKED = 0x00000200, /* on the blocked queue */
+ CTL_FLAG_ABORT = 0x00000800, /* this I/O should be aborted */
+ CTL_FLAG_DMA_INPROG = 0x00001000, /* DMA in progress */
+ CTL_FLAG_NO_DATASYNC = 0x00002000, /* don't cache flush data */
+ CTL_FLAG_DELAY_DONE = 0x00004000, /* delay injection done */
+ CTL_FLAG_INT_COPY = 0x00008000, /* internal copy, no done call*/
+ CTL_FLAG_SENT_2OTHER_SC = 0x00010000,
+ CTL_FLAG_FROM_OTHER_SC = 0x00020000,
+ CTL_FLAG_IS_WAS_ON_RTR = 0x00040000, /* Don't rerun cmd on failover*/
+ CTL_FLAG_BUS_ADDR = 0x00080000, /* ctl_sglist contains BUS
+ addresses, not virtual ones*/
+ CTL_FLAG_IO_CONT = 0x00100000, /* Continue I/O instead of
+ completing */
+ CTL_FLAG_AUTO_MIRROR = 0x00200000, /* Automatically use memory
+ from the RC cache mirrored
+ address area. */
+#if 0
+ CTL_FLAG_ALREADY_DONE = 0x00200000 /* I/O already completed */
+#endif
+ CTL_FLAG_NO_DATAMOVE = 0x00400000,
+ CTL_FLAG_DMA_QUEUED = 0x00800000, /* DMA queued but not started*/
+ CTL_FLAG_STATUS_QUEUED = 0x01000000, /* Status queued but not sent*/
+
+ CTL_FLAG_REDIR_DONE = 0x02000000, /* Redirection has already
+ been done. */
+ CTL_FLAG_FAILOVER = 0x04000000, /* Killed by a failover */
+ CTL_FLAG_IO_ACTIVE = 0x08000000, /* I/O active on this SC */
+ CTL_FLAG_RDMA_MASK = CTL_FLAG_NO_DATASYNC | CTL_FLAG_BUS_ADDR |
+ CTL_FLAG_AUTO_MIRROR | CTL_FLAG_REDIR_DONE
+ /* Flags we care about for
+ remote DMA */
+} ctl_io_flags;
+
+
+struct ctl_lba_len {
+ uint64_t lba;
+ uint32_t len;
+};
+
+union ctl_priv {
+ uint8_t bytes[sizeof(uint64_t) * 2];
+ uint64_t integer;
+ void *ptr;
+};
+
+/*
+ * Number of CTL private areas.
+ */
+#define CTL_NUM_PRIV 6
+
+/*
+ * Which private area are we using for a particular piece of data?
+ */
+#define CTL_PRIV_LUN 0 /* CTL LUN pointer goes here */
+#define CTL_PRIV_LBA_LEN 1 /* Decoded LBA/len for read/write*/
+#define CTL_PRIV_MODEPAGE 1 /* Modepage info for config write */
+#define CTL_PRIV_BACKEND 2 /* Reserved for block, RAIDCore */
+#define CTL_PRIV_BACKEND_LUN 3 /* Backend LUN pointer */
+#define CTL_PRIV_FRONTEND 4 /* LSI driver, ioctl front end */
+#define CTL_PRIV_USER 5 /* Userland use */
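+
+/*
+ * Illustrative sketch (not part of this interface): callers stash and
+ * retrieve per-I/O state through the slots above via the ctl_private[]
+ * array in the I/O header defined below.  Assuming a hypothetical
+ * backend-private pointer "be_state":
+ *
+ *	struct ctl_lun *lun;
+ *
+ *	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = be_state;
+ *	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ *
+ * The union member used (ptr, integer or bytes) must match between the
+ * writer and the reader of a given slot.
+ */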
+
+#define CTL_INVALID_PORTNAME 0xFF
+#define CTL_UNMAPPED_IID 0xFF
+/*
+ * XXX KDM this size is for the port_priv variable in struct ctl_io_hdr
+ * below. This should be defined in terms of the size of struct
+ * ctlfe_lun_cmd_info at the moment:
+ * struct ctlfe_lun_cmd_info {
+ * int cur_transfer_index;
+ * ctlfe_cmd_flags flags;
+ * bus_dma_segment_t cam_sglist[32];
+ * };
+ *
+ * This isn't really the way I'd prefer to do it, but it does make some
+ * sense, AS LONG AS we can guarantee that there will always only be one
+ * outstanding DMA request per ctl_io. If that assumption isn't valid,
+ * then we've got problems.
+ *
+ * At some point it may be nice to switch CTL over to using CCBs for
+ * everything. At that point we can probably use the ATIO/CTIO model, so
+ * that multiple simultaneous DMAs per command will just work.
+ *
+ * Also note that the current size, 600, is appropriate for 64-bit
+ * architectures, but is overkill for 32-bit architectures. Need a way to
+ * figure out the size at compile time, or just get rid of this altogether.
+ */
+#define CTL_PORT_PRIV_SIZE 600
+
+struct ctl_sg_entry {
+ void *addr;
+ size_t len;
+};
+
+struct ctl_id {
+ uint32_t id;
+ uint64_t wwid[2];
+};
+
+typedef enum {
+ CTL_IO_NONE,
+ CTL_IO_SCSI,
+ CTL_IO_TASK,
+} ctl_io_type;
+
+struct ctl_nexus {
+ struct ctl_id initid; /* Initiator ID */
+ uint32_t targ_port; /* Target port, filled in by PORT */
+ struct ctl_id targ_target; /* Destination target */
+ uint32_t targ_lun; /* Destination lun */
+};
+
+typedef enum {
+ CTL_MSG_SERIALIZE,
+ CTL_MSG_R2R,
+ CTL_MSG_FINISH_IO,
+ CTL_MSG_BAD_JUJU,
+ CTL_MSG_MANAGE_TASKS,
+ CTL_MSG_PERS_ACTION,
+ CTL_MSG_SYNC_FE,
+ CTL_MSG_APS_LOCK,
+ CTL_MSG_DATAMOVE,
+ CTL_MSG_DATAMOVE_DONE
+} ctl_msg_type;
+
+struct ctl_scsiio;
+
+#define CTL_NUM_SG_ENTRIES 9
+
+struct ctl_io_hdr {
+ uint32_t version; /* interface version XXX */
+ ctl_io_type io_type; /* task I/O, SCSI I/O, etc. */
+ ctl_msg_type msg_type;
+ struct ctl_nexus nexus; /* Initiator, port, target, lun */
+ uint32_t iid_indx; /* the index into the iid mapping */
+ uint32_t flags; /* transaction flags */
+ uint32_t status; /* transaction status */
+ uint32_t port_status; /* trans status, set by PORT, 0 = good*/
+ uint32_t timeout; /* timeout in ms */
+ uint32_t retries; /* retry count */
+#ifdef CTL_IO_DELAY
+ uint8_t timer_bytes[CTL_TIMER_BYTES]; /* timer kludge */
+#endif /* CTL_IO_DELAY */
+#ifdef CTL_TIME_IO
+ time_t start_time; /* I/O start time */
+ struct bintime start_bt; /* Timer start ticks */
+ struct bintime dma_start_bt; /* DMA start ticks */
+ struct bintime dma_bt; /* DMA total ticks */
+ uint32_t num_dmas; /* Number of DMAs */
+#endif /* CTL_TIME_IO */
+ union ctl_io *original_sc;
+ union ctl_io *serializing_sc;
+ void *pool; /* I/O pool */
+ union ctl_priv ctl_private[CTL_NUM_PRIV];/* CTL private area */
+ uint8_t port_priv[CTL_PORT_PRIV_SIZE];/* PORT private area*/
+ struct ctl_sg_entry remote_sglist[CTL_NUM_SG_ENTRIES];
+ struct ctl_sg_entry remote_dma_sglist[CTL_NUM_SG_ENTRIES];
+ struct ctl_sg_entry local_sglist[CTL_NUM_SG_ENTRIES];
+ struct ctl_sg_entry local_dma_sglist[CTL_NUM_SG_ENTRIES];
+ STAILQ_ENTRY(ctl_io_hdr) links; /* linked list pointer */
+ TAILQ_ENTRY(ctl_io_hdr) ooa_links;
+ TAILQ_ENTRY(ctl_io_hdr) blocked_links;
+};
+
+typedef enum {
+ CTL_TAG_UNTAGGED,
+ CTL_TAG_SIMPLE,
+ CTL_TAG_ORDERED,
+ CTL_TAG_HEAD_OF_QUEUE,
+ CTL_TAG_ACA
+} ctl_tag_type;
+
+union ctl_io;
+
+/*
+ * SCSI passthrough I/O structure for the CAM Target Layer. Note
+ * that some of these fields are here for completeness, but they aren't
+ * used in the CTL implementation. e.g., timeout and retries won't be
+ * used.
+ *
+ * Note: Make sure the io_hdr is *always* the first element in this
+ * structure.
+ */
+struct ctl_scsiio {
+ struct ctl_io_hdr io_hdr; /* common to all I/O types */
+ uint32_t ext_sg_entries; /* 0 = no S/G list, > 0 = num entries */
+ uint8_t *ext_data_ptr; /* data buffer or S/G list */
+ uint32_t ext_data_len; /* Data transfer length */
+ uint32_t ext_data_filled; /* Amount of data filled so far */
+ uint32_t kern_sg_entries; /* 0 = no S/G list, > 0 = num entries */
+ uint32_t rem_sg_entries; /* 0 = no S/G list, > 0 = num entries */
+ uint8_t *kern_data_ptr; /* data buffer or S/G list */
+ uint32_t kern_data_len; /* Length of this S/G list/buffer */
+ uint32_t kern_total_len; /* Total length of this transaction */
+ uint32_t kern_data_resid; /* Length left to transfer after this*/
+ uint32_t kern_rel_offset; /* Byte Offset of this transfer */
+ struct scsi_sense_data sense_data; /* sense data */
+ uint8_t sense_len; /* Returned sense length */
+ uint8_t scsi_status; /* SCSI status byte */
+ uint8_t sense_residual; /* sense residual length */
+ uint32_t residual; /* data residual length */
+ uint32_t tag_num; /* tag number */
+ ctl_tag_type tag_type; /* simple, ordered, head of queue,etc.*/
+ uint8_t cdb_len; /* CDB length */
+ uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */
+ int (*be_move_done)(union ctl_io *io); /* called by fe */
+ int (*io_cont)(union ctl_io *io); /* to continue processing */
+};
+
+typedef enum {
+ CTL_TASK_ABORT_TASK,
+ CTL_TASK_ABORT_TASK_SET,
+ CTL_TASK_CLEAR_ACA,
+ CTL_TASK_CLEAR_TASK_SET,
+ CTL_TASK_LUN_RESET,
+ CTL_TASK_TARGET_RESET,
+ CTL_TASK_BUS_RESET,
+ CTL_TASK_PORT_LOGIN,
+ CTL_TASK_PORT_LOGOUT
+} ctl_task_type;
+
+/*
+ * Task management I/O structure. Aborts, bus resets, etc., are sent using
+ * this structure.
+ *
+ * Note: Make sure the io_hdr is *always* the first element in this
+ * structure.
+ */
+struct ctl_taskio {
+ struct ctl_io_hdr io_hdr; /* common to all I/O types */
+ ctl_task_type task_action; /* Target Reset, Abort, etc. */
+ uint32_t tag_num; /* tag number */
+ ctl_tag_type tag_type; /* simple, ordered, etc. */
+};
+
+typedef enum {
+ CTL_PR_REG_KEY,
+ CTL_PR_UNREG_KEY,
+ CTL_PR_PREEMPT,
+ CTL_PR_CLEAR,
+ CTL_PR_RESERVE,
+ CTL_PR_RELEASE
+} ctl_pr_action;
+
+/*
+ * The PR info is specifically for sending Persistent Reserve actions
+ * to the other SC which it must also act on.
+ *
+ * Note: Make sure the io_hdr is *always* the first element in this
+ * structure.
+ */
+struct ctl_pr_info {
+ ctl_pr_action action;
+ uint8_t sa_res_key[8];
+ uint8_t res_type;
+ uint16_t residx;
+};
+
+struct ctl_ha_msg_hdr {
+ ctl_msg_type msg_type;
+ union ctl_io *original_sc;
+ union ctl_io *serializing_sc;
+ struct ctl_nexus nexus; /* Initiator, port, target, lun */
+ uint32_t status; /* transaction status */
+ TAILQ_ENTRY(ctl_ha_msg_hdr) links;
+};
+
+#define CTL_HA_MAX_SG_ENTRIES 16
+
+/*
+ * Used for CTL_MSG_APS_LOCK.
+ */
+struct ctl_ha_msg_aps {
+ struct ctl_ha_msg_hdr hdr;
+ uint8_t lock_flag;
+};
+
+/*
+ * Used for CTL_MSG_PERS_ACTION.
+ */
+struct ctl_ha_msg_pr {
+ struct ctl_ha_msg_hdr hdr;
+ struct ctl_pr_info pr_info;
+};
+
+/*
+ * The S/G handling here is a little different than the standard ctl_scsiio
+ * structure, because we can't pass data by reference in between controllers.
+ * The S/G list in the ctl_scsiio struct is normally passed in the
+ * kern_data_ptr field. So kern_sg_entries here will always be non-zero,
+ * even if there is only one entry.
+ *
+ * Used for CTL_MSG_DATAMOVE.
+ */
+struct ctl_ha_msg_dt {
+ struct ctl_ha_msg_hdr hdr;
+ ctl_io_flags flags; /* Only I/O flags are used here */
+ uint32_t sg_sequence; /* S/G portion number */
+ uint8_t sg_last; /* last S/G batch = 1 */
+ uint32_t sent_sg_entries; /* previous S/G count */
+ uint32_t cur_sg_entries; /* current S/G entries */
+ uint32_t kern_sg_entries; /* total S/G entries */
+ uint32_t kern_data_len; /* Length of this S/G list */
+ uint32_t kern_total_len; /* Total length of this
+ transaction */
+ uint32_t kern_data_resid; /* Length left to transfer
+ after this*/
+ uint32_t kern_rel_offset; /* Byte Offset of this
+ transfer */
+ struct ctl_sg_entry sg_list[CTL_HA_MAX_SG_ENTRIES];
+};
+
+/*
+ * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU.
+ */
+struct ctl_ha_msg_scsi {
+ struct ctl_ha_msg_hdr hdr;
+ uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */
+ uint32_t tag_num; /* tag number */
+ ctl_tag_type tag_type; /* simple, ordered, etc. */
+ uint8_t scsi_status; /* SCSI status byte */
+ struct scsi_sense_data sense_data; /* sense data */
+ uint8_t sense_len; /* Returned sense length */
+ uint8_t sense_residual; /* sense residual length */
+ uint32_t residual; /* data residual length */
+ uint32_t fetd_status; /* trans status, set by FETD,
+ 0 = good*/
+ struct ctl_lba_len lbalen; /* used for stats */
+};
+
+/*
+ * Used for CTL_MSG_MANAGE_TASKS.
+ */
+struct ctl_ha_msg_task {
+ struct ctl_ha_msg_hdr hdr;
+ ctl_task_type task_action; /* Target Reset, Abort, etc. */
+ uint32_t tag_num; /* tag number */
+ ctl_tag_type tag_type; /* simple, ordered, etc. */
+};
+
+union ctl_ha_msg {
+ struct ctl_ha_msg_hdr hdr;
+ struct ctl_ha_msg_task task;
+ struct ctl_ha_msg_scsi scsi;
+ struct ctl_ha_msg_dt dt;
+ struct ctl_ha_msg_pr pr;
+ struct ctl_ha_msg_aps aps;
+};
+
+
+struct ctl_prio {
+ struct ctl_io_hdr io_hdr;
+ struct ctl_ha_msg_pr pr_msg;
+};
+
+
+
+union ctl_io {
+ struct ctl_io_hdr io_hdr; /* common to all I/O types */
+ struct ctl_scsiio scsiio; /* Normal SCSI commands */
+ struct ctl_taskio taskio; /* SCSI task management/reset */
+ struct ctl_prio presio; /* update per. res info on other SC */
+};
+
+#ifdef _KERNEL
+
+union ctl_io *ctl_alloc_io(void *pool_ref);
+void ctl_free_io(union ctl_io *io);
+void ctl_zero_io(union ctl_io *io);
+void ctl_copy_io(union ctl_io *src, union ctl_io *dest);
+
+#endif /* _KERNEL */
+
+#endif /* _CTL_IO_H_ */
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_ioctl.h b/sys/cam/ctl/ctl_ioctl.h
new file mode 100644
index 0000000..7ca78eb
--- /dev/null
+++ b/sys/cam/ctl/ctl_ioctl.h
@@ -0,0 +1,604 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2011 Spectra Logic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ioctl.h#4 $
+ * $FreeBSD$
+ */
+/*
+ * CAM Target Layer ioctl interface.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_IOCTL_H_
+#define _CTL_IOCTL_H_
+
+#define CTL_DEFAULT_DEV "/dev/cam/ctl"
+/*
+ * Maximum number of targets we support.
+ */
+#define CTL_MAX_TARGETS 1
+
+/*
+ * Maximum target ID we support.
+ */
+#define CTL_MAX_TARGID 15
+
+/*
+ * Maximum number of LUNs we support at the moment. MUST be a power of 2.
+ */
+#define CTL_MAX_LUNS 256
+
+/*
+ * Maximum number of initiators per port.
+ */
+#define	CTL_MAX_INIT_PER_PORT	2048	/* Was 16 */
+
+/*
+ * Maximum number of ports registered at one time.
+ */
+#define CTL_MAX_PORTS 32
+
+/*
+ * Maximum number of initiators we support.
+ */
+#define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * CTL_MAX_PORTS)
+
+/* Hopefully this won't conflict with new misc devices that pop up */
+#define CTL_MINOR 225
+
+typedef enum {
+ CTL_OOA_INVALID_LUN,
+ CTL_OOA_SUCCESS
+} ctl_ooa_status;
+
+struct ctl_ooa_info {
+ uint32_t target_id; /* Passed in to CTL */
+ uint32_t lun_id; /* Passed in to CTL */
+ uint32_t num_entries; /* Returned from CTL */
+ ctl_ooa_status status; /* Returned from CTL */
+};
+
+struct ctl_hard_startstop_info {
+ cfi_mt_status status;
+ int total_luns;
+ int luns_complete;
+ int luns_failed;
+};
+
+struct ctl_bbrread_info {
+ int lun_num; /* Passed in to CTL */
+ uint64_t lba; /* Passed in to CTL */
+ int len; /* Passed in to CTL */
+ cfi_mt_status status; /* Returned from CTL */
+ cfi_bbrread_status bbr_status; /* Returned from CTL */
+ uint8_t scsi_status; /* Returned from CTL */
+ struct scsi_sense_data sense_data; /* Returned from CTL */
+};
+
+typedef enum {
+ CTL_DELAY_TYPE_NONE,
+ CTL_DELAY_TYPE_CONT,
+ CTL_DELAY_TYPE_ONESHOT
+} ctl_delay_type;
+
+typedef enum {
+ CTL_DELAY_LOC_NONE,
+ CTL_DELAY_LOC_DATAMOVE,
+ CTL_DELAY_LOC_DONE,
+} ctl_delay_location;
+
+typedef enum {
+ CTL_DELAY_STATUS_NONE,
+ CTL_DELAY_STATUS_OK,
+ CTL_DELAY_STATUS_INVALID_LUN,
+ CTL_DELAY_STATUS_INVALID_TYPE,
+ CTL_DELAY_STATUS_INVALID_LOC,
+ CTL_DELAY_STATUS_NOT_IMPLEMENTED
+} ctl_delay_status;
+
+struct ctl_io_delay_info {
+ uint32_t target_id;
+ uint32_t lun_id;
+ ctl_delay_type delay_type;
+ ctl_delay_location delay_loc;
+ uint32_t delay_secs;
+ ctl_delay_status status;
+};
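+
+/*
+ * Illustrative userland sketch (assumes an open descriptor "fd" on
+ * CTL_DEFAULT_DEV and the CTL_DELAY_IO ioctl defined later in this
+ * header): inject a one-shot five second delay at the datamove stage
+ * for target 0, LUN 0:
+ *
+ *	struct ctl_io_delay_info delay;
+ *
+ *	bzero(&delay, sizeof(delay));
+ *	delay.target_id = 0;
+ *	delay.lun_id = 0;
+ *	delay.delay_type = CTL_DELAY_TYPE_ONESHOT;
+ *	delay.delay_loc = CTL_DELAY_LOC_DATAMOVE;
+ *	delay.delay_secs = 5;
+ *	if (ioctl(fd, CTL_DELAY_IO, &delay) == -1)
+ *		err(1, "CTL_DELAY_IO");
+ *	if (delay.status != CTL_DELAY_STATUS_OK)
+ *		warnx("delay injection failed");
+ */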
+
+typedef enum {
+ CTL_GS_SYNC_NONE,
+ CTL_GS_SYNC_OK,
+ CTL_GS_SYNC_NO_LUN
+} ctl_gs_sync_status;
+
+/*
+ * The target and LUN id specify which device to modify. The sync interval
+ * means that we will let through every Nth SYNCHRONIZE CACHE command.
+ */
+struct ctl_sync_info {
+ uint32_t target_id; /* passed to kernel */
+ uint32_t lun_id; /* passed to kernel */
+ int sync_interval; /* depends on whether get/set */
+ ctl_gs_sync_status status; /* passed from kernel */
+};
+
+typedef enum {
+ CTL_STATS_NO_IO,
+ CTL_STATS_READ,
+ CTL_STATS_WRITE
+} ctl_stat_types;
+#define CTL_STATS_NUM_TYPES 3
+
+typedef enum {
+ CTL_LUN_STATS_NO_BLOCKSIZE = 0x01
+} ctl_lun_stats_flags;
+
+struct ctl_lun_io_port_stats {
+ uint32_t targ_port;
+ uint64_t bytes[CTL_STATS_NUM_TYPES];
+ uint64_t operations[CTL_STATS_NUM_TYPES];
+ struct bintime time[CTL_STATS_NUM_TYPES];
+ uint64_t num_dmas[CTL_STATS_NUM_TYPES];
+ struct bintime dma_time[CTL_STATS_NUM_TYPES];
+};
+
+struct ctl_lun_io_stats {
+ uint8_t device_type;
+ uint64_t lun_number;
+ uint32_t blocksize;
+ ctl_lun_stats_flags flags;
+ struct ctl_lun_io_port_stats ports[CTL_MAX_PORTS];
+};
+
+typedef enum {
+ CTL_SS_OK,
+ CTL_SS_NEED_MORE_SPACE,
+ CTL_SS_ERROR
+} ctl_stats_status;
+
+typedef enum {
+ CTL_STATS_FLAG_NONE = 0x00,
+ CTL_STATS_FLAG_TIME_VALID = 0x01
+} ctl_stats_flags;
+
+struct ctl_stats {
+ int alloc_len; /* passed to kernel */
+ struct ctl_lun_io_stats *lun_stats; /* passed to/from kernel */
+ int fill_len; /* passed to userland */
+ int num_luns; /* passed to userland */
+ ctl_stats_status status; /* passed to userland */
+ ctl_stats_flags flags; /* passed to userland */
+ struct timespec timestamp; /* passed to userland */
+};
+
+/*
+ * The types of errors that can be injected:
+ *
+ * NONE: No error specified.
+ * ABORTED: SSD_KEY_ABORTED_COMMAND, 0x45, 0x00
+ * MEDIUM_ERR: Medium error, different asc/ascq depending on read/write.
+ * UA: Unit attention.
+ * CUSTOM: User specifies the sense data.
+ * TYPE: Mask to use with error types.
+ *
+ * Flags that affect injection behavior:
+ * CONTINUOUS: This error will stay around until explicitly cleared.
+ * DESCRIPTOR: Use descriptor sense instead of fixed sense.
+ */
+typedef enum {
+ CTL_LUN_INJ_NONE = 0x000,
+ CTL_LUN_INJ_ABORTED = 0x001,
+ CTL_LUN_INJ_MEDIUM_ERR = 0x002,
+ CTL_LUN_INJ_UA = 0x003,
+ CTL_LUN_INJ_CUSTOM = 0x004,
+ CTL_LUN_INJ_TYPE = 0x0ff,
+ CTL_LUN_INJ_CONTINUOUS = 0x100,
+ CTL_LUN_INJ_DESCRIPTOR = 0x200
+} ctl_lun_error;
+
+/*
+ * Flags to specify what type of command the given error pattern will
+ * execute on. The first group of types can be ORed together.
+ *
+ * READ: Any read command.
+ * WRITE: Any write command.
+ * READWRITE: Any read or write command.
+ * READCAP: Any read capacity command.
+ * TUR: Test Unit Ready.
+ * ANY: Any command.
+ * MASK: Mask for basic command patterns.
+ *
+ * Special types:
+ *
+ * CMD: The CDB to act on is specified in struct ctl_error_desc_cmd.
+ * RANGE: For read/write commands, act when the LBA is in the
+ * specified range.
+ */
+typedef enum {
+ CTL_LUN_PAT_NONE = 0x000,
+ CTL_LUN_PAT_READ = 0x001,
+ CTL_LUN_PAT_WRITE = 0x002,
+ CTL_LUN_PAT_READWRITE = CTL_LUN_PAT_READ | CTL_LUN_PAT_WRITE,
+ CTL_LUN_PAT_READCAP = 0x004,
+ CTL_LUN_PAT_TUR = 0x008,
+ CTL_LUN_PAT_ANY = 0x0ff,
+ CTL_LUN_PAT_MASK = 0x0ff,
+ CTL_LUN_PAT_CMD = 0x100,
+ CTL_LUN_PAT_RANGE = 0x200
+} ctl_lun_error_pattern;
+
+/*
+ * This structure allows the user to specify a particular CDB pattern to
+ * look for.
+ *
+ * cdb_pattern: Fill in the relevant bytes to look for in the CDB.
+ * cdb_valid_bytes: Bitmask specifying valid bytes in the cdb_pattern.
+ * flags: Specify any command flags (see ctl_io_flags) that
+ * should be set.
+ */
+struct ctl_error_desc_cmd {
+ uint8_t cdb_pattern[CTL_MAX_CDBLEN];
+ uint32_t cdb_valid_bytes;
+ uint32_t flags;
+};
+
+/*
+ * Error injection descriptor.
+ *
+ * target_id: Target ID to act on.
+ * lun_id LUN to act on.
+ * lun_error: The type of error to inject. See above for descriptions.
+ * error_pattern: What kind of command to act on. See above.
+ * cmd_desc: For CTL_LUN_PAT_CMD only.
+ * lba_range: For CTL_LUN_PAT_RANGE only.
+ * custom_sense: Specify sense. For CTL_LUN_INJ_CUSTOM only.
+ * serial: Serial number returned by the kernel. Use for deletion.
+ * links: Kernel use only.
+ */
+struct ctl_error_desc {
+ uint32_t target_id; /* To kernel */
+ uint32_t lun_id; /* To kernel */
+ ctl_lun_error lun_error; /* To kernel */
+ ctl_lun_error_pattern error_pattern; /* To kernel */
+ struct ctl_error_desc_cmd cmd_desc; /* To kernel */
+ struct ctl_lba_len lba_range; /* To kernel */
+ struct scsi_sense_data custom_sense; /* To kernel */
+ uint64_t serial; /* From kernel */
+ STAILQ_ENTRY(ctl_error_desc) links; /* Kernel use only */
+};
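+
+/*
+ * Illustrative sketch (assumes an open descriptor "fd" on
+ * CTL_DEFAULT_DEV and the CTL_ERROR_INJECT ioctl defined later in this
+ * header): inject a medium error on every read command until the
+ * pattern is explicitly cleared:
+ *
+ *	struct ctl_error_desc desc;
+ *
+ *	bzero(&desc, sizeof(desc));
+ *	desc.target_id = 0;
+ *	desc.lun_id = 0;
+ *	desc.lun_error = CTL_LUN_INJ_MEDIUM_ERR | CTL_LUN_INJ_CONTINUOUS;
+ *	desc.error_pattern = CTL_LUN_PAT_READ;
+ *	if (ioctl(fd, CTL_ERROR_INJECT, &desc) == -1)
+ *		err(1, "CTL_ERROR_INJECT");
+ *
+ * The serial number returned in desc.serial can later be handed to
+ * CTL_ERROR_INJECT_DELETE to remove the pattern.
+ */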
+
+typedef enum {
+ CTL_OOA_FLAG_NONE = 0x00,
+ CTL_OOA_FLAG_ALL_LUNS = 0x01
+} ctl_ooa_flags;
+
+typedef enum {
+ CTL_OOA_OK,
+ CTL_OOA_NEED_MORE_SPACE,
+ CTL_OOA_ERROR
+} ctl_get_ooa_status;
+
+typedef enum {
+ CTL_OOACMD_FLAG_NONE = 0x00,
+ CTL_OOACMD_FLAG_DMA = 0x01,
+ CTL_OOACMD_FLAG_BLOCKED = 0x02,
+ CTL_OOACMD_FLAG_ABORT = 0x04,
+ CTL_OOACMD_FLAG_RTR = 0x08,
+ CTL_OOACMD_FLAG_DMA_QUEUED = 0x10
+} ctl_ooa_cmd_flags;
+
+struct ctl_ooa_entry {
+ ctl_ooa_cmd_flags cmd_flags;
+ uint8_t cdb[CTL_MAX_CDBLEN];
+ uint8_t cdb_len;
+ uint32_t tag_num;
+ uint32_t lun_num;
+ struct bintime start_bt;
+};
+
+struct ctl_ooa {
+ ctl_ooa_flags flags; /* passed to kernel */
+ uint64_t lun_num; /* passed to kernel */
+ uint32_t alloc_len; /* passed to kernel */
+ uint32_t alloc_num; /* passed to kernel */
+ struct ctl_ooa_entry *entries; /* filled in kernel */
+ uint32_t fill_len; /* passed to userland */
+ uint32_t fill_num; /* passed to userland */
+ uint32_t dropped_num; /* passed to userland */
+ struct bintime cur_bt; /* passed to userland */
+ ctl_get_ooa_status status; /* passed to userland */
+};
+
+typedef enum {
+ CTL_PORT_LIST_NONE,
+ CTL_PORT_LIST_OK,
+ CTL_PORT_LIST_NEED_MORE_SPACE,
+ CTL_PORT_LIST_ERROR
+} ctl_port_list_status;
+
+struct ctl_port_list {
+ uint32_t alloc_len; /* passed to kernel */
+ uint32_t alloc_num; /* passed to kernel */
+ struct ctl_port_entry *entries; /* filled in kernel */
+ uint32_t fill_len; /* passed to userland */
+ uint32_t fill_num; /* passed to userland */
+ uint32_t dropped_num; /* passed to userland */
+ ctl_port_list_status status; /* passed to userland */
+};
+
+typedef enum {
+ CTL_LUN_NOSTATUS,
+ CTL_LUN_OK,
+ CTL_LUN_ERROR
+} ctl_lun_status;
+
+#define CTL_ERROR_STR_LEN 160
+
+#define CTL_BEARG_RD 0x01
+#define CTL_BEARG_WR 0x02
+#define CTL_BEARG_RW (CTL_BEARG_RD|CTL_BEARG_WR)
+#define CTL_BEARG_ASCII 0x04
+
+/*
+ * Backend Argument:
+ *
+ * namelen: Length of the name field, including the terminating NUL.
+ *
+ * name:		Name of the parameter. This must be NUL-terminated.
+ *
+ * flags: Flags for the parameter, see above for values.
+ *
+ * vallen: Length of the value in bytes.
+ *
+ * value: Value to be set/fetched.
+ *
+ * kname: For kernel use only.
+ *
+ * kvalue: For kernel use only.
+ */
+struct ctl_be_arg {
+ int namelen;
+ char *name;
+ int flags;
+ int vallen;
+ void *value;
+
+ char *kname;
+ void *kvalue;
+};
+
+typedef enum {
+ CTL_LUNREQ_CREATE,
+ CTL_LUNREQ_RM
+} ctl_lunreq_type;
+
+
+/*
+ * LUN creation parameters:
+ *
+ * flags: Various LUN flags, see ctl_backend.h for a
+ * description of the flag values and meanings.
+ *
+ * device_type: The SCSI device type. e.g. 0 for Direct Access,
+ * 3 for Processor, etc. Only certain backends may
+ * support setting this field. The CTL_LUN_FLAG_DEV_TYPE
+ * flag should be set in the flags field if the device
+ * type is set.
+ *
+ * lun_size_bytes: The size of the LUN in bytes. For some backends
+ * this is relevant (e.g. ramdisk), for others, it may
+ * be ignored in favor of using the properties of the
+ * backing store. If specified, this should be a
+ * multiple of the blocksize.
+ *
+ * The actual size of the LUN is returned in this
+ * field.
+ *
+ * blocksize_bytes: The LUN blocksize in bytes. For some backends this
+ * is relevant, for others it may be ignored in
+ * favor of using the properties of the backing store.
+ *
+ * The actual blocksize of the LUN is returned in this
+ * field.
+ *
+ * req_lun_id: The requested LUN ID. The CTL_LUN_FLAG_ID_REQ flag
+ * should be set if this is set. The request will be
+ * granted if the LUN number is available, otherwise
+ * the LUN addition request will fail.
+ *
+ * The allocated LUN number is returned in this field.
+ *
+ * serial_num: This is the value returned in SCSI INQUIRY VPD page
+ * 0x80. If it is specified, the CTL_LUN_FLAG_SERIAL_NUM
+ * flag should be set.
+ *
+ * The serial number value used is returned in this
+ * field.
+ *
+ * device_id: This is the value returned in the T10 vendor ID
+ * based DESIGNATOR field in the SCSI INQUIRY VPD page
+ * 0x83 data. If it is specified, the CTL_LUN_FLAG_DEVID
+ * flag should be set.
+ *
+ * The device id value used is returned in this field.
+ */
+struct ctl_lun_create_params {
+ ctl_backend_lun_flags flags;
+ uint8_t device_type;
+ uint64_t lun_size_bytes;
+ uint32_t blocksize_bytes;
+ uint32_t req_lun_id;
+ uint8_t serial_num[CTL_SN_LEN];
+ uint8_t device_id[CTL_DEVID_LEN];
+};
+
+/*
+ * LUN removal parameters:
+ *
+ * lun_id: The number of the LUN to delete. This must be set.
+ * The LUN must be backed by the given backend.
+ */
+struct ctl_lun_rm_params {
+ uint32_t lun_id;
+};
+
+/*
+ * Union of request type data. Fill in the appropriate union member for
+ * the request type.
+ */
+union ctl_lunreq_data {
+ struct ctl_lun_create_params create;
+ struct ctl_lun_rm_params rm;
+};
+
+/*
+ * LUN request interface:
+ *
+ * backend:		This is required, and is a NUL-terminated string
+ * that is the name of the backend, like "ramdisk" or
+ * "block".
+ *
+ * reqtype: The type of request, CTL_LUNREQ_CREATE to create a
+ * LUN, CTL_LUNREQ_RM to delete a LUN.
+ *
+ * reqdata: Request type-specific information. See the
+ * description of the individual union members above
+ * for more information.
+ *
+ * num_be_args: This is the number of backend-specific arguments
+ * in the be_args array.
+ *
+ * be_args: This is an array of backend-specific arguments.
+ * See above for a description of the fields in this
+ * structure.
+ *
+ * status: Status of the LUN request.
+ *
+ * error_str: If the status is CTL_LUN_ERROR, this will
+ * contain a string describing the error.
+ *
+ * kern_be_args: For kernel use only.
+ */
+struct ctl_lun_req {
+ char backend[CTL_BE_NAME_LEN];
+ ctl_lunreq_type reqtype;
+ union ctl_lunreq_data reqdata;
+ int num_be_args;
+ struct ctl_be_arg *be_args;
+ ctl_lun_status status;
+ char error_str[CTL_ERROR_STR_LEN];
+ struct ctl_be_arg *kern_be_args;
+};
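+
+/*
+ * Illustrative sketch (assumes an open descriptor "fd" on
+ * CTL_DEFAULT_DEV and that the "ramdisk" backend is present): create a
+ * 100MB LUN with a 512 byte blocksize:
+ *
+ *	struct ctl_lun_req req;
+ *
+ *	bzero(&req, sizeof(req));
+ *	strlcpy(req.backend, "ramdisk", sizeof(req.backend));
+ *	req.reqtype = CTL_LUNREQ_CREATE;
+ *	req.reqdata.create.lun_size_bytes = 100 * 1024 * 1024;
+ *	req.reqdata.create.blocksize_bytes = 512;
+ *	if (ioctl(fd, CTL_LUN_REQ, &req) == -1)
+ *		err(1, "CTL_LUN_REQ");
+ *	if (req.status != CTL_LUN_OK)
+ *		errx(1, "LUN creation failed: %s", req.error_str);
+ */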
+
+/*
+ * LUN list status:
+ *
+ * NONE: No status.
+ *
+ * OK: Request completed successfully.
+ *
+ * NEED_MORE_SPACE: The allocated length of the entries field is too
+ * small for the available data.
+ *
+ * ERROR:		An error occurred, look at the error string for a
+ * description of the error.
+ */
+typedef enum {
+ CTL_LUN_LIST_NONE,
+ CTL_LUN_LIST_OK,
+ CTL_LUN_LIST_NEED_MORE_SPACE,
+ CTL_LUN_LIST_ERROR
+} ctl_lun_list_status;
+
+/*
+ * LUN list interface
+ *
+ * backend:		This is a NUL-terminated string. If the string
+ * length is 0, then all LUNs on all backends will
+ * be enumerated. Otherwise this is the name of the
+ * backend to be enumerated, like "ramdisk" or "block".
+ *
+ * alloc_len: The length of the data buffer allocated for entries.
+ * In order to properly size the buffer, make one call
+ * with alloc_len set to 0, and then use the returned
+ * fill_len as the buffer length to allocate and
+ * pass in on a subsequent call.
+ *
+ * lun_xml: XML-formatted information on the requested LUNs.
+ *
+ * fill_len: The amount of data filled in the storage for entries.
+ *
+ * status: The status of the request. See above for the
+ * description of the values of this field.
+ *
+ * error_str: If the status indicates an error, this string will
+ * be filled in to describe the error.
+ */
+struct ctl_lun_list {
+ char backend[CTL_BE_NAME_LEN]; /* passed to kernel*/
+ uint32_t alloc_len; /* passed to kernel */
+ char *lun_xml; /* filled in kernel */
+ uint32_t fill_len; /* passed to userland */
+ ctl_lun_list_status status; /* passed to userland */
+ char error_str[CTL_ERROR_STR_LEN];
+ /* passed to userland */
+};
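+
+/*
+ * Illustrative sketch of the two-call sizing protocol described above
+ * (assumes an open descriptor "fd" on CTL_DEFAULT_DEV; the first call
+ * reports the required length as described in the alloc_len comment):
+ *
+ *	struct ctl_lun_list list;
+ *
+ *	bzero(&list, sizeof(list));
+ *	if (ioctl(fd, CTL_LUN_LIST, &list) == -1)
+ *		err(1, "CTL_LUN_LIST");
+ *	list.alloc_len = list.fill_len;
+ *	list.lun_xml = malloc(list.alloc_len);
+ *	if (ioctl(fd, CTL_LUN_LIST, &list) == -1
+ *	 || list.status != CTL_LUN_LIST_OK)
+ *		errx(1, "LUN list failed: %s", list.error_str);
+ */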
+
+#define CTL_IO _IOWR(CTL_MINOR, 0x00, union ctl_io)
+#define CTL_ENABLE_PORT _IOW(CTL_MINOR, 0x04, struct ctl_port_entry)
+#define CTL_DISABLE_PORT _IOW(CTL_MINOR, 0x05, struct ctl_port_entry)
+#define CTL_DUMP_OOA _IO(CTL_MINOR, 0x06)
+#define CTL_CHECK_OOA _IOWR(CTL_MINOR, 0x07, struct ctl_ooa_info)
+#define CTL_HARD_STOP _IOR(CTL_MINOR, 0x08, \
+ struct ctl_hard_startstop_info)
+#define CTL_HARD_START _IOR(CTL_MINOR, 0x09, \
+ struct ctl_hard_startstop_info)
+#define CTL_DELAY_IO _IOWR(CTL_MINOR, 0x10, struct ctl_io_delay_info)
+#define CTL_REALSYNC_GET _IOR(CTL_MINOR, 0x11, int)
+#define CTL_REALSYNC_SET _IOW(CTL_MINOR, 0x12, int)
+#define CTL_SETSYNC _IOWR(CTL_MINOR, 0x13, struct ctl_sync_info)
+#define CTL_GETSYNC _IOWR(CTL_MINOR, 0x14, struct ctl_sync_info)
+#define CTL_GETSTATS _IOWR(CTL_MINOR, 0x15, struct ctl_stats)
+#define CTL_ERROR_INJECT _IOWR(CTL_MINOR, 0x16, struct ctl_error_desc)
+#define CTL_BBRREAD _IOWR(CTL_MINOR, 0x17, struct ctl_bbrread_info)
+#define CTL_GET_OOA _IOWR(CTL_MINOR, 0x18, struct ctl_ooa)
+#define CTL_DUMP_STRUCTS _IO(CTL_MINOR, 0x19)
+#define CTL_GET_PORT_LIST _IOWR(CTL_MINOR, 0x20, struct ctl_port_list)
+#define CTL_LUN_REQ _IOWR(CTL_MINOR, 0x21, struct ctl_lun_req)
+#define CTL_LUN_LIST _IOWR(CTL_MINOR, 0x22, struct ctl_lun_list)
+#define CTL_ERROR_INJECT_DELETE _IOW(CTL_MINOR, 0x23, struct ctl_error_desc)
+#define CTL_SET_PORT_WWNS _IOW(CTL_MINOR, 0x24, struct ctl_port_entry)
+
+#endif /* _CTL_IOCTL_H_ */
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_mem_pool.c b/sys/cam/ctl/ctl_mem_pool.c
new file mode 100644
index 0000000..801fbc9
--- /dev/null
+++ b/sys/cam/ctl/ctl_mem_pool.c
@@ -0,0 +1,192 @@
+/*-
+ * Copyright (c) 2003, 2004 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_mem_pool.c#1 $
+ */
+/*
+ * CAM Target Layer memory pool code.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/queue.h>
+
+#include <cam/ctl/ctl_mem_pool.h>
+
+MALLOC_DEFINE(M_CTL_POOL, "ctlpool", "CTL memory pool");
+
+int
+ctl_init_mem_pool(struct ctl_mem_pool *pool, int chunk_size,
+ ctl_mem_pool_flags flags, int grow_inc,
+ int initial_pool_size)
+{
+ pool->flags = flags;
+ pool->chunk_size = chunk_size;
+ pool->grow_inc = grow_inc;
+ mtx_init(&pool->lock, "Pool mutex", NULL, MTX_DEF);
+ STAILQ_INIT(&pool->free_mem_list);
+ cv_init(&pool->wait_mem, "CTL mem pool");
+
+ if (ctl_grow_mem_pool(pool, initial_pool_size, /*can_wait*/ 1) !=
+ initial_pool_size)
+ return (1);
+ else
+ return (0);
+}
+
+struct ctl_mem_element *
+ctl_alloc_mem_element(struct ctl_mem_pool *pool, int can_wait)
+{
+ struct ctl_mem_element *mem;
+
+ for (;;) {
+ mtx_lock(&pool->lock);
+
+ mem = STAILQ_FIRST(&pool->free_mem_list);
+ if (mem != NULL) {
+ STAILQ_REMOVE(&pool->free_mem_list, mem,
+ ctl_mem_element, links);
+ mem->flags = CTL_MEM_ELEMENT_PREALLOC;
+ }
+ mtx_unlock(&pool->lock);
+
+ if (mem != NULL)
+ return (mem);
+
+ /*
+		 * Grow the pool permanently by the requested increment
+ * instead of temporarily. This has the effect that
+ * whatever the high water mark of transactions is for
+ * this pool, we'll keep that much memory around.
+ */
+ if (pool->flags & CTL_MEM_POOL_PERM_GROW) {
+ if (ctl_grow_mem_pool(pool, pool->grow_inc,
+ can_wait) != 0)
+ continue;
+ }
+ mem = (struct ctl_mem_element *)malloc(sizeof(*mem),
+ M_CTL_POOL, can_wait ? M_WAITOK : M_NOWAIT);
+
+ if (mem != NULL) {
+ mem->flags = CTL_MEM_ELEMENT_NONE;
+ mem->pool = pool;
+
+ mem->bytes = malloc(pool->chunk_size, M_CTL_POOL,
+ can_wait ? M_WAITOK : M_NOWAIT);
+ if (mem->bytes == NULL) {
+ free(mem, M_CTL_POOL);
+ mem = NULL;
+ } else {
+ return (mem);
+ }
+ }
+
+		if (can_wait == 0)
+			return (NULL);
+
+		/*
+		 * cv_wait_unlock() expects the mutex to be held on entry,
+		 * and the pool lock was dropped above, so reacquire it
+		 * before sleeping until an element is freed.
+		 */
+		mtx_lock(&pool->lock);
+		cv_wait_unlock(&pool->wait_mem, &pool->lock);
+ }
+}
+
+void
+ctl_free_mem_element(struct ctl_mem_element *mem)
+{
+ struct ctl_mem_pool *pool;
+
+ pool = mem->pool;
+
+ if (mem->flags & CTL_MEM_ELEMENT_PREALLOC) {
+ mtx_lock(&pool->lock);
+ STAILQ_INSERT_TAIL(&pool->free_mem_list, mem, links);
+ mtx_unlock(&pool->lock);
+ cv_broadcast(&pool->wait_mem);
+ } else
+ free(mem, M_CTL_POOL);
+}
+
+int
+ctl_grow_mem_pool(struct ctl_mem_pool *pool, int count, int can_wait)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct ctl_mem_element *mem;
+
+ mem = (struct ctl_mem_element *)malloc(sizeof(*mem),
+ M_CTL_POOL, can_wait ? M_WAITOK : M_NOWAIT);
+
+ if (mem == NULL)
+ break;
+
+ mem->bytes = malloc(pool->chunk_size, M_CTL_POOL, can_wait ?
+ M_WAITOK : M_NOWAIT);
+ if (mem->bytes == NULL) {
+ free(mem, M_CTL_POOL);
+ break;
+ }
+ mem->flags = CTL_MEM_ELEMENT_PREALLOC;
+ mem->pool = pool;
+ mtx_lock(&pool->lock);
+ STAILQ_INSERT_TAIL(&pool->free_mem_list, mem, links);
+ mtx_unlock(&pool->lock);
+ }
+
+ return (i);
+}
+
+int
+ctl_shrink_mem_pool(struct ctl_mem_pool *pool)
+{
+ struct ctl_mem_element *mem, *mem_next;
+
+ mtx_lock(&pool->lock);
+ for (mem = STAILQ_FIRST(&pool->free_mem_list); mem != NULL;
+ mem = mem_next) {
+ mem_next = STAILQ_NEXT(mem, links);
+
+ STAILQ_REMOVE(&pool->free_mem_list, mem, ctl_mem_element,
+ links);
+ free(mem->bytes, M_CTL_POOL);
+ free(mem, M_CTL_POOL);
+ }
+ mtx_unlock(&pool->lock);
+
+ return (0);
+}
diff --git a/sys/cam/ctl/ctl_mem_pool.h b/sys/cam/ctl/ctl_mem_pool.h
new file mode 100644
index 0000000..a1df7e3
--- /dev/null
+++ b/sys/cam/ctl/ctl_mem_pool.h
@@ -0,0 +1,83 @@
+/*-
+ * Copyright (c) 2003, 2004 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_mem_pool.h#1 $
+ * $FreeBSD$
+ */
+/*
+ * CAM Target Layer memory pool code.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_MEMPOOL_H_
+#define _CTL_MEMPOOL_H_
+
+typedef enum {
+ CTL_MEM_POOL_NONE,
+ CTL_MEM_POOL_PERM_GROW
+} ctl_mem_pool_flags;
+
+struct ctl_mem_pool {
+ ctl_mem_pool_flags flags;
+ int chunk_size;
+ int grow_inc;
+ struct mtx lock;
+ struct cv wait_mem;
+ STAILQ_HEAD(, ctl_mem_element) free_mem_list;
+};
+
+typedef enum {
+ CTL_MEM_ELEMENT_NONE,
+ CTL_MEM_ELEMENT_PREALLOC
+} ctl_mem_element_flags;
+
+struct ctl_mem_element {
+ ctl_mem_element_flags flags;
+ struct ctl_mem_pool *pool;
+ uint8_t *bytes;
+ STAILQ_ENTRY(ctl_mem_element) links;
+};
+
+#ifdef _KERNEL
+
+MALLOC_DECLARE(M_CTL_POOL);
+
+int ctl_init_mem_pool(struct ctl_mem_pool *pool, int chunk_size,
+ ctl_mem_pool_flags flags, int grow_inc,
+ int initial_pool_size);
+struct ctl_mem_element *ctl_alloc_mem_element(struct ctl_mem_pool *pool,
+ int can_wait);
+void ctl_free_mem_element(struct ctl_mem_element *mem);
+int ctl_grow_mem_pool(struct ctl_mem_pool *pool, int count,
+ int can_wait);
+int ctl_shrink_mem_pool(struct ctl_mem_pool *pool);
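+
+/*
+ * Illustrative usage sketch (a hypothetical pool of 512 byte chunks,
+ * grown permanently in increments of 16, starting with 32 preallocated
+ * elements):
+ *
+ *	static struct ctl_mem_pool pool;
+ *	struct ctl_mem_element *mem;
+ *
+ *	if (ctl_init_mem_pool(&pool, 512, CTL_MEM_POOL_PERM_GROW,
+ *	    16, 32) != 0)
+ *		return (ENOMEM);
+ *	mem = ctl_alloc_mem_element(&pool, 0);	(can_wait == 0)
+ *	if (mem != NULL) {
+ *		(use mem->bytes, pool.chunk_size bytes long)
+ *		ctl_free_mem_element(mem);
+ *	}
+ *
+ * ctl_shrink_mem_pool() releases all free elements, e.g. at unload time.
+ */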
+#endif /* _KERNEL */
+
+#endif /* _CTL_MEMPOOL_H_ */
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
new file mode 100644
index 0000000..864027d
--- /dev/null
+++ b/sys/cam/ctl/ctl_private.h
@@ -0,0 +1,493 @@
+/*-
+ * Copyright (c) 2003, 2004, 2005, 2008 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_private.h#7 $
+ * $FreeBSD$
+ */
+/*
+ * CAM Target Layer driver private data structures/definitions.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_PRIVATE_H_
+#define _CTL_PRIVATE_H_
+
+/*
+ * SCSI vendor and product names.
+ */
+#define CTL_VENDOR "FREEBSD "
+#define CTL_DIRECT_PRODUCT "CTLDISK "
+#define CTL_PROCESSOR_PRODUCT "CTLPROCESSOR "
+#define CTL_UNKNOWN_PRODUCT "CTLDEVICE "
+
+struct ctl_fe_ioctl_startstop_info {
+ struct cv sem;
+ struct ctl_hard_startstop_info hs_info;
+};
+
+struct ctl_fe_ioctl_bbrread_info {
+ struct cv sem;
+ struct ctl_bbrread_info *bbr_info;
+ int wakeup_done;
+ struct mtx *lock;
+};
+
+typedef enum {
+ CTL_IOCTL_INPROG,
+ CTL_IOCTL_DATAMOVE,
+ CTL_IOCTL_DONE
+} ctl_fe_ioctl_state;
+
+struct ctl_fe_ioctl_params {
+ struct cv sem;
+ struct mtx ioctl_mtx;
+ ctl_fe_ioctl_state state;
+};
+
+#define CTL_POOL_ENTRIES_INTERNAL 200
+#define CTL_POOL_ENTRIES_EMERGENCY 300
+#define CTL_POOL_ENTRIES_OTHER_SC 200
+
+typedef enum {
+ CTL_POOL_INTERNAL,
+ CTL_POOL_FETD,
+ CTL_POOL_EMERGENCY,
+ CTL_POOL_IOCTL,
+ CTL_POOL_4OTHERSC
+} ctl_pool_type;
+
+typedef enum {
+ CTL_POOL_FLAG_NONE = 0x00,
+ CTL_POOL_FLAG_INVALID = 0x01
+} ctl_pool_flags;
+
+struct ctl_io_pool {
+ ctl_pool_type type;
+ ctl_pool_flags flags;
+ uint32_t id;
+ struct ctl_softc *ctl_softc;
+ uint32_t refcount;
+ uint64_t total_allocated;
+ uint64_t total_freed;
+ int32_t total_ctl_io;
+ int32_t free_ctl_io;
+ STAILQ_HEAD(, ctl_io_hdr) free_queue;
+ STAILQ_ENTRY(ctl_io_pool) links;
+};
+
+typedef enum {
+ CTL_IOCTL_FLAG_NONE = 0x00,
+ CTL_IOCTL_FLAG_ENABLED = 0x01
+} ctl_ioctl_flags;
+
+struct ctl_ioctl_info {
+ ctl_ioctl_flags flags;
+ uint32_t cur_tag_num;
+ struct ctl_frontend fe;
+ char port_name[24];
+};
+
+typedef enum {
+ CTL_SER_BLOCK,
+ CTL_SER_EXTENT,
+ CTL_SER_PASS,
+ CTL_SER_SKIP
+} ctl_serialize_action;
+
+typedef enum {
+ CTL_ACTION_BLOCK,
+ CTL_ACTION_OVERLAP,
+ CTL_ACTION_OVERLAP_TAG,
+ CTL_ACTION_PASS,
+ CTL_ACTION_SKIP,
+ CTL_ACTION_ERROR
+} ctl_action;
+
+/*
+ * WARNING: Keep the bottom nibble here free, we OR in the data direction
+ * flags for each command.
+ *
+ * Note: "OK_ON_ALL_LUNS" == we don't have to have a lun configured
+ * "OK_ON_BOTH" == we have to have a lun configured
+ */
+typedef enum {
+ CTL_CMD_FLAG_NONE = 0x0000,
+ CTL_CMD_FLAG_NO_SENSE = 0x0010,
+ CTL_CMD_FLAG_OK_ON_ALL_LUNS = 0x0020,
+ CTL_CMD_FLAG_ALLOW_ON_RESV = 0x0040,
+ CTL_CMD_FLAG_OK_ON_PROC = 0x0100,
+ CTL_CMD_FLAG_OK_ON_SLUN = 0x0200,
+ CTL_CMD_FLAG_OK_ON_BOTH = 0x0300,
+ CTL_CMD_FLAG_OK_ON_STOPPED = 0x0400,
+ CTL_CMD_FLAG_OK_ON_INOPERABLE = 0x0800,
+ CTL_CMD_FLAG_OK_ON_OFFLINE = 0x1000,
+ CTL_CMD_FLAG_OK_ON_SECONDARY = 0x2000,
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV = 0x4000
+} ctl_cmd_flags;
+
+typedef enum {
+ CTL_SERIDX_TUR = 0,
+ CTL_SERIDX_READ,
+ CTL_SERIDX_WRITE,
+ CTL_SERIDX_MD_SNS,
+ CTL_SERIDX_MD_SEL,
+ CTL_SERIDX_RQ_SNS,
+ CTL_SERIDX_INQ,
+ CTL_SERIDX_RD_CAP,
+ CTL_SERIDX_RESV,
+ CTL_SERIDX_REL,
+ CTL_SERIDX_LOG_SNS,
+ CTL_SERIDX_FORMAT,
+ CTL_SERIDX_START,
+ CTL_SERIDX_PRES_IN,
+ CTL_SERIDX_PRES_OUT,
+ CTL_SERIDX_MAIN_IN,
+ /* TBD: others to be filled in as needed */
+ CTL_SERIDX_COUNT, /* LAST, not a normal code, provides # codes */
+ CTL_SERIDX_INVLD = CTL_SERIDX_COUNT
+} ctl_seridx;
+
+typedef int ctl_opfunc(struct ctl_scsiio *ctsio);
+
+struct ctl_cmd_entry {
+ ctl_opfunc *execute;
+ ctl_seridx seridx;
+ ctl_cmd_flags flags;
+ ctl_lun_error_pattern pattern;
+};
+
+typedef enum {
+ CTL_LUN_NONE = 0x000,
+ CTL_LUN_CONTROL = 0x001,
+ CTL_LUN_RESERVED = 0x002,
+ CTL_LUN_INVALID = 0x004,
+ CTL_LUN_DISABLED = 0x008,
+ CTL_LUN_MALLOCED = 0x010,
+ CTL_LUN_STOPPED = 0x020,
+ CTL_LUN_INOPERABLE = 0x040,
+ CTL_LUN_OFFLINE = 0x080,
+ CTL_LUN_PR_RESERVED = 0x100,
+ CTL_LUN_PRIMARY_SC = 0x200,
+ CTL_LUN_SENSE_DESC = 0x400
+} ctl_lun_flags;
+
+typedef enum {
+ CTLBLOCK_FLAG_NONE = 0x00,
+ CTLBLOCK_FLAG_INVALID = 0x01
+} ctlblock_flags;
+
+union ctl_softcs {
+ struct ctl_softc *ctl_softc;
+ struct ctlblock_softc *ctlblock_softc;
+};
+
+/*
+ * Mode page defaults.
+ */
+#if 0
+/*
+ * These values make Solaris trim off some of the capacity.
+ */
+#define CTL_DEFAULT_SECTORS_PER_TRACK 63
+#define CTL_DEFAULT_HEADS 255
+/*
+ * These values seem to work okay.
+ */
+#define CTL_DEFAULT_SECTORS_PER_TRACK 63
+#define CTL_DEFAULT_HEADS 16
+/*
+ * These values work reasonably well.
+ */
+#define CTL_DEFAULT_SECTORS_PER_TRACK 512
+#define CTL_DEFAULT_HEADS 64
+#endif
+
+/*
+ * Solaris is somewhat picky about how many heads and sectors per track you
+ * have defined in mode pages 3 and 4. These values seem to cause Solaris
+ * to get the capacity more or less right when you run the format tool.
+ * They still have problems when dealing with devices larger than 1TB,
+ * but there isn't anything we can do about that.
+ *
+ * For smaller LUN sizes, this ends up causing the number of cylinders to
+ * work out to 0. Solaris actually recognizes that and comes up with its
+ * own bogus geometry to fit the actual capacity of the drive. They really
+ * should just give up on geometry and stick to the read capacity
+ * information alone for modern disk drives.
+ *
+ * One thing worth mentioning about Solaris' mkfs command is that it
+ * doesn't like sectors per track values larger than 256. 512 seems to
+ * work okay for format, but causes problems when you try to make a
+ * filesystem.
+ *
+ * Another caveat about these values: the product of these two values
+ * really should be a power of 2. This is because of the simplistic
+ * shift-based calculation that we have to use on the i386 platform to
+ * calculate the number of cylinders here. (If you use a divide, you end
+ * up calling __udivdi3(), the compiler's software 64-bit division
+ * routine, which is relatively expensive compared to a simple shift.)
+ *
+ * So for the current values (256 S/T, 128 H), we get 32768, which works
+ * very nicely for calculating cylinders.
+ *
+ * If you want to change these values so that their product is no longer a
+ * power of 2, re-visit the calculation in ctl_init_page_index(). You may
+ * need to make it a bit more complicated to get the number of cylinders
+ * right.
+ */
+#define CTL_DEFAULT_SECTORS_PER_TRACK 256
+#define CTL_DEFAULT_HEADS 128
+
+#define CTL_DEFAULT_ROTATION_RATE 10000
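+
+/*
+ * Worked example of the shift-based calculation referred to above: with
+ * 256 sectors per track and 128 heads, the product is 32768 == 2^15, so
+ *
+ *	cylinders = total_sectors >> 15;
+ *
+ * e.g. a 1TB LUN with 512 byte sectors has 2147483648 sectors, giving
+ * 65536 cylinders.
+ */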
+
+struct ctl_page_index;
+
+typedef int ctl_modesen_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ int pc);
+typedef int ctl_modesel_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ uint8_t *page_ptr);
+
+typedef enum {
+ CTL_PAGE_FLAG_NONE = 0x00,
+ CTL_PAGE_FLAG_DISK_ONLY = 0x01
+} ctl_page_flags;
+
+struct ctl_page_index {
+ uint8_t page_code;
+ uint8_t subpage;
+ uint16_t page_len;
+ uint8_t *page_data;
+ ctl_page_flags page_flags;
+ ctl_modesen_handler *sense_handler;
+ ctl_modesel_handler *select_handler;
+};
+
+#define CTL_PAGE_CURRENT 0x00
+#define CTL_PAGE_CHANGEABLE 0x01
+#define CTL_PAGE_DEFAULT 0x02
+#define CTL_PAGE_SAVED 0x03
+
+static struct ctl_page_index page_index_template[] = {
+ {SMS_FORMAT_DEVICE_PAGE, 0, sizeof(struct scsi_format_page), NULL,
+ CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
+ {SMS_RIGID_DISK_PAGE, 0, sizeof(struct scsi_rigid_disk_page), NULL,
+ CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
+ {SMS_CACHING_PAGE, 0, sizeof(struct scsi_caching_page), NULL,
+ CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
+ {SMS_CONTROL_MODE_PAGE, 0, sizeof(struct scsi_control_page), NULL,
+ CTL_PAGE_FLAG_NONE, NULL, ctl_control_page_handler},
+ {SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, PWR_SUBPAGE_CODE,
+ sizeof(struct copan_power_subpage), NULL, CTL_PAGE_FLAG_NONE,
+ ctl_power_sp_sense_handler, ctl_power_sp_handler},
+ {SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, APS_SUBPAGE_CODE,
+ sizeof(struct copan_aps_subpage), NULL, CTL_PAGE_FLAG_NONE,
+ NULL, ctl_aps_sp_handler},
+ {SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, DBGCNF_SUBPAGE_CODE,
+ sizeof(struct copan_debugconf_subpage), NULL, CTL_PAGE_FLAG_NONE,
+ ctl_debugconf_sp_sense_handler, ctl_debugconf_sp_select_handler},
+};
+
+#define	CTL_NUM_MODE_PAGES	(sizeof(page_index_template)/ \
+				 sizeof(page_index_template[0]))
+
+struct ctl_mode_pages {
+ struct scsi_format_page format_page[4];
+ struct scsi_rigid_disk_page rigid_disk_page[4];
+ struct scsi_caching_page caching_page[4];
+ struct scsi_control_page control_page[4];
+ struct copan_power_subpage power_subpage[4];
+ struct copan_aps_subpage aps_subpage[4];
+ struct copan_debugconf_subpage debugconf_subpage[4];
+ struct ctl_page_index index[CTL_NUM_MODE_PAGES];
+};
+
+struct ctl_pending_sense {
+ ctl_ua_type ua_pending;
+ struct scsi_sense_data sense;
+};
+
+struct ctl_lun_delay_info {
+ ctl_delay_type datamove_type;
+ uint32_t datamove_delay;
+ ctl_delay_type done_type;
+ uint32_t done_delay;
+};
+
+typedef enum {
+ CTL_ERR_INJ_NONE = 0x00,
+ CTL_ERR_INJ_ABORTED = 0x01
+} ctl_err_inject_flags;
+
+typedef enum {
+ CTL_PR_FLAG_NONE = 0x00,
+ CTL_PR_FLAG_REGISTERED = 0x01,
+ CTL_PR_FLAG_ACTIVE_RES = 0x02
+} ctl_per_res_flags;
+
+struct ctl_per_res_info {
+ struct scsi_per_res_key res_key;
+ uint8_t registered;
+};
+
+#define CTL_PR_ALL_REGISTRANTS 0xFFFF
+#define CTL_PR_NO_RESERVATION 0xFFF0
+
+/*
+ * For report target port groups.
+ */
+#define NUM_TARGET_PORT_GROUPS 2
+#define NUM_PORTS_PER_GRP 2
+
+struct ctl_lun {
+ struct mtx lun_lock;
+ struct ctl_id target;
+ uint64_t lun;
+ ctl_lun_flags flags;
+ STAILQ_HEAD(,ctl_error_desc) error_list;
+ uint64_t error_serial;
+ struct ctl_softc *ctl_softc;
+ struct ctl_be_lun *be_lun;
+ struct ctl_backend_driver *backend;
+ int io_count;
+ struct ctl_lun_delay_info delay_info;
+ int sync_interval;
+ int sync_count;
+ TAILQ_HEAD(ctl_ooaq, ctl_io_hdr) ooa_queue;
+ TAILQ_HEAD(ctl_blockq,ctl_io_hdr) blocked_queue;
+ STAILQ_ENTRY(ctl_lun) links;
+ STAILQ_ENTRY(ctl_lun) run_links;
+ struct ctl_nexus rsv_nexus;
+ uint32_t have_ca[CTL_MAX_INITIATORS >> 5];
+ struct ctl_pending_sense pending_sense[CTL_MAX_INITIATORS];
+ struct ctl_mode_pages mode_pages;
+ struct ctl_lun_io_stats stats;
+ struct ctl_per_res_info per_res[2*CTL_MAX_INITIATORS];
+ unsigned int PRGeneration;
+ int pr_key_count;
+ uint16_t pr_res_idx;
+ uint8_t res_type;
+ uint8_t write_buffer[524288];
+};
+
+typedef enum {
+ CTL_FLAG_TASK_PENDING = 0x01,
+ CTL_FLAG_REAL_SYNC = 0x02,
+ CTL_FLAG_MASTER_SHELF = 0x04
+} ctl_gen_flags;
+
+struct ctl_wwpn_iid {
+ int in_use;
+ uint64_t wwpn;
+ uint32_t iid;
+ int32_t port;
+};
+
+struct ctl_softc {
+ struct mtx ctl_lock;
+ struct cdev *dev;
+ int open_count;
+ struct ctl_id target;
+ int num_disks;
+ int num_luns;
+ ctl_gen_flags flags;
+ ctl_ha_mode ha_mode;
+ struct ctl_ioctl_info ioctl_info;
+ struct ctl_lun lun;
+ struct ctl_io_pool *internal_pool;
+ struct ctl_io_pool *emergency_pool;
+ struct ctl_io_pool *othersc_pool;
+ struct proc *work_thread;
+ int targ_online;
+ uint32_t ctl_lun_mask[CTL_MAX_LUNS >> 5];
+ struct ctl_lun *ctl_luns[CTL_MAX_LUNS];
+ struct ctl_wwpn_iid wwpn_iid[CTL_MAX_PORTS][CTL_MAX_INIT_PER_PORT];
+ uint32_t ctl_port_mask;
+ uint64_t aps_locked_lun;
+ STAILQ_HEAD(, ctl_lun) lun_list;
+ STAILQ_HEAD(, ctl_be_lun) pending_lun_queue;
+ STAILQ_HEAD(, ctl_io_hdr) task_queue;
+ STAILQ_HEAD(, ctl_io_hdr) incoming_queue;
+ STAILQ_HEAD(, ctl_io_hdr) rtr_queue;
+ STAILQ_HEAD(, ctl_io_hdr) done_queue;
+ STAILQ_HEAD(, ctl_io_hdr) isc_queue;
+ uint32_t num_frontends;
+ STAILQ_HEAD(, ctl_frontend) fe_list;
+ struct ctl_frontend *ctl_ports[CTL_MAX_PORTS];
+ uint32_t num_backends;
+ STAILQ_HEAD(, ctl_backend_driver) be_list;
+ uint32_t num_pools;
+ uint32_t cur_pool_id;
+ STAILQ_HEAD(, ctl_io_pool) io_pools;
+ time_t last_print_jiffies;
+ uint32_t skipped_prints;
+};
+
+#ifdef _KERNEL
+
+extern struct ctl_cmd_entry ctl_cmd_table[];
+
+uint32_t ctl_get_initindex(struct ctl_nexus *nexus);
+int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
+ uint32_t total_ctl_io, struct ctl_io_pool **npool);
+int ctl_pool_acquire(struct ctl_io_pool *pool);
+int ctl_pool_invalidate(struct ctl_io_pool *pool);
+int ctl_pool_release(struct ctl_io_pool *pool);
+void ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool);
+int ctl_scsi_release(struct ctl_scsiio *ctsio);
+int ctl_scsi_reserve(struct ctl_scsiio *ctsio);
+int ctl_start_stop(struct ctl_scsiio *ctsio);
+int ctl_sync_cache(struct ctl_scsiio *ctsio);
+int ctl_format(struct ctl_scsiio *ctsio);
+int ctl_write_buffer(struct ctl_scsiio *ctsio);
+int ctl_mode_select(struct ctl_scsiio *ctsio);
+int ctl_mode_sense(struct ctl_scsiio *ctsio);
+int ctl_read_capacity(struct ctl_scsiio *ctsio);
+int ctl_service_action_in(struct ctl_scsiio *ctsio);
+int ctl_read_write(struct ctl_scsiio *ctsio);
+int ctl_report_luns(struct ctl_scsiio *ctsio);
+int ctl_request_sense(struct ctl_scsiio *ctsio);
+int ctl_tur(struct ctl_scsiio *ctsio);
+int ctl_inquiry(struct ctl_scsiio *ctsio);
+int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio);
+int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio);
+int ctl_maintenance_in(struct ctl_scsiio *ctsio);
+void ctl_done_lock(union ctl_io *io, int have_lock);
+int ctl_isc(struct ctl_scsiio *ctsio);
+
+#endif /* _KERNEL */
+
+#endif /* _CTL_PRIVATE_H_ */
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_scsi_all.c b/sys/cam/ctl/ctl_scsi_all.c
new file mode 100644
index 0000000..91b79f3
--- /dev/null
+++ b/sys/cam/ctl/ctl_scsi_all.c
@@ -0,0 +1,227 @@
+/*-
+ * Implementation of Utility functions for all SCSI device types.
+ *
+ * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998, 2003 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_scsi_all.c#2 $
+ */
+
+#include <sys/param.h>
+
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#ifdef _KERNEL
+#include <sys/systm.h>
+#include <sys/libkern.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#else
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#endif
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_queue.h>
+#include <cam/cam_xpt.h>
+#include <cam/scsi/scsi_all.h>
+
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <sys/sbuf.h>
+#ifndef _KERNEL
+#include <camlib.h>
+#endif
+
+const char *
+ctl_scsi_status_string(struct ctl_scsiio *ctsio)
+{
+	switch (ctsio->scsi_status) {
+ case SCSI_STATUS_OK:
+ return("OK");
+ case SCSI_STATUS_CHECK_COND:
+ return("Check Condition");
+ case SCSI_STATUS_BUSY:
+ return("Busy");
+ case SCSI_STATUS_INTERMED:
+ return("Intermediate");
+ case SCSI_STATUS_INTERMED_COND_MET:
+ return("Intermediate-Condition Met");
+ case SCSI_STATUS_RESERV_CONFLICT:
+ return("Reservation Conflict");
+ case SCSI_STATUS_CMD_TERMINATED:
+ return("Command Terminated");
+ case SCSI_STATUS_QUEUE_FULL:
+ return("Queue Full");
+ case SCSI_STATUS_ACA_ACTIVE:
+ return("ACA Active");
+ case SCSI_STATUS_TASK_ABORTED:
+ return("Task Aborted");
+ default: {
+ static char unkstr[64];
+ snprintf(unkstr, sizeof(unkstr), "Unknown %#x",
+ ctsio->scsi_status);
+ return(unkstr);
+ }
+ }
+}
+
+/*
+ * ctl_scsi_command_string() returns 0 for success and -1 for failure.
+ */
+int
+ctl_scsi_command_string(struct ctl_scsiio *ctsio,
+ struct scsi_inquiry_data *inq_data, struct sbuf *sb)
+{
+ char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
+
+ sbuf_printf(sb, "%s. CDB: %s",
+ scsi_op_desc(ctsio->cdb[0], inq_data),
+ scsi_cdb_string(ctsio->cdb, cdb_str, sizeof(cdb_str)));
+
+ return(0);
+}
+
+void
+ctl_scsi_path_string(union ctl_io *io, char *path_str, int len)
+{
+ if (io->io_hdr.nexus.targ_target.wwid[0] == 0) {
+ snprintf(path_str, len, "(%ju:%d:%ju:%d): ",
+ (uintmax_t)io->io_hdr.nexus.initid.id,
+ io->io_hdr.nexus.targ_port,
+ (uintmax_t)io->io_hdr.nexus.targ_target.id,
+ io->io_hdr.nexus.targ_lun);
+ } else {
+ /*
+ * XXX KDM find a better way to display FC WWIDs.
+ */
+		snprintf(path_str, len, "(%ju:%d:%#jx,%#jx:%d): ",
+			 (uintmax_t)io->io_hdr.nexus.initid.id,
+			 io->io_hdr.nexus.targ_port,
+			 (uintmax_t)io->io_hdr.nexus.targ_target.wwid[0],
+			 (uintmax_t)io->io_hdr.nexus.targ_target.wwid[1],
+			 io->io_hdr.nexus.targ_lun);
+ }
+}
+
+/*
+ * ctl_scsi_sense_sbuf() returns 0 for success and -1 for failure.
+ */
+int
+ctl_scsi_sense_sbuf(struct ctl_scsiio *ctsio,
+ struct scsi_inquiry_data *inq_data, struct sbuf *sb,
+ scsi_sense_string_flags flags)
+{
+ char path_str[64];
+
+ if ((ctsio == NULL) || (sb == NULL))
+ return(-1);
+
+ ctl_scsi_path_string((union ctl_io *)ctsio, path_str, sizeof(path_str));
+
+	if (flags & SSS_FLAG_PRINT_COMMAND) {
+		sbuf_cat(sb, path_str);
+		ctl_scsi_command_string(ctsio, inq_data, sb);
+		sbuf_printf(sb, "\n");
+	}
+
+ scsi_sense_only_sbuf(&ctsio->sense_data, ctsio->sense_len, sb,
+ path_str, inq_data, ctsio->cdb, ctsio->cdb_len);
+
+ return(0);
+}
+
+char *
+ctl_scsi_sense_string(struct ctl_scsiio *ctsio,
+ struct scsi_inquiry_data *inq_data, char *str,
+ int str_len)
+{
+ struct sbuf sb;
+
+ sbuf_new(&sb, str, str_len, 0);
+
+ ctl_scsi_sense_sbuf(ctsio, inq_data, &sb, SSS_FLAG_PRINT_COMMAND);
+
+ sbuf_finish(&sb);
+
+ return(sbuf_data(&sb));
+}
+
+#ifdef _KERNEL
+void
+ctl_scsi_sense_print(struct ctl_scsiio *ctsio,
+ struct scsi_inquiry_data *inq_data)
+{
+ struct sbuf sb;
+ char str[512];
+
+ sbuf_new(&sb, str, sizeof(str), 0);
+
+ ctl_scsi_sense_sbuf(ctsio, inq_data, &sb, SSS_FLAG_PRINT_COMMAND);
+
+ sbuf_finish(&sb);
+
+ printf("%s", sbuf_data(&sb));
+}
+
+#else /* _KERNEL */
+void
+ctl_scsi_sense_print(struct ctl_scsiio *ctsio,
+ struct scsi_inquiry_data *inq_data, FILE *ofile)
+{
+ struct sbuf sb;
+ char str[512];
+
+ if ((ctsio == NULL) || (ofile == NULL))
+ return;
+
+ sbuf_new(&sb, str, sizeof(str), 0);
+
+ ctl_scsi_sense_sbuf(ctsio, inq_data, &sb, SSS_FLAG_PRINT_COMMAND);
+
+ sbuf_finish(&sb);
+
+ fprintf(ofile, "%s", sbuf_data(&sb));
+}
+
+#endif /* _KERNEL */
+
diff --git a/sys/cam/ctl/ctl_scsi_all.h b/sys/cam/ctl/ctl_scsi_all.h
new file mode 100644
index 0000000..35e2d0a
--- /dev/null
+++ b/sys/cam/ctl/ctl_scsi_all.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998, 2003 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_scsi_all.h#2 $
+ */
+
+__FBSDID("$FreeBSD$");
+
+__BEGIN_DECLS
+const char * ctl_scsi_status_string(struct ctl_scsiio *ctsio);
+#ifdef _KERNEL
+void ctl_scsi_sense_print(struct ctl_scsiio *ctsio,
+ struct scsi_inquiry_data *inq_data);
+#else /* _KERNEL */
+void ctl_scsi_sense_print(struct ctl_scsiio *ctsio,
+ struct scsi_inquiry_data *inq_data,
+ FILE *ofile);
+#endif /* _KERNEL */
+int ctl_scsi_command_string(struct ctl_scsiio *ctsio,
+			struct scsi_inquiry_data *inq_data, struct sbuf *sb);
+int ctl_scsi_sense_sbuf(struct ctl_scsiio *ctsio,
+ struct scsi_inquiry_data *inq_data, struct sbuf *sb,
+ scsi_sense_string_flags flags);
+void ctl_scsi_path_string(union ctl_io *io, char *path_str, int len);
+char *ctl_scsi_sense_string(struct ctl_scsiio *ctsio,
+ struct scsi_inquiry_data *inq_data, char *str,
+ int str_len);
+
+__END_DECLS
diff --git a/sys/cam/ctl/ctl_ser_table.c b/sys/cam/ctl/ctl_ser_table.c
new file mode 100644
index 0000000..d862788
--- /dev/null
+++ b/sys/cam/ctl/ctl_ser_table.c
@@ -0,0 +1,81 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ser_table.c#1 $
+ * $FreeBSD$
+ */
+
+/*
+ * CAM Target Layer command serialization table.
+ *
+ * Author: Kim Le
+ */
+
+/****************************************************************************/
+/* TABLE ctl_serialize_table */
+/* */
+/* The matrix which drives the serialization algorithm. The major index */
+/* (the first) into this table is the command being checked and the minor */
+/* index is the command against which the first command is being checked. */
+/* i.e., the major index (row) command is ahead of the minor index command */
+/* (column) in the queue. This allows the code to optimize by capturing */
+/* the result of the first indexing operation into a pointer. */
+/* */
+/* Whenever a new value is added to the IDX_T type, this matrix must be */
+/* expanded by one row AND one column -- Because of this, some effort */
+/* should be made to re-use the indexes whenever possible. */
+/* */
+/****************************************************************************/
+
+#define sK CTL_SER_SKIP /* Skip */
+#define	pS	CTL_SER_PASS		/* Pass */
+#define bK CTL_SER_BLOCK /* Blocked */
+#define xT CTL_SER_EXTENT /* Extent check */
+
+static ctl_serialize_action
+ctl_serialize_table[CTL_SERIDX_COUNT][CTL_SERIDX_COUNT] = {
+/**>IDX_ :: 2nd:TUR RD WRT MDSN MDSL RQSN INQ RDCP RES REL LSNS FMT STR PRIN PROT MAININ*/
+/*TUR */{ pS, pS, pS, bK, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
+/*READ */{ pS, pS, xT, bK, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
+/*WRITE */{ pS, xT, xT, bK, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
+/*MD_SNS */{ bK, bK, bK, pS, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
+/*MD_SEL */{ bK, bK, bK, bK, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
+/*RQ_SNS */{ pS, pS, pS, pS, pS, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
+/*INQ */{ pS, pS, pS, pS, pS, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
+/*RD_CAP */{ pS, pS, pS, pS, pS, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
+/*RESV */{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK},
+/*REL */{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK},
+/*LOG_SNS */{ pS, pS, pS, pS, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
+/*FORMAT */{ pS, bK, bK, bK, bK, pS, pS, bK, bK, bK, bK, bK, bK, bK, bK, bK},
+/*START */{ bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK, bK, bK, bK, bK},
+/*PRES_IN */{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK},
+/*PRES_OUT*/{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK},
+/*MAIN_IN */{ bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK, bK, bK, bK, pS}
+};
+
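+/*
+ * Minimal sketch (deliberately left uncompiled) of how the table above
+ * is meant to be consulted: one indexing operation picks the row for
+ * the command already in the queue, and the row is then indexed by the
+ * incoming command.  The function and parameter names here are
+ * illustrative only; the real consumer is the serialization code in
+ * ctl.c.
+ */
+#if 0
+static ctl_serialize_action
+ctl_ser_lookup(ctl_seridx ooa_idx, ctl_seridx pending_idx)
+{
+	const ctl_serialize_action *row;
+
+	/* Capture the first indexing operation in a pointer. */
+	row = ctl_serialize_table[ooa_idx];
+
+	return (row[pending_idx]);
+}
+#endif
+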
diff --git a/sys/cam/ctl/ctl_util.c b/sys/cam/ctl/ctl_util.c
new file mode 100644
index 0000000..3ca0aa2
--- /dev/null
+++ b/sys/cam/ctl/ctl_util.c
@@ -0,0 +1,843 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_util.c#2 $
+ */
+/*
+ * CAM Target Layer SCSI library
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef _KERNEL
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#else /* _KERNEL */
+#include <sys/types.h>
+#include <sys/time.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#endif /* _KERNEL */
+#include <sys/sbuf.h>
+#include <sys/queue.h>
+#include <sys/callout.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_util.h>
+
+struct ctl_status_desc {
+ ctl_io_status status;
+ const char *description;
+};
+
+struct ctl_task_desc {
+ ctl_task_type task_action;
+ const char *description;
+};
+static struct ctl_status_desc ctl_status_table[] = {
+ {CTL_STATUS_NONE, "No Status"},
+ {CTL_SUCCESS, "Command Completed Successfully"},
+ {CTL_CMD_TIMEOUT, "Command Timed Out"},
+ {CTL_SEL_TIMEOUT, "Selection Timeout"},
+ {CTL_ERROR, "Command Failed"},
+ {CTL_SCSI_ERROR, "SCSI Error"},
+ {CTL_CMD_ABORTED, "Command Aborted"},
+};
+
+static struct ctl_task_desc ctl_task_table[] = {
+ {CTL_TASK_ABORT_TASK, "Abort Task"},
+ {CTL_TASK_ABORT_TASK_SET, "Abort Task Set"},
+ {CTL_TASK_CLEAR_ACA, "Clear ACA"},
+ {CTL_TASK_CLEAR_TASK_SET, "Clear Task Set"},
+ {CTL_TASK_LUN_RESET, "LUN Reset"},
+ {CTL_TASK_TARGET_RESET, "Target Reset"},
+ {CTL_TASK_BUS_RESET, "Bus Reset"},
+ {CTL_TASK_PORT_LOGIN, "Port Login"},
+ {CTL_TASK_PORT_LOGOUT, "Port Logout"}
+};
+
+void
+ctl_scsi_tur(union ctl_io *io, ctl_tag_type tag_type, uint8_t control)
+{
+ struct ctl_scsiio *ctsio;
+ struct scsi_test_unit_ready *cdb;
+
+ ctl_scsi_zero_io(io);
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ ctsio = &io->scsiio;
+ cdb = (struct scsi_test_unit_ready *)ctsio->cdb;
+
+ cdb->opcode = TEST_UNIT_READY;
+ cdb->control = control;
+ io->io_hdr.flags = CTL_FLAG_DATA_NONE;
+ ctsio->tag_type = tag_type;
+ ctsio->cdb_len = sizeof(*cdb);
+ ctsio->ext_data_len = 0;
+ ctsio->ext_data_ptr = NULL;
+ ctsio->ext_sg_entries = 0;
+ ctsio->ext_data_filled = 0;
+ ctsio->sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_inquiry(union ctl_io *io, uint8_t *data_ptr, int32_t data_len,
+ uint8_t byte2, uint8_t page_code, ctl_tag_type tag_type,
+ uint8_t control)
+{
+ struct ctl_scsiio *ctsio;
+ struct scsi_inquiry *cdb;
+
+ ctl_scsi_zero_io(io);
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ ctsio = &io->scsiio;
+ cdb = (struct scsi_inquiry *)ctsio->cdb;
+
+ cdb->opcode = INQUIRY;
+ cdb->byte2 = byte2;
+ cdb->page_code = page_code;
+ cdb->control = control;
+ scsi_ulto2b(data_len, cdb->length);
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ ctsio->tag_type = tag_type;
+ ctsio->cdb_len = sizeof(*cdb);
+ ctsio->ext_data_len = data_len;
+ ctsio->ext_data_ptr = data_ptr;
+ ctsio->ext_sg_entries = 0;
+ ctsio->ext_data_filled = 0;
+ ctsio->sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_request_sense(union ctl_io *io, uint8_t *data_ptr,
+ int32_t data_len, uint8_t byte2, ctl_tag_type tag_type,
+ uint8_t control)
+{
+ struct ctl_scsiio *ctsio;
+ struct scsi_request_sense *cdb;
+
+ ctl_scsi_zero_io(io);
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ ctsio = &io->scsiio;
+ cdb = (struct scsi_request_sense *)ctsio->cdb;
+
+ cdb->opcode = REQUEST_SENSE;
+ cdb->byte2 = byte2;
+ cdb->control = control;
+ cdb->length = data_len;
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ ctsio->tag_type = tag_type;
+ ctsio->cdb_len = sizeof(*cdb);
+ ctsio->ext_data_ptr = data_ptr;
+ ctsio->ext_data_len = data_len;
+ ctsio->ext_sg_entries = 0;
+ ctsio->ext_data_filled = 0;
+ ctsio->sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_report_luns(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
+ uint8_t select_report, ctl_tag_type tag_type,
+ uint8_t control)
+{
+ struct ctl_scsiio *ctsio;
+ struct scsi_report_luns *cdb;
+
+ ctl_scsi_zero_io(io);
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ ctsio = &io->scsiio;
+ cdb = (struct scsi_report_luns *)ctsio->cdb;
+
+ cdb->opcode = REPORT_LUNS;
+ cdb->select_report = select_report;
+ scsi_ulto4b(data_len, cdb->length);
+ cdb->control = control;
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ ctsio->tag_type = tag_type;
+ ctsio->cdb_len = sizeof(*cdb);
+ ctsio->ext_data_ptr = data_ptr;
+ ctsio->ext_data_len = data_len;
+ ctsio->ext_sg_entries = 0;
+ ctsio->ext_data_filled = 0;
+ ctsio->sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_read_write_buffer(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, int read_buffer, uint8_t mode,
+ uint8_t buffer_id, uint32_t buffer_offset,
+ ctl_tag_type tag_type, uint8_t control)
+{
+ struct ctl_scsiio *ctsio;
+ struct scsi_write_buffer *cdb;
+
+ ctl_scsi_zero_io(io);
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ ctsio = &io->scsiio;
+ cdb = (struct scsi_write_buffer *)ctsio->cdb;
+
+ if (read_buffer != 0)
+ cdb->opcode = READ_BUFFER;
+ else
+ cdb->opcode = WRITE_BUFFER;
+
+ cdb->byte2 = mode & RWB_MODE;
+ cdb->buffer_id = buffer_id;
+ scsi_ulto3b(buffer_offset, cdb->offset);
+ scsi_ulto3b(data_len, cdb->length);
+ cdb->control = control;
+ if (read_buffer != 0)
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ else
+ io->io_hdr.flags = CTL_FLAG_DATA_OUT;
+ ctsio->tag_type = tag_type;
+ ctsio->cdb_len = sizeof(*cdb);
+ ctsio->ext_data_ptr = data_ptr;
+ ctsio->ext_data_len = data_len;
+ ctsio->ext_sg_entries = 0;
+ ctsio->ext_data_filled = 0;
+ ctsio->sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_read_write(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
+ int read_op, uint8_t byte2, int minimum_cdb_size,
+ uint64_t lba, uint32_t num_blocks, ctl_tag_type tag_type,
+ uint8_t control)
+{
+ struct ctl_scsiio *ctsio;
+
+ ctl_scsi_zero_io(io);
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ ctsio = &io->scsiio;
+
+ /*
+ * Pick out the smallest CDB that will hold the user's request.
+ * minimum_cdb_size allows cranking the CDB size up, even for
+ * requests that would not normally need a large CDB. This can be
+ * useful for testing (e.g. to make sure READ_16 support works without
+ * having an array larger than 2TB) and for compatibility -- e.g.
+ * if your device doesn't support READ_6. (ATAPI drives don't.)
+ */
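+	/*
+	 * For example: LBA 0x100000 with 256 blocks passes the 21-bit
+	 * LBA test but fails the 8-bit length test below, so a 10 byte
+	 * CDB is used; an LBA above 0xffffffff forces the 16 byte CDB.
+	 */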
+ if ((minimum_cdb_size < 10)
+ && ((lba & 0x1fffff) == lba)
+ && ((num_blocks & 0xff) == num_blocks)
+ && (byte2 == 0)) {
+ struct scsi_rw_6 *cdb;
+
+ /*
+		 * Note that according to SBC-2, the target should transfer 256
+ * blocks if the transfer length in a READ(6) or WRITE(6) CDB
+ * is set to 0. Since it's possible that some targets
+ * won't do the right thing, we only send a READ(6) or
+ * WRITE(6) for transfer sizes up to and including 255 blocks.
+ */
+ cdb = (struct scsi_rw_6 *)ctsio->cdb;
+
+ cdb->opcode = (read_op) ? READ_6 : WRITE_6;
+ scsi_ulto3b(lba, cdb->addr);
+ cdb->length = num_blocks & 0xff;
+ cdb->control = control;
+
+ ctsio->cdb_len = sizeof(*cdb);
+
+ } else if ((minimum_cdb_size < 12)
+ && ((num_blocks & 0xffff) == num_blocks)
+ && ((lba & 0xffffffff) == lba)) {
+ struct scsi_rw_10 *cdb;
+
+ cdb = (struct scsi_rw_10 *)ctsio->cdb;
+
+ cdb->opcode = (read_op) ? READ_10 : WRITE_10;
+ cdb->byte2 = byte2;
+ scsi_ulto4b(lba, cdb->addr);
+ cdb->reserved = 0;
+ scsi_ulto2b(num_blocks, cdb->length);
+ cdb->control = control;
+
+ ctsio->cdb_len = sizeof(*cdb);
+ } else if ((minimum_cdb_size < 16)
+ && ((num_blocks & 0xffffffff) == num_blocks)
+ && ((lba & 0xffffffff) == lba)) {
+ struct scsi_rw_12 *cdb;
+
+ cdb = (struct scsi_rw_12 *)ctsio->cdb;
+
+ cdb->opcode = (read_op) ? READ_12 : WRITE_12;
+ cdb->byte2 = byte2;
+ scsi_ulto4b(lba, cdb->addr);
+ scsi_ulto4b(num_blocks, cdb->length);
+ cdb->reserved = 0;
+ cdb->control = control;
+
+ ctsio->cdb_len = sizeof(*cdb);
+ } else {
+ struct scsi_rw_16 *cdb;
+
+ cdb = (struct scsi_rw_16 *)ctsio->cdb;
+
+ cdb->opcode = (read_op) ? READ_16 : WRITE_16;
+ cdb->byte2 = byte2;
+ scsi_u64to8b(lba, cdb->addr);
+ scsi_ulto4b(num_blocks, cdb->length);
+ cdb->reserved = 0;
+ cdb->control = control;
+
+ ctsio->cdb_len = sizeof(*cdb);
+ }
+
+ if (read_op != 0)
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ else
+ io->io_hdr.flags = CTL_FLAG_DATA_OUT;
+ ctsio->tag_type = tag_type;
+ ctsio->ext_data_ptr = data_ptr;
+ ctsio->ext_data_len = data_len;
+ ctsio->ext_sg_entries = 0;
+ ctsio->ext_data_filled = 0;
+ ctsio->sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_read_capacity(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
+ uint32_t addr, int reladr, int pmi,
+ ctl_tag_type tag_type, uint8_t control)
+{
+ struct scsi_read_capacity *cdb;
+
+ ctl_scsi_zero_io(io);
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ cdb = (struct scsi_read_capacity *)io->scsiio.cdb;
+
+ cdb->opcode = READ_CAPACITY;
+ if (reladr)
+ cdb->byte2 = SRC_RELADR;
+ if (pmi)
+ cdb->pmi = SRC_PMI;
+ scsi_ulto4b(addr, cdb->addr);
+ cdb->control = control;
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ io->scsiio.tag_type = tag_type;
+ io->scsiio.ext_data_ptr = data_ptr;
+ io->scsiio.ext_data_len = data_len;
+ io->scsiio.ext_sg_entries = 0;
+ io->scsiio.ext_data_filled = 0;
+ io->scsiio.sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_read_capacity_16(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, uint64_t addr, int reladr,
+ int pmi, ctl_tag_type tag_type, uint8_t control)
+{
+ struct scsi_read_capacity_16 *cdb;
+
+ ctl_scsi_zero_io(io);
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ cdb = (struct scsi_read_capacity_16 *)io->scsiio.cdb;
+
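+	/*
+	 * READ CAPACITY(16) is encoded as the SERVICE ACTION IN(16)
+	 * opcode with the READ CAPACITY(16) service action.
+	 */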
+ cdb->opcode = SERVICE_ACTION_IN;
+ cdb->service_action = SRC16_SERVICE_ACTION;
+ if (reladr)
+ cdb->reladr |= SRC16_RELADR;
+ if (pmi)
+ cdb->reladr |= SRC16_PMI;
+ scsi_u64to8b(addr, cdb->addr);
+ scsi_ulto4b(data_len, cdb->alloc_len);
+ cdb->control = control;
+
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ io->scsiio.tag_type = tag_type;
+ io->scsiio.ext_data_ptr = data_ptr;
+ io->scsiio.ext_data_len = data_len;
+ io->scsiio.ext_sg_entries = 0;
+ io->scsiio.ext_data_filled = 0;
+ io->scsiio.sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_mode_sense(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
+ int dbd, int llbaa, uint8_t page_code, uint8_t pc,
+ uint8_t subpage, int minimum_cdb_size,
+ ctl_tag_type tag_type, uint8_t control)
+{
+ ctl_scsi_zero_io(io);
+
+ if ((minimum_cdb_size < 10)
+ && (llbaa == 0)
+ && (data_len < 256)) {
+ struct scsi_mode_sense_6 *cdb;
+
+ cdb = (struct scsi_mode_sense_6 *)io->scsiio.cdb;
+
+ cdb->opcode = MODE_SENSE_6;
+ if (dbd)
+ cdb->byte2 |= SMS_DBD;
+ cdb->page = page_code | pc;
+ cdb->subpage = subpage;
+ cdb->length = data_len;
+ cdb->control = control;
+ } else {
+ struct scsi_mode_sense_10 *cdb;
+
+ cdb = (struct scsi_mode_sense_10 *)io->scsiio.cdb;
+
+ cdb->opcode = MODE_SENSE_10;
+ if (dbd)
+ cdb->byte2 |= SMS_DBD;
+ if (llbaa)
+ cdb->byte2 |= SMS10_LLBAA;
+ cdb->page = page_code | pc;
+ cdb->subpage = subpage;
+ scsi_ulto2b(data_len, cdb->length);
+ cdb->control = control;
+ }
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ io->scsiio.tag_type = tag_type;
+ io->scsiio.ext_data_ptr = data_ptr;
+ io->scsiio.ext_data_len = data_len;
+ io->scsiio.ext_sg_entries = 0;
+ io->scsiio.ext_data_filled = 0;
+ io->scsiio.sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_start_stop(union ctl_io *io, int start, int load_eject, int immediate,
+ int power_conditions, int onoffline __unused,
+ ctl_tag_type tag_type, uint8_t control)
+{
+	struct scsi_start_stop_unit *cdb;
+
+	ctl_scsi_zero_io(io);
+
+	cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
+
+ cdb->opcode = START_STOP_UNIT;
+ if (immediate)
+ cdb->byte2 |= SSS_IMMED;
+#ifdef NEEDTOPORT
+ if (onoffline)
+ cdb->byte2 |= SSS_ONOFFLINE;
+#endif
+ cdb->how = power_conditions;
+ if (load_eject)
+ cdb->how |= SSS_LOEJ;
+ if (start)
+ cdb->how |= SSS_START;
+ cdb->control = control;
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ io->io_hdr.flags = CTL_FLAG_DATA_NONE;
+ io->scsiio.tag_type = tag_type;
+ io->scsiio.ext_data_ptr = NULL;
+ io->scsiio.ext_data_len = 0;
+ io->scsiio.ext_sg_entries = 0;
+ io->scsiio.ext_data_filled = 0;
+ io->scsiio.sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_sync_cache(union ctl_io *io, int immed, int reladr,
+ int minimum_cdb_size, uint64_t starting_lba,
+ uint32_t block_count, ctl_tag_type tag_type,
+ uint8_t control)
+{
+ ctl_scsi_zero_io(io);
+
+ if ((minimum_cdb_size < 16)
+ && ((block_count & 0xffff) == block_count)
+ && ((starting_lba & 0xffffffff) == starting_lba)) {
+ struct scsi_sync_cache *cdb;
+
+ cdb = (struct scsi_sync_cache *)io->scsiio.cdb;
+
+ cdb->opcode = SYNCHRONIZE_CACHE;
+ if (reladr)
+ cdb->byte2 |= SSC_RELADR;
+
+ if (immed)
+ cdb->byte2 |= SSC_IMMED;
+
+ scsi_ulto4b(starting_lba, cdb->begin_lba);
+ scsi_ulto2b(block_count, cdb->lb_count);
+ cdb->control = control;
+ } else {
+ struct scsi_sync_cache_16 *cdb;
+
+ cdb = (struct scsi_sync_cache_16 *)io->scsiio.cdb;
+
+ cdb->opcode = SYNCHRONIZE_CACHE_16;
+ if (reladr)
+ cdb->byte2 |= SSC_RELADR;
+
+ if (immed)
+ cdb->byte2 |= SSC_IMMED;
+
+ scsi_u64to8b(starting_lba, cdb->begin_lba);
+ scsi_ulto4b(block_count, cdb->lb_count);
+ cdb->control = control;
+ }
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ io->io_hdr.flags = CTL_FLAG_DATA_NONE;
+ io->scsiio.tag_type = tag_type;
+ io->scsiio.ext_data_ptr = NULL;
+ io->scsiio.ext_data_len = 0;
+ io->scsiio.ext_sg_entries = 0;
+ io->scsiio.ext_data_filled = 0;
+ io->scsiio.sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_persistent_res_in(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, int action,
+ ctl_tag_type tag_type, uint8_t control)
+{
+	struct scsi_per_res_in *cdb;
+
+ ctl_scsi_zero_io(io);
+
+ cdb = (struct scsi_per_res_in *)io->scsiio.cdb;
+ cdb->opcode = PERSISTENT_RES_IN;
+ cdb->action = action;
+ scsi_ulto2b(data_len, cdb->length);
+ cdb->control = control;
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ io->scsiio.tag_type = tag_type;
+ io->scsiio.ext_data_ptr = data_ptr;
+ io->scsiio.ext_data_len = data_len;
+ io->scsiio.ext_sg_entries = 0;
+ io->scsiio.ext_data_filled = 0;
+ io->scsiio.sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_persistent_res_out(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, int action, int type,
+ uint64_t key, uint64_t sa_key,
+ ctl_tag_type tag_type, uint8_t control)
+{
+	struct scsi_per_res_out *cdb;
+ struct scsi_per_res_out_parms *params;
+
+ ctl_scsi_zero_io(io);
+
+ cdb = (struct scsi_per_res_out *)io->scsiio.cdb;
+ params = (struct scsi_per_res_out_parms *)data_ptr;
+
+ cdb->opcode = PERSISTENT_RES_OUT;
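+	/*
+	 * A caller-supplied action of 5 is remapped to service action 6,
+	 * REGISTER AND IGNORE EXISTING KEY in SPC-3 terms; other values
+	 * are passed through unchanged.
+	 */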
+ if (action == 5)
+ cdb->action = 6;
+ else
+ cdb->action = action;
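+	/*
+	 * Map the caller's 0-based type index onto the SPC-3 persistent
+	 * reservation type codes (the zero scope nibble implies LU scope):
+	 * 1 = Write Exclusive, 3 = Exclusive Access, 5/6 = Write
+	 * Exclusive/Exclusive Access - Registrants Only, 7/8 = Write
+	 * Exclusive/Exclusive Access - All Registrants.
+	 */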
+	switch (type) {
+ case 0:
+ cdb->scope_type = 1;
+ break;
+ case 1:
+ cdb->scope_type = 3;
+ break;
+ case 2:
+ cdb->scope_type = 5;
+ break;
+ case 3:
+ cdb->scope_type = 6;
+ break;
+ case 4:
+ cdb->scope_type = 7;
+ break;
+ case 5:
+ cdb->scope_type = 8;
+ break;
+ }
+ scsi_ulto4b(data_len, cdb->length);
+ cdb->control = control;
+
+ scsi_u64to8b(key, params->res_key.key);
+ scsi_u64to8b(sa_key, params->serv_act_res_key);
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ io->io_hdr.flags = CTL_FLAG_DATA_OUT;
+ io->scsiio.tag_type = tag_type;
+ io->scsiio.ext_data_ptr = data_ptr;
+ io->scsiio.ext_data_len = data_len;
+ io->scsiio.ext_sg_entries = 0;
+ io->scsiio.ext_data_filled = 0;
+ io->scsiio.sense_len = SSD_FULL_SIZE;
+}
+
+void
+ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
+ uint8_t action, ctl_tag_type tag_type, uint8_t control)
+{
+ struct scsi_maintenance_in *cdb;
+
+ ctl_scsi_zero_io(io);
+
+ cdb = (struct scsi_maintenance_in *)io->scsiio.cdb;
+ cdb->opcode = MAINTENANCE_IN;
+ cdb->byte2 = action;
+ scsi_ulto4b(data_len, cdb->length);
+ cdb->control = control;
+
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ io->io_hdr.flags = CTL_FLAG_DATA_IN;
+ io->scsiio.tag_type = tag_type;
+ io->scsiio.ext_data_ptr = data_ptr;
+ io->scsiio.ext_data_len = data_len;
+ io->scsiio.ext_sg_entries = 0;
+ io->scsiio.ext_data_filled = 0;
+ io->scsiio.sense_len = SSD_FULL_SIZE;
+}
+
+#ifndef _KERNEL
+union ctl_io *
+ctl_scsi_alloc_io(struct ctl_id initid)
+{
+ union ctl_io *io;
+
+ io = (union ctl_io *)malloc(sizeof(*io));
+ if (io == NULL)
+ goto bailout;
+
+ io->io_hdr.nexus.initid = initid;
+
+bailout:
+ return (io);
+}
+
+void
+ctl_scsi_free_io(union ctl_io *io)
+{
+ free(io);
+}
+
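+/*
+ * Minimal userland sketch (not compiled in) of the intended call
+ * pattern for the CDB builders above; the submission step depends on
+ * the frontend in use and is elided here.
+ */
+#if 0
+static void
+example_tur(struct ctl_id initid)
+{
+	union ctl_io *io;
+
+	io = ctl_scsi_alloc_io(initid);
+	if (io == NULL)
+		return;
+
+	/* Build a TEST UNIT READY CDB in the freshly allocated io. */
+	ctl_scsi_tur(io, CTL_TAG_SIMPLE, /*control*/ 0);
+
+	/* ... hand the io to a frontend here ... */
+
+	ctl_scsi_free_io(io);
+}
+#endif
+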
+#endif /* !_KERNEL */
+
+void
+ctl_scsi_zero_io(union ctl_io *io)
+{
+ void *pool_ref;
+
+ if (io == NULL)
+ return;
+
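+	/*
+	 * Save the pool pointer across the memset() so the zeroed ctl_io
+	 * can still be returned to the I/O pool it was allocated from.
+	 */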
+ pool_ref = io->io_hdr.pool;
+
+ memset(io, 0, sizeof(*io));
+
+ io->io_hdr.pool = pool_ref;
+}
+
+const char *
+ctl_scsi_task_string(struct ctl_taskio *taskio)
+{
+ unsigned int i;
+
+ for (i = 0; i < (sizeof(ctl_task_table)/sizeof(ctl_task_table[0]));
+ i++) {
+ if (taskio->task_action == ctl_task_table[i].task_action) {
+ return (ctl_task_table[i].description);
+ }
+ }
+
+ return (NULL);
+}
+
+void
+ctl_io_error_sbuf(union ctl_io *io, struct scsi_inquiry_data *inq_data,
+ struct sbuf *sb)
+{
+ struct ctl_status_desc *status_desc;
+ char path_str[64];
+ unsigned int i;
+
+ status_desc = NULL;
+
+ for (i = 0; i < (sizeof(ctl_status_table)/sizeof(ctl_status_table[0]));
+ i++) {
+ if ((io->io_hdr.status & CTL_STATUS_MASK) ==
+ ctl_status_table[i].status) {
+ status_desc = &ctl_status_table[i];
+ break;
+ }
+ }
+
+ ctl_scsi_path_string(io, path_str, sizeof(path_str));
+
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ sbuf_cat(sb, path_str);
+
+ ctl_scsi_command_string(&io->scsiio, NULL, sb);
+
+ sbuf_printf(sb, "\n");
+
+ sbuf_printf(sb, "%sTag: 0x%04x, Type: %d\n", path_str,
+ io->scsiio.tag_num, io->scsiio.tag_type);
+ break;
+ case CTL_IO_TASK: {
+ const char *task_desc;
+
+ sbuf_cat(sb, path_str);
+
+ task_desc = ctl_scsi_task_string(&io->taskio);
+
+ if (task_desc == NULL)
+ sbuf_printf(sb, "Unknown Task Action %d (%#x)",
+ io->taskio.task_action,
+ io->taskio.task_action);
+ else
+ sbuf_printf(sb, "Task Action: %s", task_desc);
+
+ sbuf_printf(sb, "\n");
+
+ switch (io->taskio.task_action) {
+ case CTL_TASK_ABORT_TASK:
+ case CTL_TASK_ABORT_TASK_SET:
+ case CTL_TASK_CLEAR_TASK_SET:
+ sbuf_printf(sb, "%sTag: 0x%04x, Type: %d\n", path_str,
+ io->taskio.tag_num,
+ io->taskio.tag_type);
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ sbuf_cat(sb, path_str);
+ if (status_desc == NULL)
+ sbuf_printf(sb, "CTL Status: Unknown status %#x\n",
+ io->io_hdr.status);
+ else
+ sbuf_printf(sb, "CTL Status: %s\n", status_desc->description);
+
+ if ((io->io_hdr.io_type == CTL_IO_SCSI)
+ && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)) {
+ sbuf_cat(sb, path_str);
+ sbuf_printf(sb, "SCSI Status: %s\n",
+ ctl_scsi_status_string(&io->scsiio));
+
+ if (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)
+ ctl_scsi_sense_sbuf(&io->scsiio, inq_data,
+ sb, SSS_FLAG_NONE);
+ }
+}
+
+char *
+ctl_io_error_string(union ctl_io *io, struct scsi_inquiry_data *inq_data,
+ char *str, int str_len)
+{
+ struct sbuf sb;
+
+ sbuf_new(&sb, str, str_len, SBUF_FIXEDLEN);
+
+ ctl_io_error_sbuf(io, inq_data, &sb);
+
+ sbuf_finish(&sb);
+
+ return (sbuf_data(&sb));
+}
+
+#ifdef _KERNEL
+
+void
+ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data)
+{
+ char str[512];
+#ifdef NEEDTOPORT
+ char *message;
+ char *line;
+
+	message = ctl_io_error_string(io, inq_data, str, sizeof(str));
+
+ for (line = strsep(&message, "\n"); line != NULL;
+ line = strsep(&message, "\n")) {
+ csevent_log(CSC_CTL | CSC_SHELF_SW | CTL_ERROR_REPORT,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown, "%s", line);
+ }
+#else
+ printf("%s", ctl_io_error_string(io, inq_data, str, sizeof(str)));
+#endif
+}
+
+#else /* _KERNEL */
+
+void
+ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data,
+ FILE *ofile)
+{
+ char str[512];
+
+ fprintf(ofile, "%s", ctl_io_error_string(io, inq_data, str,
+ sizeof(str)));
+}
+
+#endif /* _KERNEL */
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/ctl_util.h b/sys/cam/ctl/ctl_util.h
new file mode 100644
index 0000000..29bc3e5
--- /dev/null
+++ b/sys/cam/ctl/ctl_util.h
@@ -0,0 +1,119 @@
+/*-
+ * Copyright (c) 2003 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_util.h#2 $
+ * $FreeBSD$
+ */
+/*
+ * CAM Target Layer SCSI library interface
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#ifndef _CTL_UTIL_H
+#define _CTL_UTIL_H 1
+
+__BEGIN_DECLS
+
+void ctl_scsi_tur(union ctl_io *io, ctl_tag_type tag_type, uint8_t control);
+void ctl_scsi_inquiry(union ctl_io *io, uint8_t *data_ptr, int32_t data_len,
+ uint8_t byte2, uint8_t page_code, ctl_tag_type tag_type,
+ uint8_t control);
+void ctl_scsi_request_sense(union ctl_io *io, uint8_t *data_ptr,
+ int32_t data_len, uint8_t byte2,
+ ctl_tag_type tag_type, uint8_t control);
+void ctl_scsi_report_luns(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, uint8_t select_report,
+ ctl_tag_type tag_type, uint8_t control);
+void ctl_scsi_read_write_buffer(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, int read_buffer,
+ uint8_t mode, uint8_t buffer_id,
+ uint32_t buffer_offset, ctl_tag_type tag_type,
+ uint8_t control);
+void ctl_scsi_read_write(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, int read_op, uint8_t byte2,
+ int minimum_cdb_size, uint64_t lba,
+ uint32_t num_blocks, ctl_tag_type tag_type,
+ uint8_t control);
+void ctl_scsi_read_capacity(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, uint32_t addr, int reladr,
+ int pmi, ctl_tag_type tag_type, uint8_t control);
+void ctl_scsi_read_capacity_16(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, uint64_t addr, int reladr,
+ int pmi, ctl_tag_type tag_type, uint8_t control);
+void ctl_scsi_mode_sense(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, int dbd, int llbaa,
+ uint8_t page_code, uint8_t pc, uint8_t subpage,
+ int minimum_cdb_size, ctl_tag_type tag_type,
+ uint8_t control);
+void ctl_scsi_start_stop(union ctl_io *io, int start, int load_eject,
+ int immediate, int power_conditions, int onoffline,
+ ctl_tag_type tag_type, uint8_t control);
+void ctl_scsi_sync_cache(union ctl_io *io, int immed, int reladr,
+ int minimum_cdb_size, uint64_t starting_lba,
+ uint32_t block_count, ctl_tag_type tag_type,
+ uint8_t control);
+void ctl_scsi_persistent_res_in(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, int action,
+ ctl_tag_type tag_type, uint8_t control);
+void ctl_scsi_persistent_res_out(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, int action, int type,
+ uint64_t key, uint64_t sa_key,
+ ctl_tag_type tag_type, uint8_t control);
+void ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr,
+ uint32_t data_len, uint8_t action,
+ ctl_tag_type tag_type, uint8_t control);
+#ifndef _KERNEL
+union ctl_io *ctl_scsi_alloc_io(struct ctl_id initid);
+void ctl_scsi_free_io(union ctl_io *io);
+#endif /* !_KERNEL */
+void ctl_scsi_zero_io(union ctl_io *io);
+const char *ctl_scsi_task_string(struct ctl_taskio *taskio);
+void ctl_io_error_sbuf(union ctl_io *io,
+ struct scsi_inquiry_data *inq_data, struct sbuf *sb);
+char *ctl_io_error_string(union ctl_io *io,
+ struct scsi_inquiry_data *inq_data, char *str,
+ int str_len);
+#ifdef _KERNEL
+void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data);
+#else /* _KERNEL */
+void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data,
+			FILE *ofile);
+#endif /* _KERNEL */
+
+__END_DECLS
+
+#endif /* _CTL_UTIL_H */
+
+/*
+ * vim: ts=8
+ */
diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c
new file mode 100644
index 0000000..1d547d5
--- /dev/null
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -0,0 +1,2049 @@
+/*-
+ * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
+ */
+/*
+ * Peripheral driver interface between CAM and CTL (CAM Target Layer).
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <machine/bus.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_queue.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_error.h>
+
+typedef enum {
+ CTLFE_CCB_WAITING = 0x01
+} ctlfe_ccb_types;
+
+struct ctlfe_softc {
+ struct ctl_frontend fe;
+ path_id_t path_id;
+ struct cam_sim *sim;
+ char port_name[DEV_IDLEN];
+ STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
+ STAILQ_ENTRY(ctlfe_softc) links;
+};
+
+STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
+struct mtx ctlfe_list_mtx;
+static char ctlfe_mtx_desc[] = "ctlfelist";
+static int ctlfe_dma_enabled = 1;
+#ifdef CTLFE_INIT_ENABLE
+static int ctlfe_max_targets = 1;
+static int ctlfe_num_targets = 0;
+#endif
+
+typedef enum {
+ CTLFE_LUN_NONE = 0x00,
+ CTLFE_LUN_WILDCARD = 0x01
+} ctlfe_lun_flags;
+
+struct ctlfe_lun_softc {
+ struct ctlfe_softc *parent_softc;
+ struct cam_periph *periph;
+ ctlfe_lun_flags flags;
+ struct callout dma_callout;
+ uint64_t ccbs_alloced;
+ uint64_t ccbs_freed;
+ uint64_t ctios_sent;
+ uint64_t ctios_returned;
+ uint64_t atios_sent;
+ uint64_t atios_returned;
+ uint64_t inots_sent;
+ uint64_t inots_returned;
+ /* bus_dma_tag_t dma_tag; */
+ TAILQ_HEAD(, ccb_hdr) work_queue;
+ STAILQ_ENTRY(ctlfe_lun_softc) links;
+};
+
+typedef enum {
+ CTLFE_CMD_NONE = 0x00,
+ CTLFE_CMD_PIECEWISE = 0x01
+} ctlfe_cmd_flags;
+
+/*
+ * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h.
+ * Currently that is 600 bytes.
+ */
+struct ctlfe_lun_cmd_info {
+ int cur_transfer_index;
+ ctlfe_cmd_flags flags;
+ /*
+ * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
+ * bytes on amd64. So with 32 elements, this is 256 bytes on
+ * i386 and 512 bytes on amd64.
+ */
+ bus_dma_segment_t cam_sglist[32];
+};
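+
+/*
+ * A compile-time check along these lines would enforce the size limit
+ * noted above (a sketch, not part of the original interface):
+ *
+ *	CTASSERT(sizeof(struct ctlfe_lun_cmd_info) <= CTL_PORT_PRIV_SIZE);
+ */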
+
+/*
+ * When we register the adapter/bus, request that this many ctl_ios be
+ * allocated. This should be the maximum supported by the adapter, but we
+ * currently don't have a way to get that back from the path inquiry.
+ * XXX KDM add that to the path inquiry.
+ */
+#define CTLFE_REQ_CTL_IO 4096
+/*
+ * Number of Accept Target I/O CCBs to allocate and queue down to the
+ * adapter per LUN.
+ * XXX KDM should this be controlled by CTL?
+ */
+#define CTLFE_ATIO_PER_LUN 1024
+/*
+ * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
+ * allocate and queue down to the adapter per LUN.
+ * XXX KDM should this be controlled by CTL?
+ */
+#define CTLFE_IN_PER_LUN 1024
+
+/*
+ * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
+ * status to the initiator. The SIM is expected to have its own timeouts,
+ * so we're not putting this timeout around the CCB execution time. The
+ * SIM should timeout and let us know if it has an issue.
+ */
+#define CTLFE_DMA_TIMEOUT 60
+
+/*
+ * Turn this on to enable extra debugging prints.
+ */
+#if 0
+#define CTLFE_DEBUG
+#endif
+
+/*
+ * Use randomly assigned WWNN/WWPN values. This is to work around an issue
+ * in the FreeBSD initiator that makes it unable to rescan the target if
+ * the target gets rebooted and the WWNN/WWPN stay the same.
+ */
+#if 0
+#define RANDOM_WWNN
+#endif
+
+SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW,
+ &ctlfe_dma_enabled, 0, "DMA enabled");
+MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
+
+#define ccb_type ppriv_field0
+/* This is only used in the ATIO */
+#define io_ptr ppriv_ptr1
+
+/* This is only used in the CTIO */
+#define ccb_atio ppriv_ptr1
+
+int ctlfeinitialize(void);
+void ctlfeshutdown(void);
+static periph_init_t ctlfeinit;
+static void ctlfeasync(void *callback_arg, uint32_t code,
+ struct cam_path *path, void *arg);
+static periph_ctor_t ctlferegister;
+static periph_oninv_t ctlfeoninvalidate;
+static periph_dtor_t ctlfecleanup;
+static periph_start_t ctlfestart;
+static void ctlfedone(struct cam_periph *periph,
+ union ccb *done_ccb);
+
+static void ctlfe_onoffline(void *arg, int online);
+static void ctlfe_online(void *arg);
+static void ctlfe_offline(void *arg);
+static int ctlfe_targ_enable(void *arg, struct ctl_id targ_id);
+static int ctlfe_targ_disable(void *arg, struct ctl_id targ_id);
+static int ctlfe_lun_enable(void *arg, struct ctl_id targ_id,
+ int lun_id);
+static int ctlfe_lun_disable(void *arg, struct ctl_id targ_id,
+ int lun_id);
+static void ctlfe_dump_sim(struct cam_sim *sim);
+static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
+static void ctlfe_dma_timeout(void *arg);
+static void ctlfe_datamove_done(union ctl_io *io);
+static void ctlfe_dump(void);
+
+static struct periph_driver ctlfe_driver =
+{
+ ctlfeinit, "ctl",
+ TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0
+};
+PERIPHDRIVER_DECLARE(ctl, ctlfe_driver);
+
+extern struct ctl_softc *control_softc;
+
+int
+ctlfeinitialize(void)
+{
+ cam_status status;
+
+ STAILQ_INIT(&ctlfe_softc_list);
+
+ mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
+
+ xpt_lock_buses();
+ periphdriver_register(&ctlfe_driver);
+ xpt_unlock_buses();
+
+ status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
+ AC_CONTRACT, ctlfeasync, NULL, NULL);
+
+ if (status != CAM_REQ_CMP) {
+ printf("ctl: Failed to attach async callback due to CAM "
+ "status 0x%x!\n", status);
+ }
+
+ return (0);
+}
+
+void
+ctlfeshutdown(void)
+{
+ return;
+}
+
+void
+ctlfeinit(void)
+{
+ cam_status status;
+
+ STAILQ_INIT(&ctlfe_softc_list);
+
+ mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
+
+ KASSERT(control_softc != NULL, ("CTL is not initialized!"));
+
+ status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
+ AC_CONTRACT, ctlfeasync, NULL, NULL);
+
+ if (status != CAM_REQ_CMP) {
+ printf("ctl: Failed to attach async callback due to CAM "
+ "status 0x%x!\n", status);
+ }
+}
+
+static void
+ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
+{
+
+#ifdef CTLFE_DEBUG
+ printf("%s: entered\n", __func__);
+#endif
+
+ /*
+ * When a new path gets registered, and it is capable of target
+ * mode, go ahead and attach. Later on, we may need to be more
+ * selective, but for now this will be sufficient.
+ */
+ switch (code) {
+ case AC_PATH_REGISTERED: {
+ struct ctl_frontend *fe;
+ struct ctlfe_softc *bus_softc;
+ struct ctlfe_lun_softc *lun_softc;
+ struct cam_path *path;
+ struct ccb_pathinq *cpi;
+ cam_status status;
+ int retval;
+
+ cpi = (struct ccb_pathinq *)arg;
+
+ /* Don't attach if it doesn't support target mode */
+ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
+ printf("%s: SIM %s%d doesn't support target mode\n",
+ __func__, cpi->dev_name, cpi->unit_number);
+ break;
+ }
+
+#ifdef CTLFE_INIT_ENABLE
+ if (ctlfe_num_targets >= ctlfe_max_targets) {
+ union ccb *ccb;
+ struct cam_sim *sim;
+
+ ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
+ M_NOWAIT | M_ZERO);
+ if (ccb == NULL) {
+ printf("%s: unable to malloc CCB!\n", __func__);
+ return;
+ }
+ xpt_setup_ccb(&ccb->ccb_h, cpi->ccb_h.path,
+ /*priority*/ 1);
+
+ sim = xpt_path_sim(cpi->ccb_h.path);
+
+ ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
+ ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
+ ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;
+
+ /* We should hold the SIM lock here */
+ mtx_assert(sim->mtx, MA_OWNED);
+
+ xpt_action(ccb);
+
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
+ CAM_REQ_CMP) {
+ printf("%s: SIM %s%d (path id %d) initiator "
+ "enable failed with status %#x\n",
+ __func__, cpi->dev_name,
+ cpi->unit_number, cpi->ccb_h.path_id,
+ ccb->ccb_h.status);
+ } else {
+ printf("%s: SIM %s%d (path id %d) initiator "
+ "enable succeeded\n",
+ __func__, cpi->dev_name,
+ cpi->unit_number, cpi->ccb_h.path_id);
+ }
+
+ free(ccb, M_TEMP);
+
+ break;
+ } else {
+ ctlfe_num_targets++;
+ }
+
+ printf("%s: ctlfe_num_targets = %d\n", __func__,
+ ctlfe_num_targets);
+#endif /* CTLFE_INIT_ENABLE */
+
+ /*
+ * We're in an interrupt context here, so we have to
+ * use M_NOWAIT. Of course this means trouble if we
+ * can't allocate memory.
+ */
+ bus_softc = malloc(sizeof(*bus_softc), M_CTLFE,
+ M_NOWAIT | M_ZERO);
+ if (bus_softc == NULL) {
+ printf("%s: unable to malloc %zd bytes for softc\n",
+ __func__, sizeof(*bus_softc));
+ return;
+ }
+
+ bus_softc->path_id = cpi->ccb_h.path_id;
+ bus_softc->sim = xpt_path_sim(cpi->ccb_h.path);
+ STAILQ_INIT(&bus_softc->lun_softc_list);
+
+ fe = &bus_softc->fe;
+
+ /*
+ * XXX KDM should we be more accurate here ?
+ */
+ if (cpi->transport == XPORT_FC)
+ fe->port_type = CTL_PORT_FC;
+ else
+ fe->port_type = CTL_PORT_SCSI;
+
+ /* XXX KDM what should the real number be here? */
+		fe->num_requested_ctl_io = CTLFE_REQ_CTL_IO;
+ snprintf(bus_softc->port_name, sizeof(bus_softc->port_name),
+ "%s%d", cpi->dev_name, cpi->unit_number);
+ /*
+ * XXX KDM it would be nice to allocate storage in the
+ * frontend structure itself.
+ */
+ fe->port_name = bus_softc->port_name;
+ fe->physical_port = cpi->unit_number;
+ fe->virtual_port = cpi->bus_id;
+ fe->port_online = ctlfe_online;
+ fe->port_offline = ctlfe_offline;
+ fe->onoff_arg = bus_softc;
+ fe->targ_enable = ctlfe_targ_enable;
+ fe->targ_disable = ctlfe_targ_disable;
+ fe->lun_enable = ctlfe_lun_enable;
+ fe->lun_disable = ctlfe_lun_disable;
+ fe->targ_lun_arg = bus_softc;
+ fe->fe_datamove = ctlfe_datamove_done;
+ fe->fe_done = ctlfe_datamove_done;
+ fe->fe_dump = ctlfe_dump;
+ /*
+ * XXX KDM the path inquiry doesn't give us the maximum
+ * number of targets supported.
+ */
+ fe->max_targets = cpi->max_target;
+ fe->max_target_id = cpi->max_target;
+
+ /*
+ * XXX KDM need to figure out whether we're the master or
+ * slave.
+ */
+ printf("%s: calling ctl_frontend_register() for %s%d\n",
+ __func__, cpi->dev_name, cpi->unit_number);
+ retval = ctl_frontend_register(fe, /*master_SC*/ 1);
+ if (retval != 0) {
+ printf("%s: ctl_frontend_register() failed with "
+ "error %d!\n", __func__, retval);
+ free(bus_softc, M_CTLFE);
+ break;
+ } else {
+ mtx_lock(&ctlfe_list_mtx);
+ STAILQ_INSERT_TAIL(&ctlfe_softc_list, bus_softc, links);
+ mtx_unlock(&ctlfe_list_mtx);
+ }
+
+ status = xpt_create_path(&path, /*periph*/ NULL,
+					 bus_softc->path_id, CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD);
+ if (status != CAM_REQ_CMP) {
+ printf("%s: unable to create path for wildcard "
+ "periph\n", __func__);
+ break;
+ }
+ lun_softc = malloc(sizeof(*lun_softc), M_CTLFE,
+ M_NOWAIT | M_ZERO);
+ if (lun_softc == NULL) {
+ xpt_print(path, "%s: unable to allocate softc for "
+ "wildcard periph\n", __func__);
+ xpt_free_path(path);
+ break;
+ }
+
+ lun_softc->parent_softc = bus_softc;
+ lun_softc->flags |= CTLFE_LUN_WILDCARD;
+
+ status = cam_periph_alloc(ctlferegister,
+ ctlfeoninvalidate,
+ ctlfecleanup,
+ ctlfestart,
+ "ctl",
+ CAM_PERIPH_BIO,
+ path,
+ ctlfeasync,
+ 0,
+ lun_softc);
+
+ xpt_free_path(path);
+
+ break;
+ }
+ case AC_PATH_DEREGISTERED:
+ /* ctl_frontend_deregister() */
+ break;
+ case AC_CONTRACT: {
+ struct ac_contract *ac;
+
+ ac = (struct ac_contract *)arg;
+
+ switch (ac->contract_number) {
+ case AC_CONTRACT_DEV_CHG: {
+ struct ac_device_changed *dev_chg;
+ struct ctlfe_softc *softc;
+ int retval, found;
+
+ dev_chg = (struct ac_device_changed *)ac->contract_data;
+
+ printf("%s: WWPN %#jx port %u path %u target %u %s\n",
+			    __func__, (uintmax_t)dev_chg->wwpn, dev_chg->port,
+ xpt_path_path_id(path), dev_chg->target,
+ (dev_chg->arrived == 0) ? "left" : "arrived");
+
+ found = 0;
+
+ mtx_lock(&ctlfe_list_mtx);
+ STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
+ if (softc->path_id == xpt_path_path_id(path)) {
+ found = 1;
+ break;
+ }
+ }
+ mtx_unlock(&ctlfe_list_mtx);
+
+ if (found == 0) {
+ printf("%s: CTL port for CAM path %u not "
+ "found!\n", __func__,
+ xpt_path_path_id(path));
+ break;
+ }
+ if (dev_chg->arrived != 0) {
+ retval = ctl_add_initiator(dev_chg->wwpn,
+ softc->fe.targ_port, dev_chg->target);
+ } else {
+ retval = ctl_remove_initiator(
+ softc->fe.targ_port, dev_chg->target);
+ }
+
+ if (retval != 0) {
+ printf("%s: could not %s port %d iid %u "
+ "WWPN %#jx!\n", __func__,
+ (dev_chg->arrived != 0) ? "add" :
+ "remove", softc->fe.targ_port,
+ dev_chg->target,
+ (uintmax_t)dev_chg->wwpn);
+ }
+ break;
+ }
+ default:
+ printf("%s: unsupported contract number %ju\n",
+ __func__, (uintmax_t)ac->contract_number);
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static cam_status
+ctlferegister(struct cam_periph *periph, void *arg)
+{
+ struct ctlfe_softc *bus_softc;
+ struct ctlfe_lun_softc *softc;
+ struct cam_sim *sim;
+ union ccb en_lun_ccb;
+ cam_status status;
+ int i;
+
+ softc = (struct ctlfe_lun_softc *)arg;
+ bus_softc = softc->parent_softc;
+ sim = xpt_path_sim(periph->path);
+
+ TAILQ_INIT(&softc->work_queue);
+ softc->periph = periph;
+ softc->parent_softc = bus_softc;
+
+ callout_init_mtx(&softc->dma_callout, sim->mtx, /*flags*/ 0);
+ periph->softc = softc;
+
+ xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, /*priority*/ 1);
+ en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
+ en_lun_ccb.cel.grp6_len = 0;
+ en_lun_ccb.cel.grp7_len = 0;
+ en_lun_ccb.cel.enable = 1;
+ xpt_action(&en_lun_ccb);
+ status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
+ if (status != CAM_REQ_CMP) {
+ xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
+ __func__, en_lun_ccb.ccb_h.status);
+ return (status);
+ }
+
+ status = CAM_REQ_CMP;
+
+ for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
+ union ccb *new_ccb;
+
+ new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
+ M_NOWAIT);
+ if (new_ccb == NULL) {
+ status = CAM_RESRC_UNAVAIL;
+ break;
+ }
+ xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
+ new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
+ new_ccb->ccb_h.cbfcnp = ctlfedone;
+ xpt_action(new_ccb);
+ softc->atios_sent++;
+ status = new_ccb->ccb_h.status;
+ if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
+ free(new_ccb, M_CTLFE);
+ break;
+ }
+ }
+
+ status = cam_periph_acquire(periph);
+ if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ xpt_print(periph->path, "%s: could not acquire reference "
+ "count, status = %#x\n", __func__, status);
+ return (status);
+ }
+
+ if (i == 0) {
+ xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
+ "status 0x%x\n", __func__, status);
+ return (CAM_REQ_CMP_ERR);
+ }
+
+ for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
+ union ccb *new_ccb;
+
+ new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
+ M_NOWAIT);
+ if (new_ccb == NULL) {
+ status = CAM_RESRC_UNAVAIL;
+ break;
+ }
+
+ xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
+ new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
+ new_ccb->ccb_h.cbfcnp = ctlfedone;
+ xpt_action(new_ccb);
+ softc->inots_sent++;
+ status = new_ccb->ccb_h.status;
+		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
+ free(new_ccb, M_CTLFE);
+ break;
+ }
+ }
+ if (i == 0) {
+ xpt_print(periph->path, "%s: could not allocate immediate "
+ "notify CCBs, status 0x%x\n", __func__, status);
+ return (CAM_REQ_CMP_ERR);
+ }
+ return (CAM_REQ_CMP);
+}
+
+static void
+ctlfeoninvalidate(struct cam_periph *periph)
+{
+ union ccb en_lun_ccb;
+ cam_status status;
+ struct ctlfe_lun_softc *softc;
+
+ softc = (struct ctlfe_lun_softc *)periph->softc;
+
+ xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, /*priority*/ 1);
+ en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
+ en_lun_ccb.cel.grp6_len = 0;
+ en_lun_ccb.cel.grp7_len = 0;
+ en_lun_ccb.cel.enable = 0;
+ xpt_action(&en_lun_ccb);
+ status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
+ if (status != CAM_REQ_CMP) {
+ xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
+ __func__, en_lun_ccb.ccb_h.status);
+ /*
+ * XXX KDM what do we do now?
+ */
+ }
+	xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju "
+		  "INOTs outstanding, %d refs\n",
+		  (uintmax_t)(softc->atios_sent - softc->atios_returned),
+		  (uintmax_t)(softc->inots_sent - softc->inots_returned),
+		  periph->refcount);
+}
+
+static void
+ctlfecleanup(struct cam_periph *periph)
+{
+ struct ctlfe_lun_softc *softc;
+ struct ctlfe_softc *bus_softc;
+
+ xpt_print(periph->path, "%s: Called\n", __func__);
+
+ softc = (struct ctlfe_lun_softc *)periph->softc;
+ bus_softc = softc->parent_softc;
+
+ STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc,links);
+
+ /*
+ * XXX KDM is there anything else that needs to be done here?
+ */
+ free(softc, M_CTLFE);
+}
+
+static void
+ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
+{
+ struct ctlfe_lun_softc *softc;
+ struct ccb_hdr *ccb_h;
+
+ softc = (struct ctlfe_lun_softc *)periph->softc;
+
+ softc->ccbs_alloced++;
+
+ ccb_h = TAILQ_FIRST(&softc->work_queue);
+ if (periph->immediate_priority <= periph->pinfo.priority) {
+ panic("shouldn't get to the CCB waiting case!");
+ start_ccb->ccb_h.ccb_type = CTLFE_CCB_WAITING;
+ SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
+ periph_links.sle);
+ periph->immediate_priority = CAM_PRIORITY_NONE;
+ wakeup(&periph->ccb_list);
+ } else if (ccb_h == NULL) {
+ softc->ccbs_freed++;
+ xpt_release_ccb(start_ccb);
+ } else {
+ struct ccb_accept_tio *atio;
+ struct ccb_scsiio *csio;
+ uint8_t *data_ptr;
+ uint32_t dxfer_len;
+ ccb_flags flags;
+ union ctl_io *io;
+ uint8_t scsi_status;
+
+ /* Take the ATIO off the work queue */
+ TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
+ atio = (struct ccb_accept_tio *)ccb_h;
+ io = (union ctl_io *)ccb_h->io_ptr;
+ csio = &start_ccb->csio;
+
+ flags = atio->ccb_h.flags &
+ (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
+
+ if ((io == NULL)
+ || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
+ /*
+ * We're done, send status back.
+ */
+ flags |= CAM_SEND_STATUS;
+ if (io == NULL) {
+ scsi_status = SCSI_STATUS_BUSY;
+ csio->sense_len = 0;
+ } else if ((io->io_hdr.status & CTL_STATUS_MASK) ==
+ CTL_CMD_ABORTED) {
+ io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
+
+ /*
+ * If this command was aborted, we don't
+ * need to send status back to the SIM.
+ * Just free the CTIO and ctl_io, and
+ * recycle the ATIO back to the SIM.
+ */
+ xpt_print(periph->path, "%s: aborted "
+ "command 0x%04x discarded\n",
+ __func__, io->scsiio.tag_num);
+ ctl_free_io(io);
+ /*
+ * For a wildcard attachment, commands can
+ * come in with a specific target/lun. Reset
+ * the target and LUN fields back to the
+ * wildcard values before we send them back
+ * down to the SIM. The SIM has a wildcard
+ * LUN enabled, not whatever target/lun
+ * these happened to be.
+ */
+ if (softc->flags & CTLFE_LUN_WILDCARD) {
+ atio->ccb_h.target_id =
+ CAM_TARGET_WILDCARD;
+ atio->ccb_h.target_lun =
+ CAM_LUN_WILDCARD;
+ }
+
+ if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ cam_release_devq(periph->path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ atio->ccb_h.status &= ~CAM_DEV_QFRZN;
+ }
+
+ ccb_h = TAILQ_FIRST(&softc->work_queue);
+
+ if (atio->ccb_h.func_code !=
+ XPT_ACCEPT_TARGET_IO) {
+ xpt_print(periph->path, "%s: func_code "
+ "is %#x\n", __func__,
+ atio->ccb_h.func_code);
+ }
+ start_ccb->ccb_h.func_code = XPT_ABORT;
+ start_ccb->cab.abort_ccb = (union ccb *)atio;
+ start_ccb->ccb_h.cbfcnp = ctlfedone;
+
+ /* Tell the SIM that we've aborted this ATIO */
+ xpt_action(start_ccb);
+ softc->ccbs_freed++;
+ xpt_release_ccb(start_ccb);
+
+ /*
+ * Send the ATIO back down to the SIM.
+ */
+ xpt_action((union ccb *)atio);
+ softc->atios_sent++;
+
+ /*
+ * If we still have work to do, ask for
+ * another CCB. Otherwise, deactivate our
+ * callout.
+ */
+ if (ccb_h != NULL)
+ xpt_schedule(periph, /*priority*/ 1);
+ else
+ callout_stop(&softc->dma_callout);
+
+ return;
+ } else {
+ io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
+ scsi_status = io->scsiio.scsi_status;
+ csio->sense_len = io->scsiio.sense_len;
+ }
+ data_ptr = NULL;
+ dxfer_len = 0;
+ if (io == NULL) {
+ printf("%s: tag %04x io is NULL\n", __func__,
+ atio->tag_id);
+ } else {
+#ifdef CTLFEDEBUG
+ printf("%s: tag %04x status %x\n", __func__,
+ atio->tag_id, io->io_hdr.status);
+#endif
+ }
+ csio->sglist_cnt = 0;
+ if (csio->sense_len != 0) {
+ csio->sense_data = io->scsiio.sense_data;
+ flags |= CAM_SEND_SENSE;
+ } else if (scsi_status == SCSI_STATUS_CHECK_COND) {
+ xpt_print(periph->path, "%s: check condition "
+ "with no sense\n", __func__);
+ }
+ } else {
+ struct ctlfe_lun_cmd_info *cmd_info;
+
+			/*
+			 * This is a datamove call, so we need to set up the
+			 * S/G list.  If we pass in an S/G list, the isp(4)
+			 * driver at least expects physical/bus addresses.
+			 */
+
+ cmd_info = (struct ctlfe_lun_cmd_info *)
+ io->io_hdr.port_priv;
+
+ KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE,
+				("%s: sizeof(struct ctlfe_lun_cmd_info) %zu >= "
+				"CTL_PORT_PRIV_SIZE %d", __func__,
+ sizeof(*cmd_info), CTL_PORT_PRIV_SIZE));
+ io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED;
+
+ /*
+ * Need to zero this, in case it has been used for
+ * a previous datamove for this particular I/O.
+ */
+ bzero(cmd_info, sizeof(*cmd_info));
+ scsi_status = 0;
+
+ /*
+ * Set the direction, relative to the initiator.
+ */
+ flags &= ~CAM_DIR_MASK;
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_IN)
+ flags |= CAM_DIR_IN;
+ else
+ flags |= CAM_DIR_OUT;
+
+ csio->cdb_len = atio->cdb_len;
+
+ if (io->scsiio.kern_sg_entries == 0) {
+ /* No S/G list */
+ data_ptr = io->scsiio.kern_data_ptr;
+ dxfer_len = io->scsiio.kern_data_len;
+ csio->sglist_cnt = 0;
+
+ if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
+ flags |= CAM_DATA_PHYS;
+ } else if (io->scsiio.kern_sg_entries <=
+ (sizeof(cmd_info->cam_sglist)/
+ sizeof(cmd_info->cam_sglist[0]))) {
+ /*
+ * S/G list with physical or virtual pointers.
+ * Just populate the CAM S/G list with the
+ * pointers.
+ */
+ int i;
+ struct ctl_sg_entry *ctl_sglist;
+ bus_dma_segment_t *cam_sglist;
+
+ ctl_sglist = (struct ctl_sg_entry *)
+ io->scsiio.kern_data_ptr;
+ cam_sglist = cmd_info->cam_sglist;
+
+ for (i = 0; i < io->scsiio.kern_sg_entries;i++){
+ cam_sglist[i].ds_addr =
+ (bus_addr_t)ctl_sglist[i].addr;
+ cam_sglist[i].ds_len =
+ ctl_sglist[i].len;
+ }
+ csio->sglist_cnt = io->scsiio.kern_sg_entries;
+ flags |= CAM_SCATTER_VALID;
+ if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
+ flags |= CAM_SG_LIST_PHYS;
+ else
+ flags &= ~CAM_SG_LIST_PHYS;
+ data_ptr = (uint8_t *)cam_sglist;
+ dxfer_len = io->scsiio.kern_data_len;
+ } else {
+ /* S/G list with virtual pointers */
+ struct ctl_sg_entry *sglist;
+ int *ti;
+
+ /*
+ * XXX KDM this is a temporary hack. The
+ * isp(4) driver can't deal with S/G lists
+ * with virtual pointers, so we need to
+ * go through and send down one virtual
+ * pointer at a time.
+ */
+ sglist = (struct ctl_sg_entry *)
+ io->scsiio.kern_data_ptr;
+ ti = &cmd_info->cur_transfer_index;
+ data_ptr = sglist[*ti].addr;
+ dxfer_len = sglist[*ti].len;
+ csio->sglist_cnt = 0;
+ cmd_info->flags |= CTLFE_CMD_PIECEWISE;
+ (*ti)++;
+ }
+
+ io->scsiio.ext_data_filled += dxfer_len;
+
+ if (io->scsiio.ext_data_filled >
+ io->scsiio.kern_total_len) {
+ xpt_print(periph->path, "%s: tag 0x%04x "
+ "fill len %u > total %u\n",
+ __func__, io->scsiio.tag_num,
+ io->scsiio.ext_data_filled,
+ io->scsiio.kern_total_len);
+ }
+ }
+
+#ifdef CTLFEDEBUG
+ printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
+ (flags & CAM_SEND_STATUS) ? "done" : "datamove",
+ atio->tag_id, flags, data_ptr, dxfer_len);
+#endif
+
+ /*
+ * Valid combinations:
+ * - CAM_SEND_STATUS, SCATTER_VALID = 0, dxfer_len = 0,
+ * sglist_cnt = 0
+ * - CAM_SEND_STATUS = 0, SCATTER_VALID = 0, dxfer_len != 0,
+ * sglist_cnt = 0
+ * - CAM_SEND_STATUS = 0, SCATTER_VALID, dxfer_len != 0,
+ * sglist_cnt != 0
+ */
+#ifdef CTLFEDEBUG
+ if (((flags & CAM_SEND_STATUS)
+ && (((flags & CAM_SCATTER_VALID) != 0)
+ || (dxfer_len != 0)
+ || (csio->sglist_cnt != 0)))
+ || (((flags & CAM_SEND_STATUS) == 0)
+ && (dxfer_len == 0))
+ || ((flags & CAM_SCATTER_VALID)
+ && (csio->sglist_cnt == 0))
+ || (((flags & CAM_SCATTER_VALID) == 0)
+ && (csio->sglist_cnt != 0))) {
+ printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
+ "%d sg %u\n", __func__, atio->tag_id,
+ atio->cdb_io.cdb_bytes[0], flags, dxfer_len,
+ csio->sglist_cnt);
+ if (io != NULL) {
+ printf("%s: tag %04x io status %#x\n", __func__,
+ atio->tag_id, io->io_hdr.status);
+ } else {
+ printf("%s: tag %04x no associated io\n",
+ __func__, atio->tag_id);
+ }
+ }
+#endif
+ cam_fill_ctio(csio,
+ /*retries*/ 2,
+ ctlfedone,
+ flags,
+ (flags & CAM_TAG_ACTION_VALID) ?
+ MSG_SIMPLE_Q_TAG : 0,
+ atio->tag_id,
+ atio->init_id,
+ scsi_status,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*timeout*/ 5 * 1000);
+ start_ccb->ccb_h.ccb_atio = atio;
+ if (((flags & CAM_SEND_STATUS) == 0)
+ && (io != NULL))
+ io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
+
+ softc->ctios_sent++;
+
+ xpt_action(start_ccb);
+
+ if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ cam_release_devq(periph->path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ atio->ccb_h.status &= ~CAM_DEV_QFRZN;
+ }
+
+ ccb_h = TAILQ_FIRST(&softc->work_queue);
+ }
+ /*
+ * If we still have work to do, ask for another CCB. Otherwise,
+ * deactivate our callout.
+ */
+ if (ccb_h != NULL)
+ xpt_schedule(periph, /*priority*/ 1);
+ else
+ callout_stop(&softc->dma_callout);
+}
+
+static void
+ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
+{
+ struct ctlfe_lun_softc *softc;
+
+ softc = (struct ctlfe_lun_softc *)periph->softc;
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_ACCEPT_TARGET_IO:
+ softc->atios_returned++;
+ break;
+ case XPT_IMMEDIATE_NOTIFY:
+ case XPT_NOTIFY_ACKNOWLEDGE:
+ softc->inots_returned++;
+ break;
+ default:
+ break;
+ }
+
+ free(ccb, M_CTLFE);
+
+	KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: "
+		"atios_returned %ju > atios_sent %ju", __func__,
+		(uintmax_t)softc->atios_returned,
+		(uintmax_t)softc->atios_sent));
+	KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: "
+		"inots_returned %ju > inots_sent %ju", __func__,
+		(uintmax_t)softc->inots_returned,
+		(uintmax_t)softc->inots_sent));
+
+ /*
+ * If we have received all of our CCBs, we can release our
+ * reference on the peripheral driver. It will probably go away
+ * now.
+ */
+ if ((softc->atios_returned == softc->atios_sent)
+ && (softc->inots_returned == softc->inots_sent)) {
+ cam_periph_release_locked(periph);
+ }
+}
+
+static void
+ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct ctlfe_lun_softc *softc;
+ struct ctlfe_softc *bus_softc;
+
+#ifdef CTLFE_DEBUG
+ printf("%s: entered, func_code = %#x, type = %#lx\n", __func__,
+ done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type);
+#endif
+
+ softc = (struct ctlfe_lun_softc *)periph->softc;
+ bus_softc = softc->parent_softc;
+
+ if (done_ccb->ccb_h.ccb_type == CTLFE_CCB_WAITING) {
+ panic("shouldn't get to the CCB waiting case!");
+ wakeup(&done_ccb->ccb_h.cbfcnp);
+ return;
+ }
+
+ /*
+ * If the peripheral is invalid, ATIOs and immediate notify CCBs
+ * need to be freed. Most of the ATIOs and INOTs that come back
+ * will be CCBs that are being returned from the SIM as a result of
+ * our disabling the LUN.
+ *
+ * Other CCB types are handled in their respective cases below.
+ */
+ if (periph->flags & CAM_PERIPH_INVALID) {
+ switch (done_ccb->ccb_h.func_code) {
+ case XPT_ACCEPT_TARGET_IO:
+ case XPT_IMMEDIATE_NOTIFY:
+ case XPT_NOTIFY_ACKNOWLEDGE:
+ ctlfe_free_ccb(periph, done_ccb);
+ return;
+ default:
+ break;
+ }
+
+ }
+ switch (done_ccb->ccb_h.func_code) {
+ case XPT_ACCEPT_TARGET_IO: {
+ union ctl_io *io;
+ struct ccb_accept_tio *atio;
+
+ atio = &done_ccb->atio;
+
+ softc->atios_returned++;
+
+ /*
+ * Allocate a ctl_io, pass it to CTL, and wait for the
+ * datamove or done.
+ */
+ io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
+ if (io == NULL) {
+ atio->ccb_h.flags &= ~CAM_DIR_MASK;
+ atio->ccb_h.flags |= CAM_DIR_NONE;
+
+ printf("%s: ctl_alloc_io failed!\n", __func__);
+
+ /*
+ * XXX KDM need to set SCSI_STATUS_BUSY, but there
+ * is no field in the ATIO structure to do that,
+ * and we aren't able to allocate a ctl_io here.
+ * What to do?
+ */
+ atio->sense_len = 0;
+ done_ccb->ccb_h.io_ptr = NULL;
+ TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
+ periph_links.tqe);
+ xpt_schedule(periph, /*priority*/ 1);
+ break;
+ }
+ ctl_zero_io(io);
+
+ /* Save pointers on both sides */
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
+ done_ccb->ccb_h.io_ptr = io;
+
+ /*
+ * Only SCSI I/O comes down this path, resets, etc. come
+ * down the immediate notify path below.
+ */
+ io->io_hdr.io_type = CTL_IO_SCSI;
+ io->io_hdr.nexus.initid.id = atio->init_id;
+ io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
+ io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
+ io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
+ io->scsiio.tag_num = atio->tag_id;
+ switch (atio->tag_action) {
+ case CAM_TAG_ACTION_NONE:
+ io->scsiio.tag_type = CTL_TAG_UNTAGGED;
+ break;
+ case MSG_SIMPLE_TASK:
+ io->scsiio.tag_type = CTL_TAG_SIMPLE;
+ break;
+ case MSG_HEAD_OF_QUEUE_TASK:
+ io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
+ break;
+ case MSG_ORDERED_TASK:
+ io->scsiio.tag_type = CTL_TAG_ORDERED;
+ break;
+ case MSG_ACA_TASK:
+ io->scsiio.tag_type = CTL_TAG_ACA;
+ break;
+ default:
+ io->scsiio.tag_type = CTL_TAG_UNTAGGED;
+ printf("%s: unhandled tag type %#x!!\n", __func__,
+ atio->tag_action);
+ break;
+ }
+ if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
+			printf("%s: WARNING: CDB len %d > ctl_io space %zu\n",
+			       __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
+ }
+ io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
+ bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
+ io->scsiio.cdb_len);
+
+#ifdef CTLFEDEBUG
+ printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__,
+ (uintmax_t)io->io_hdr.nexus.initid.id,
+ io->io_hdr.nexus.targ_port,
+ (uintmax_t)io->io_hdr.nexus.targ_target.id,
+ io->io_hdr.nexus.targ_lun,
+ io->scsiio.tag_num, io->scsiio.cdb[0]);
+#endif
+
+ ctl_queue(io);
+ break;
+ }
+ case XPT_CONT_TARGET_IO: {
+ struct ccb_accept_tio *atio;
+ union ctl_io *io;
+
+ atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
+ io = (union ctl_io *)atio->ccb_h.io_ptr;
+
+ softc->ctios_returned++;
+#ifdef CTLFEDEBUG
+ printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
+ __func__, atio->tag_id, done_ccb->ccb_h.flags);
+#endif
+ /*
+ * If we were sending status back to the initiator, free up
+ * resources. If we were doing a datamove, call the
+ * datamove done routine.
+ */
+ if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) {
+ softc->ccbs_freed++;
+ xpt_release_ccb(done_ccb);
+ ctl_free_io(io);
+ /*
+ * For a wildcard attachment, commands can come in
+ * with a specific target/lun. Reset the target
+ * and LUN fields back to the wildcard values before
+ * we send them back down to the SIM. The SIM has
+ * a wildcard LUN enabled, not whatever target/lun
+ * these happened to be.
+ */
+ if (softc->flags & CTLFE_LUN_WILDCARD) {
+ atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
+ atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
+ }
+ if (periph->flags & CAM_PERIPH_INVALID) {
+ ctlfe_free_ccb(periph, (union ccb *)atio);
+ return;
+ } else {
+ xpt_action((union ccb *)atio);
+ softc->atios_sent++;
+ }
+ } else {
+ struct ctlfe_lun_cmd_info *cmd_info;
+ struct ccb_scsiio *csio;
+
+ csio = &done_ccb->csio;
+ cmd_info = (struct ctlfe_lun_cmd_info *)
+ io->io_hdr.port_priv;
+
+ io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
+
+ io->scsiio.ext_data_len += csio->dxfer_len;
+ if (io->scsiio.ext_data_len >
+ io->scsiio.kern_total_len) {
+ xpt_print(periph->path, "%s: tag 0x%04x "
+ "done len %u > total %u sent %u\n",
+ __func__, io->scsiio.tag_num,
+ io->scsiio.ext_data_len,
+ io->scsiio.kern_total_len,
+ io->scsiio.ext_data_filled);
+ }
+ /*
+ * Translate CAM status to CTL status. Success
+ * does not change the overall, ctl_io status. In
+ * that case we just set port_status to 0. If we
+ * have a failure, though, set a data phase error
+ * for the overall ctl_io.
+ */
+ switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
+ case CAM_REQ_CMP:
+ io->io_hdr.port_status = 0;
+ break;
+ default:
+ /*
+ * XXX KDM the isp(4) driver doesn't really
+ * seem to send errors back for data
+ * transfers that I can tell. There is one
+ * case where it'll send CAM_REQ_CMP_ERR,
+ * but probably not that many more cases.
+ * So set a generic data phase error here,
+ * like the SXP driver sets.
+ */
+ io->io_hdr.port_status = 0xbad1;
+ ctl_set_data_phase_error(&io->scsiio);
+ /*
+ * XXX KDM figure out residual.
+ */
+ break;
+ }
+ /*
+ * If we had to break this S/G list into multiple
+ * pieces, figure out where we are in the list, and
+ * continue sending pieces if necessary.
+ */
+ if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
+ && (io->io_hdr.port_status == 0)
+ && (cmd_info->cur_transfer_index <
+ io->scsiio.kern_sg_entries)) {
+ struct ctl_sg_entry *sglist;
+ ccb_flags flags;
+ uint8_t scsi_status;
+ uint8_t *data_ptr;
+ uint32_t dxfer_len;
+ int *ti;
+
+ sglist = (struct ctl_sg_entry *)
+ io->scsiio.kern_data_ptr;
+ ti = &cmd_info->cur_transfer_index;
+ flags = atio->ccb_h.flags &
+ (CAM_DIS_DISCONNECT|
+ CAM_TAG_ACTION_VALID|
+ CAM_DIR_MASK);
+
+ /*
+ * Set the direction, relative to the initiator.
+ */
+ flags &= ~CAM_DIR_MASK;
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_IN)
+ flags |= CAM_DIR_IN;
+ else
+ flags |= CAM_DIR_OUT;
+
+ data_ptr = sglist[*ti].addr;
+ dxfer_len = sglist[*ti].len;
+ (*ti)++;
+
+ scsi_status = 0;
+
+ if (((flags & CAM_SEND_STATUS) == 0)
+ && (dxfer_len == 0)) {
+					printf("%s: tag %04x has no status "
+					       "or length, cdb = %02x\n",
+					       __func__, atio->tag_id,
+					       atio->cdb_io.cdb_bytes[0]);
+ printf("%s: tag %04x io status %#x\n",
+ __func__, atio->tag_id,
+ io->io_hdr.status);
+ }
+
+ cam_fill_ctio(csio,
+ /*retries*/ 2,
+ ctlfedone,
+ flags,
+ (flags & CAM_TAG_ACTION_VALID) ?
+ MSG_SIMPLE_Q_TAG : 0,
+ atio->tag_id,
+ atio->init_id,
+ scsi_status,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*timeout*/ 5 * 1000);
+
+ csio->resid = 0;
+ csio->ccb_h.ccb_atio = atio;
+ io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
+ softc->ctios_sent++;
+ xpt_action((union ccb *)csio);
+ } else {
+ /*
+ * Release the CTIO. The ATIO will be sent back
+ * down to the SIM once we send status.
+ */
+ softc->ccbs_freed++;
+ xpt_release_ccb(done_ccb);
+
+ /* Call the backend move done callback */
+ io->scsiio.be_move_done(io);
+ }
+ }
+ break;
+ }
+ case XPT_IMMEDIATE_NOTIFY: {
+ union ctl_io *io;
+ struct ccb_immediate_notify *inot;
+ cam_status status;
+ int frozen;
+
+ inot = &done_ccb->cin1;
+
+ softc->inots_returned++;
+
+ frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
+
+ printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x "
+ "seq %#x\n", __func__, inot->ccb_h.status,
+ inot->tag_id, inot->seq_id);
+
+ io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
+ if (io != NULL) {
+ int send_ctl_io;
+
+ send_ctl_io = 1;
+
+ ctl_zero_io(io);
+ io->io_hdr.io_type = CTL_IO_TASK;
+			io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =
+				done_ccb;
+ inot->ccb_h.io_ptr = io;
+ io->io_hdr.nexus.initid.id = inot->initiator_id;
+ io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
+ io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
+ io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
+ /* XXX KDM should this be the tag_id? */
+ io->taskio.tag_num = inot->seq_id;
+
+ status = inot->ccb_h.status & CAM_STATUS_MASK;
+ switch (status) {
+ case CAM_SCSI_BUS_RESET:
+ io->taskio.task_action = CTL_TASK_BUS_RESET;
+ break;
+ case CAM_BDR_SENT:
+ io->taskio.task_action = CTL_TASK_TARGET_RESET;
+ break;
+ case CAM_MESSAGE_RECV:
+ switch (inot->arg) {
+ case MSG_ABORT_TASK_SET:
+ /*
+ * XXX KDM this isn't currently
+ * supported by CTL. It ends up
+ * being a no-op.
+ */
+ io->taskio.task_action =
+ CTL_TASK_ABORT_TASK_SET;
+ break;
+ case MSG_TARGET_RESET:
+ io->taskio.task_action =
+ CTL_TASK_TARGET_RESET;
+ break;
+ case MSG_ABORT_TASK:
+ io->taskio.task_action =
+ CTL_TASK_ABORT_TASK;
+ break;
+ case MSG_LOGICAL_UNIT_RESET:
+ io->taskio.task_action =
+ CTL_TASK_LUN_RESET;
+ break;
+ case MSG_CLEAR_TASK_SET:
+ /*
+ * XXX KDM this isn't currently
+ * supported by CTL. It ends up
+ * being a no-op.
+ */
+ io->taskio.task_action =
+ CTL_TASK_CLEAR_TASK_SET;
+ break;
+ case MSG_CLEAR_ACA:
+ io->taskio.task_action =
+ CTL_TASK_CLEAR_ACA;
+ break;
+ case MSG_NOOP:
+ send_ctl_io = 0;
+ break;
+ default:
+ xpt_print(periph->path, "%s: "
+ "unsupported message 0x%x\n",
+ __func__, inot->arg);
+ send_ctl_io = 0;
+ break;
+ }
+ break;
+ case CAM_REQ_ABORTED:
+ /*
+ * This request was sent back by the driver.
+ * XXX KDM what do we do here?
+ */
+ send_ctl_io = 0;
+ break;
+ default:
+ xpt_print(periph->path, "%s: "
+ "unsupported CAM status 0x%x\n",
+ __func__, status);
+ send_ctl_io = 0;
+ break;
+ }
+ if (send_ctl_io != 0) {
+ ctl_queue(io);
+ } else {
+ ctl_free_io(io);
+ done_ccb->ccb_h.status = CAM_REQ_INPROG;
+ done_ccb->ccb_h.func_code =
+ XPT_NOTIFY_ACKNOWLEDGE;
+ xpt_action(done_ccb);
+ }
+ } else {
+ xpt_print(periph->path, "%s: could not allocate "
+ "ctl_io for immediate notify!\n", __func__);
+ /* requeue this to the adapter */
+ done_ccb->ccb_h.status = CAM_REQ_INPROG;
+ done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
+ xpt_action(done_ccb);
+ }
+
+ if (frozen != 0) {
+ cam_release_devq(periph->path,
+ /*relsim_flags*/ 0,
+ /*opening reduction*/ 0,
+ /*timeout*/ 0,
+ /*getcount_only*/ 0);
+ }
+ break;
+ }
+ case XPT_NOTIFY_ACKNOWLEDGE:
+ /*
+ * Queue this back down to the SIM as an immediate notify.
+ */
+ done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
+ xpt_action(done_ccb);
+ softc->inots_sent++;
+ break;
+ case XPT_ABORT:
+ /*
+ * XPT_ABORT is an immediate CCB, we shouldn't get here.
+ */
+ panic("%s: XPT_ABORT CCB returned!", __func__);
+ break;
+ case XPT_SET_SIM_KNOB:
+ case XPT_GET_SIM_KNOB:
+ break;
+ default:
+ panic("%s: unexpected CCB type %#x", __func__,
+ done_ccb->ccb_h.func_code);
+ break;
+ }
+}
+
+static void
+ctlfe_onoffline(void *arg, int online)
+{
+ struct ctlfe_softc *bus_softc;
+ union ccb *ccb;
+ cam_status status;
+ struct cam_path *path;
+ struct cam_sim *sim;
+ int set_wwnn;
+
+ bus_softc = (struct ctlfe_softc *)arg;
+
+ set_wwnn = 0;
+
+ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+ if (status != CAM_REQ_CMP) {
+ printf("%s: unable to create path!\n", __func__);
+ return;
+ }
+ ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_WAITOK | M_ZERO);
+ if (ccb == NULL) {
+ printf("%s: unable to malloc CCB!\n", __func__);
+ xpt_free_path(path);
+ return;
+ }
+ xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/ 1);
+
+ sim = xpt_path_sim(path);
+
+ /*
+ * Copan WWN format:
+ *
+ * Bits 63-60: 0x5 NAA, IEEE registered name
+ * Bits 59-36: 0x000ED5 IEEE Company name assigned to Copan
+ * Bits 35-12: Copan SSN (Sequential Serial Number)
+ * Bits 11-8: Type of port:
+ * 1 == N-Port
+ * 2 == F-Port
+ * 3 == NL-Port
+ * Bits 7-0: 0 == Node Name, >0 == Port Number
+ */
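+
+	/*
+	 * As a worked example (with a hypothetical SSN of 0x123456), an
+	 * NL-Port WWPN for port number 1 under this scheme would be:
+	 *
+	 *	  0x5000ED5000000000	(NAA 0x5 | IEEE company name)
+	 *	| 0x0000000123456000	(SSN in bits 35-12)
+	 *	| 0x0000000000000300	(NL-Port in bits 11-8)
+	 *	| 0x0000000000000001	(port number 1 in bits 7-0)
+	 *	= 0x5000ED5123456301
+	 */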
+
+ if (online != 0) {
+
+ ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
+
+ CAM_SIM_LOCK(sim);
+
+ xpt_action(ccb);
+
+ CAM_SIM_UNLOCK(sim);
+
+ if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){
+#ifdef RANDOM_WWNN
+ uint64_t random_bits;
+#endif
+
+ printf("%s: %s current WWNN %#jx\n", __func__,
+ bus_softc->port_name,
+ ccb->knob.xport_specific.fc.wwnn);
+ printf("%s: %s current WWPN %#jx\n", __func__,
+ bus_softc->port_name,
+ ccb->knob.xport_specific.fc.wwpn);
+
+#ifdef RANDOM_WWNN
+ arc4rand(&random_bits, sizeof(random_bits), 0);
+#endif
+
+ /*
+ * XXX KDM this is a bit of a kludge for now. We
+ * take the current WWNN/WWPN from the card, and
+ * replace the company identifier and the NL-Port
+ * indicator and the port number (for the WWPN).
+ * This should be replaced later with ddb_GetWWNN,
+ * or possibly a more centralized scheme. (It
+ * would be nice to have the WWNN/WWPN for each
+ * port stored in the ctl_frontend structure.)
+ */
+#ifdef RANDOM_WWNN
+ ccb->knob.xport_specific.fc.wwnn =
+ (random_bits &
+ 0x0000000fffffff00ULL) |
+ /* Company ID */ 0x5000ED5000000000ULL |
+ /* NL-Port */ 0x0300;
+ ccb->knob.xport_specific.fc.wwpn =
+ (random_bits &
+ 0x0000000fffffff00ULL) |
+ /* Company ID */ 0x5000ED5000000000ULL |
+ /* NL-Port */ 0x3000 |
+ /* Port Num */ (bus_softc->fe.targ_port & 0xff);
+
+ /*
+ * This is a bit of an API break/reversal, but if
+ * we're doing the random WWNN that's a little
+ * different anyway. So record what we're actually
+ * using with the frontend code so it's reported
+ * accurately.
+ */
+ bus_softc->fe.wwnn =
+ ccb->knob.xport_specific.fc.wwnn;
+ bus_softc->fe.wwpn =
+ ccb->knob.xport_specific.fc.wwpn;
+ set_wwnn = 1;
+#else /* RANDOM_WWNN */
+ /*
+ * If the user has specified a WWNN/WWPN, send them
+ * down to the SIM. Otherwise, record what the SIM
+ * has reported.
+ */
+ if ((bus_softc->fe.wwnn != 0)
+ && (bus_softc->fe.wwpn != 0)) {
+ ccb->knob.xport_specific.fc.wwnn =
+ bus_softc->fe.wwnn;
+ ccb->knob.xport_specific.fc.wwpn =
+ bus_softc->fe.wwpn;
+ set_wwnn = 1;
+ } else {
+ bus_softc->fe.wwnn =
+ ccb->knob.xport_specific.fc.wwnn;
+ bus_softc->fe.wwpn =
+ ccb->knob.xport_specific.fc.wwpn;
+ }
+#endif /* RANDOM_WWNN */
+
+
+ if (set_wwnn != 0) {
+ printf("%s: %s new WWNN %#jx\n", __func__,
+ bus_softc->port_name,
+ ccb->knob.xport_specific.fc.wwnn);
+ printf("%s: %s new WWPN %#jx\n", __func__,
+ bus_softc->port_name,
+ ccb->knob.xport_specific.fc.wwpn);
+ }
+ } else {
+ printf("%s: %s has no valid WWNN/WWPN\n", __func__,
+ bus_softc->port_name);
+ }
+ }
+ ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
+ ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
+ if (set_wwnn != 0)
+ ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;
+
+ if (online != 0)
+ ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET;
+ else
+ ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE;
+
+
+ CAM_SIM_LOCK(sim);
+
+ xpt_action(ccb);
+
+ CAM_SIM_UNLOCK(sim);
+
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ printf("%s: SIM %s (path id %d) target %s failed with "
+ "status %#x\n",
+ __func__, bus_softc->port_name, bus_softc->path_id,
+ (online != 0) ? "enable" : "disable",
+ ccb->ccb_h.status);
+ } else {
+ printf("%s: SIM %s (path id %d) target %s succeeded\n",
+ __func__, bus_softc->port_name, bus_softc->path_id,
+ (online != 0) ? "enable" : "disable");
+ }
+
+ free(ccb, M_TEMP);
+ xpt_free_path(path);
+
+ return;
+}
+
+static void
+ctlfe_online(void *arg)
+{
+ ctlfe_onoffline(arg, /*online*/ 1);
+}
+
+static void
+ctlfe_offline(void *arg)
+{
+ ctlfe_onoffline(arg, /*online*/ 0);
+}
+
+static int
+ctlfe_targ_enable(void *arg, struct ctl_id targ_id)
+{
+ return (0);
+}
+
+static int
+ctlfe_targ_disable(void *arg, struct ctl_id targ_id)
+{
+ return (0);
+}
+
+/*
+ * This will get called to enable a LUN on every bus that is attached to
+ * CTL. So we only need to create a path/periph for this particular bus.
+ */
+static int
+ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
+{
+ struct ctlfe_softc *bus_softc;
+ struct ctlfe_lun_softc *softc;
+ struct cam_path *path;
+ struct cam_periph *periph;
+ struct cam_sim *sim;
+ cam_status status;
+
+
+ bus_softc = (struct ctlfe_softc *)arg;
+
+ status = xpt_create_path_unlocked(&path, /*periph*/ NULL,
+ bus_softc->path_id,
+ targ_id.id,
+ lun_id);
+ /* XXX KDM need some way to return status to CTL here? */
+ if (status != CAM_REQ_CMP) {
+ printf("%s: could not create path, status %#x\n", __func__,
+ status);
+ return (1);
+ }
+
+ softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
+ if (softc == NULL) {
+		printf("%s: could not allocate %zu bytes for softc\n",
+		       __func__, sizeof(*softc));
+ xpt_free_path(path);
+ return (1);
+ }
+ sim = xpt_path_sim(path);
+ mtx_lock(sim->mtx);
+ periph = cam_periph_find(path, "ctl");
+ if (periph != NULL) {
+ /* We've already got a periph, no need to alloc a new one. */
+ xpt_free_path(path);
+ free(softc, M_CTLFE);
+ mtx_unlock(sim->mtx);
+ return (0);
+ }
+
+ softc->parent_softc = bus_softc;
+ STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
+
+ status = cam_periph_alloc(ctlferegister,
+ ctlfeoninvalidate,
+ ctlfecleanup,
+ ctlfestart,
+ "ctl",
+ CAM_PERIPH_BIO,
+ path,
+ ctlfeasync,
+ 0,
+ softc);
+
+ mtx_unlock(sim->mtx);
+
+ xpt_free_path(path);
+
+ return (0);
+}
+
+/*
+ * XXX KDM we disable LUN removal here. The problem is that the isp(4)
+ * driver doesn't currently handle LUN removal properly. We need to keep
+ * enough state here at the peripheral level even after LUNs have been
+ * removed inside CTL.
+ *
+ * Once the isp(4) driver is fixed, this can be re-enabled.
+ */
+static int
+ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
+{
+#ifdef NOTYET
+ struct ctlfe_softc *softc;
+ struct ctlfe_lun_softc *lun_softc;
+
+ softc = (struct ctlfe_softc *)arg;
+
+ mtx_lock(softc->sim->mtx);
+ STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
+ struct cam_path *path;
+
+ path = lun_softc->periph->path;
+
+ if ((xpt_path_target_id(path) == targ_id.id)
+ && (xpt_path_lun_id(path) == lun_id)) {
+ break;
+ }
+ }
+ if (lun_softc == NULL) {
+ mtx_unlock(softc->sim->mtx);
+ printf("%s: can't find target %d lun %d\n", __func__,
+ targ_id.id, lun_id);
+ return (1);
+ }
+
+ cam_periph_invalidate(lun_softc->periph);
+
+ mtx_unlock(softc->sim->mtx);
+#endif
+
+ return (0);
+}
+
+static void
+ctlfe_dump_sim(struct cam_sim *sim)
+{
+ int i;
+
+ printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
+ sim->sim_name, sim->unit_number,
+ sim->max_tagged_dev_openings, sim->max_dev_openings);
+ printf("%s%d: max_ccbs: %u, ccb_count: %u\n",
+ sim->sim_name, sim->unit_number,
+ sim->max_ccbs, sim->ccb_count);
+ printf("%s%d: ccb_freeq is %sempty\n",
+ sim->sim_name, sim->unit_number,
+ (SLIST_FIRST(&sim->ccb_freeq) == NULL) ? "" : "NOT ");
+ printf("%s%d: alloc_queue.entries %d, alloc_openings %d\n",
+ sim->sim_name, sim->unit_number,
+ sim->devq->alloc_queue.entries, sim->devq->alloc_openings);
+ printf("%s%d: qfrozen_cnt:", sim->sim_name, sim->unit_number);
+ for (i = 0; i < CAM_RL_VALUES; i++) {
+ printf("%s%u", (i != 0) ? ":" : "",
+ sim->devq->alloc_queue.qfrozen_cnt[i]);
+ }
+ printf("\n");
+}
+
+/*
+ * Assumes that the SIM lock is held.
+ */
+static void
+ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
+{
+ struct ccb_hdr *hdr;
+ struct cam_periph *periph;
+ int num_items;
+
+ periph = softc->periph;
+ num_items = 0;
+
+ TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
+ union ctl_io *io;
+
+ io = hdr->io_ptr;
+
+ num_items++;
+
+ /*
+ * This can happen when we get an ATIO but can't allocate
+ * a ctl_io. See the XPT_ACCEPT_TARGET_IO case in ctlfedone().
+ */
+ if (io == NULL) {
+ struct ccb_scsiio *csio;
+
+ csio = (struct ccb_scsiio *)hdr;
+
+ xpt_print(periph->path, "CCB %#x ctl_io allocation "
+ "failed\n", csio->tag_id);
+ continue;
+ }
+
+ /*
+ * Only regular SCSI I/O is put on the work
+ * queue, so we can print sense here. There may be no
+		 * sense if it's on the queue for a DMA, but this serves to
+ * print out the CCB as well.
+ *
+ * XXX KDM switch this over to scsi_sense_print() when
+ * CTL is merged in with CAM.
+ */
+ ctl_io_error_print(io, NULL);
+
+ /*
+ * We're sending status back to the
+ * initiator, so we're on the queue waiting
+ * for a CTIO to do that.
+ */
+ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
+ continue;
+
+ /*
+ * Otherwise, we're on the queue waiting to
+ * do a data transfer.
+ */
+ xpt_print(periph->path, "Total %u, Current %u, Resid %u\n",
+ io->scsiio.kern_total_len, io->scsiio.kern_data_len,
+ io->scsiio.kern_data_resid);
+ }
+
+ xpt_print(periph->path, "%d requests total waiting for CCBs\n",
+ num_items);
+	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
+		  "freed)\n", (uintmax_t)(softc->ccbs_alloced -
+		  softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
+		  (uintmax_t)softc->ccbs_freed);
+	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
+		  "returned)\n", (uintmax_t)(softc->ctios_sent -
+		  softc->ctios_returned), (uintmax_t)softc->ctios_sent,
+		  (uintmax_t)softc->ctios_returned);
+}
+
+/*
+ * This function is called when we fail to get a CCB for a DMA or status return
+ * to the initiator within the specified time period.
+ *
+ * The callout code should ensure that we hold the SIM mutex here, since
+ * the callout was initialized with callout_init_mtx() on the SIM mutex.
+ */
+static void
+ctlfe_dma_timeout(void *arg)
+{
+ struct ctlfe_lun_softc *softc;
+ struct cam_periph *periph;
+ struct cam_sim *sim;
+ int num_queued;
+
+ softc = (struct ctlfe_lun_softc *)arg;
+ periph = softc->periph;
+ sim = xpt_path_sim(periph->path);
+ num_queued = 0;
+
+ /*
+ * Nothing to do...
+ */
+ if (TAILQ_FIRST(&softc->work_queue) == NULL) {
+ xpt_print(periph->path, "TIMEOUT triggered after %d "
+			  "seconds, but nothing is on the work queue??\n",
+ CTLFE_DMA_TIMEOUT);
+ return;
+ }
+
+ xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to "
+ "start\n", CTLFE_DMA_TIMEOUT);
+
+ ctlfe_dump_queue(softc);
+
+ ctlfe_dump_sim(sim);
+
+ xpt_print(periph->path, "calling xpt_schedule() to attempt to "
+ "unstick our queue\n");
+
+ xpt_schedule(periph, /*priority*/ 1);
+
+ xpt_print(periph->path, "xpt_schedule() call complete\n");
+}
+
+/*
+ * Datamove/done routine called by CTL. Put ourselves on the queue to
+ * receive a CCB from CAM so we can queue the continue I/O request down
+ * to the adapter.
+ */
+static void
+ctlfe_datamove_done(union ctl_io *io)
+{
+ union ccb *ccb;
+ struct cam_sim *sim;
+ struct cam_periph *periph;
+ struct ctlfe_lun_softc *softc;
+
+ ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+
+ sim = xpt_path_sim(ccb->ccb_h.path);
+
+ mtx_lock(sim->mtx);
+
+ periph = xpt_path_periph(ccb->ccb_h.path);
+
+ softc = (struct ctlfe_lun_softc *)periph->softc;
+
+ if (io->io_hdr.io_type == CTL_IO_TASK) {
+ /*
+ * Task management commands don't require any further
+ * communication back to the adapter. Requeue the CCB
+ * to the adapter, and free the CTL I/O.
+ */
+ xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
+ "tag %#x seq %#x\n", __func__,
+ ccb->cin1.tag_id, ccb->cin1.seq_id);
+ /*
+ * Send the notify acknowledge down to the SIM, to let it
+ * know we processed the task management command.
+ */
+ ccb->ccb_h.status = CAM_REQ_INPROG;
+ ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
+ xpt_action(ccb);
+ ctl_free_io(io);
+ } else {
+ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
+ io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
+ else
+ io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
+
+ TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
+ periph_links.tqe);
+
+ /*
+ * Reset the timeout for our latest active DMA.
+ */
+ callout_reset(&softc->dma_callout,
+ CTLFE_DMA_TIMEOUT * hz,
+ ctlfe_dma_timeout, softc);
+ /*
+ * Ask for the CAM transport layer to send us a CCB to do
+ * the DMA or send status, unless ctlfe_dma_enabled is set
+ * to 0.
+ */
+ if (ctlfe_dma_enabled != 0)
+ xpt_schedule(periph, /*priority*/ 1);
+ }
+
+ mtx_unlock(sim->mtx);
+}
+
+static void
+ctlfe_dump(void)
+{
+ struct ctlfe_softc *bus_softc;
+
+ STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
+ struct ctlfe_lun_softc *lun_softc;
+
+ ctlfe_dump_sim(bus_softc->sim);
+
+ STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) {
+ ctlfe_dump_queue(lun_softc);
+ }
+ }
+}
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index 5ec287b..511a3af 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -5057,14 +5057,7 @@ scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries,
scsi_cmd->byte2 |= SI_EVPD;
scsi_cmd->page_code = page_code;
}
- /*
- * A 'transfer units' count of 256 is coded as
- * zero for all commands with a single byte count
- * field.
- */
- if (inq_len == 256)
- inq_len = 0;
- scsi_cmd->length = inq_len;
+ scsi_ulto2b(inq_len, scsi_cmd->length);
}
void
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index 309afe0..ff3d96d 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -175,8 +175,7 @@ struct scsi_inquiry
#define SI_EVPD 0x01
#define SI_CMDDT 0x02
u_int8_t page_code;
- u_int8_t reserved;
- u_int8_t length;
+ u_int8_t length[2];
u_int8_t control;
};
@@ -532,6 +531,55 @@ struct scsi_caching_page {
uint8_t non_cache_seg_size[3];
};
+/*
+ * XXX KDM move this off to a vendor shim.
+ */
+struct copan_power_subpage {
+ uint8_t page_code;
+#define PWR_PAGE_CODE 0x00
+ uint8_t subpage;
+#define PWR_SUBPAGE_CODE 0x02
+ uint8_t page_length[2];
+ uint8_t page_version;
+#define PWR_VERSION 0x01
+ uint8_t total_luns;
+ uint8_t max_active_luns;
+#define PWR_DFLT_MAX_LUNS 0x07
+ uint8_t reserved[25];
+};
+
+/*
+ * XXX KDM move this off to a vendor shim.
+ */
+struct copan_aps_subpage {
+ uint8_t page_code;
+#define APS_PAGE_CODE 0x00
+ uint8_t subpage;
+#define APS_SUBPAGE_CODE 0x03
+ uint8_t page_length[2];
+ uint8_t page_version;
+#define APS_VERSION 0x00
+ uint8_t lock_active;
+#define APS_LOCK_ACTIVE 0x01
+#define APS_LOCK_INACTIVE 0x00
+ uint8_t reserved[26];
+};
+
+/*
+ * XXX KDM move this off to a vendor shim.
+ */
+struct copan_debugconf_subpage {
+ uint8_t page_code;
+#define DBGCNF_PAGE_CODE 0x00
+ uint8_t subpage;
+#define DBGCNF_SUBPAGE_CODE 0xF0
+ uint8_t page_length[2];
+ uint8_t page_version;
+#define DBGCNF_VERSION 0x00
+ uint8_t ctl_time_io_secs[2];
+};
+
+
struct scsi_info_exceptions_page {
u_int8_t page_code;
#define SIEP_PAGE_SAVABLE 0x80 /* Page is savable */
diff --git a/sys/cam/scsi/scsi_da.h b/sys/cam/scsi/scsi_da.h
index da099f6..7605b1a 100644
--- a/sys/cam/scsi/scsi_da.h
+++ b/sys/cam/scsi/scsi_da.h
@@ -421,6 +421,56 @@ union disk_pages /* this is the structure copied from osf */
} flexible_disk;
};
+/*
+ * XXX KDM
+ * Here for CTL compatibility, reconcile this.
+ */
+struct scsi_format_page {
+ uint8_t page_code;
+ uint8_t page_length;
+ uint8_t tracks_per_zone[2];
+ uint8_t alt_sectors_per_zone[2];
+ uint8_t alt_tracks_per_zone[2];
+ uint8_t alt_tracks_per_lun[2];
+ uint8_t sectors_per_track[2];
+ uint8_t bytes_per_sector[2];
+ uint8_t interleave[2];
+ uint8_t track_skew[2];
+ uint8_t cylinder_skew[2];
+ uint8_t flags;
+#define SFP_SSEC 0x80
+#define SFP_HSEC 0x40
+#define SFP_RMB 0x20
+#define SFP_SURF 0x10
+ uint8_t reserved[3];
+};
+
+/*
+ * XXX KDM
+ * Here for CTL compatibility, reconcile this.
+ */
+struct scsi_rigid_disk_page {
+ uint8_t page_code;
+#define SMS_RIGID_DISK_PAGE 0x04
+ uint8_t page_length;
+ uint8_t cylinders[3];
+ uint8_t heads;
+ uint8_t start_write_precomp[3];
+ uint8_t start_reduced_current[3];
+ uint8_t step_rate[2];
+ uint8_t landing_zone_cylinder[3];
+ uint8_t rpl;
+#define SRDP_RPL_DISABLED 0x00
+#define SRDP_RPL_SLAVE 0x01
+#define SRDP_RPL_MASTER 0x02
+#define SRDP_RPL_MASTER_CONTROL 0x03
+ uint8_t rotational_offset;
+ uint8_t reserved1;
+ uint8_t rotation_rate[2];
+ uint8_t reserved2[2];
+};
+
+
struct scsi_da_rw_recovery_page {
u_int8_t page_code;
#define SMS_RW_ERROR_RECOVERY_PAGE 0x01
diff --git a/sys/cam/scsi/scsi_targ_bh.c b/sys/cam/scsi/scsi_targ_bh.c
index 7f37157..92a7ac8 100644
--- a/sys/cam/scsi/scsi_targ_bh.c
+++ b/sys/cam/scsi/scsi_targ_bh.c
@@ -604,7 +604,7 @@ targbhdone(struct cam_periph *periph, union ccb *done_ccb)
atio->ccb_h.flags |= CAM_DIR_IN;
descr->data = &no_lun_inq_data;
descr->data_resid = MIN(sizeof(no_lun_inq_data),
- SCSI_CDB6_LEN(inq->length));
+ scsi_2btoul(inq->length));
descr->data_increment = descr->data_resid;
descr->timeout = 5 * 1000;
descr->status = SCSI_STATUS_OK;
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 92e8809..af351fa 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -1297,6 +1297,7 @@ device targ #SCSI Target Mode Code
device targbh #SCSI Target Mode Blackhole Device
device pass #CAM passthrough driver
device sg #Linux SCSI passthrough
+device ctl #CAM Target Layer
# CAM OPTIONS:
# debugging options:
diff --git a/sys/conf/files b/sys/conf/files
index 099404c..6d600e0 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -115,6 +115,19 @@ cam/scsi/scsi_all.c optional scbus
cam/scsi/scsi_cd.c optional cd
cam/scsi/scsi_ch.c optional ch
cam/ata/ata_da.c optional ada | da
+cam/ctl/ctl.c optional ctl
+cam/ctl/ctl_backend.c optional ctl
+cam/ctl/ctl_backend_block.c optional ctl
+cam/ctl/ctl_backend_ramdisk.c optional ctl
+cam/ctl/ctl_cmd_table.c optional ctl
+cam/ctl/ctl_frontend.c optional ctl
+cam/ctl/ctl_frontend_cam_sim.c optional ctl
+cam/ctl/ctl_frontend_internal.c optional ctl
+cam/ctl/ctl_mem_pool.c optional ctl
+cam/ctl/ctl_scsi_all.c optional ctl
+cam/ctl/ctl_error.c optional ctl
+cam/ctl/ctl_util.c optional ctl
+cam/ctl/scsi_ctl.c optional ctl
cam/scsi/scsi_da.c optional da
cam/scsi/scsi_low.c optional ct | ncv | nsp | stg
cam/scsi/scsi_low_pisa.c optional ct | ncv | nsp | stg
diff --git a/sys/dev/ata/atapi-cam.c b/sys/dev/ata/atapi-cam.c
index bce8625..25a579d 100644
--- a/sys/dev/ata/atapi-cam.c
+++ b/sys/dev/ata/atapi-cam.c
@@ -576,9 +576,10 @@ atapi_action(struct cam_sim *sim, union ccb *ccb)
struct scsi_inquiry *inq = (struct scsi_inquiry *) &request->u.atapi.ccb[0];
if (inq->byte2 == 0 && inq->page_code == 0 &&
- inq->length > SHORT_INQUIRY_LENGTH) {
+ scsi_2btoul(inq->length) > SHORT_INQUIRY_LENGTH) {
bzero(buf, len);
- len = inq->length = SHORT_INQUIRY_LENGTH;
+ len = SHORT_INQUIRY_LENGTH;
+ scsi_ulto2b(len, inq->length);
}
break;
}
diff --git a/sys/dev/ciss/ciss.c b/sys/dev/ciss/ciss.c
index b530de1..042d627 100644
--- a/sys/dev/ciss/ciss.c
+++ b/sys/dev/ciss/ciss.c
@@ -1614,7 +1614,7 @@ ciss_inquiry_logical(struct ciss_softc *sc, struct ciss_ldrive *ld)
inq->opcode = INQUIRY;
inq->byte2 = SI_EVPD;
inq->page_code = CISS_VPD_LOGICAL_DRIVE_GEOMETRY;
- inq->length = sizeof(ld->cl_geometry);
+ scsi_ulto2b(sizeof(ld->cl_geometry), inq->length);
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "error getting geometry (%d)\n", error);
diff --git a/sys/i386/conf/GENERIC b/sys/i386/conf/GENERIC
index 569b9e1..f636c7a 100644
--- a/sys/i386/conf/GENERIC
+++ b/sys/i386/conf/GENERIC
@@ -138,7 +138,8 @@ device da # Direct Access (disks)
device sa # Sequential Access (tape etc)
device cd # CD
device pass # Passthrough device (direct ATA/SCSI access)
-device ses # SCSI Environmental Services (and SAF-TE)
+device ses # Enclosure Services (SES and SAF-TE)
+device ctl # CAM Target Layer
# RAID controllers interfaced to the SCSI subsystem
device amr # AMI MegaRAID
diff --git a/sys/i386/conf/PAE b/sys/i386/conf/PAE
index 613b7d7..ebf010e 100644
--- a/sys/i386/conf/PAE
+++ b/sys/i386/conf/PAE
@@ -23,6 +23,7 @@ device ispfw
# address properly may cause data corruption when used in a machine with more
# than 4 gigabytes of memory.
+
nodevice ahb
nodevice sym
nodevice trm
@@ -37,6 +38,8 @@ nodevice ncv
nodevice nsp
nodevice stg
+nodevice ctl
+
nodevice asr
nodevice dpt
nodevice mly
diff --git a/sys/ia64/conf/GENERIC b/sys/ia64/conf/GENERIC
index 21e90b8..2a88215 100644
--- a/sys/ia64/conf/GENERIC
+++ b/sys/ia64/conf/GENERIC
@@ -113,7 +113,8 @@ device ch # Media changer
device da # Direct Access (ie disk)
device pass # Passthrough (direct ATA/SCSI access)
device sa # Sequential Access (ie tape)
-device ses # Environmental Services (and SAF-TE)
+device ses # Enclosure Services (SES and SAF-TE)
+device ctl # CAM Target Layer
# RAID controllers
device aac # Adaptec FSA RAID
diff --git a/sys/sparc64/conf/GENERIC b/sys/sparc64/conf/GENERIC
index 5c2a8b4..12d4c15 100644
--- a/sys/sparc64/conf/GENERIC
+++ b/sys/sparc64/conf/GENERIC
@@ -119,6 +119,7 @@ device sa # Sequential Access (tape etc)
device cd # CD
device pass # Passthrough device (direct ATA/SCSI access)
device ses # SCSI Environmental Services (and SAF-TE)
+device ctl # CAM Target Layer
# RAID controllers
#device amr # AMI MegaRAID
diff --git a/usr.bin/Makefile b/usr.bin/Makefile
index d978eba..be5931c 100644
--- a/usr.bin/Makefile
+++ b/usr.bin/Makefile
@@ -33,6 +33,7 @@ SUBDIR= alias \
compress \
cpuset \
csplit \
+ ctlstat \
cut \
dirname \
du \
diff --git a/usr.bin/ctlstat/Makefile b/usr.bin/ctlstat/Makefile
new file mode 100644
index 0000000..0c09184
--- /dev/null
+++ b/usr.bin/ctlstat/Makefile
@@ -0,0 +1,8 @@
+# $FreeBSD$
+
+PROG= ctlstat
+MAN= ctlstat.8
+SDIR= ${.CURDIR}/../../sys
+CFLAGS+= -I${SDIR}
+
+.include <bsd.prog.mk>
diff --git a/usr.bin/ctlstat/ctlstat.8 b/usr.bin/ctlstat/ctlstat.8
new file mode 100644
index 0000000..a213cc9
--- /dev/null
+++ b/usr.bin/ctlstat/ctlstat.8
@@ -0,0 +1,122 @@
+.\"
+.\" Copyright (c) 2010 Silicon Graphics International Corp.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions, and the following disclaimer,
+.\" without modification.
+.\" 2. Redistributions in binary form must reproduce at minimum a disclaimer
+.\" substantially similar to the "NO WARRANTY" disclaimer below
+.\" ("Disclaimer") and any redistribution must be conditioned upon
+.\" including a substantially similar Disclaimer requirement for further
+.\" binary redistribution.
+.\"
+.\" NO WARRANTY
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+.\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+.\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+.\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+.\" HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+.\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGES.
+.\"
+.\" ctlstat utility man page.
+.\"
+.\" Author: Ken Merry <ken@FreeBSD.org>
+.\"
+.\" $Id: //depot/users/kenm/FreeBSD-test2/usr.bin/ctlstat/ctlstat.8#2 $
+.\" $FreeBSD$
+.\"
+.Dd June 4, 2010
+.Dt CTLSTAT 8
+.Os
+.Sh NAME
+.Nm ctlstat
+.Nd CAM Target Layer statistics utility
+.Sh SYNOPSIS
+.Nm
+.Op Fl t
+.Op Fl c Ar count
+.Op Fl C
+.Op Fl d
+.Op Fl D
+.Op Fl h
+.Op Fl j
+.Op Fl l Ar lun
+.Op Fl n Ar numdevs
+.Op Fl w Ar wait
+.Sh DESCRIPTION
+The
+.Nm
+utility provides statistics information for the CAM Target Layer.
+The first display (except for dump and JSON modes) shows average statistics
+since system startup.
+Subsequent displays show average statistics during the measurement
+interval.
+.Pp
+The options are as follows:
+.Bl -tag -width 10n
+.It Fl t
+Total mode.
+This displays separate columns with the total CTL read and write output,
+and a combined total column that also includes non-I/O operations.
+.It Fl c Ar count
+Display statistics this many times.
+.It Fl C
+Disable display of CPU statistics.
+.It Fl d
+Display DMA operation time (latency) instead of overall I/O time (latency).
+.It Fl D
+Text dump mode.
+Dump all available statistics every 30 seconds in a text format suitable
+for parsing.
+No statistics are computed in this mode, only raw numbers are displayed.
+.It Fl h
+Suppress display of the header.
+.It Fl j
+JSON dump mode.
+Dump all available statistics every 30 seconds in JavaScript Object
+Notation (JSON) format.
+No statistics are computed in this mode, only raw numbers are displayed.
+.It Fl l Ar lun
+Request statistics for the specified LUN.
+This option is incompatible with total
+.Pq Fl t
+mode.
+.It Fl n Ar numdevs
+Display statistics for this many devices.
+.It Fl w Ar wait
+Wait this many seconds in between displays.
+If this option is not specified,
+.Nm
+defaults to a 1 second interval.
+.El
+.Sh EXAMPLES
+.Dl ctlstat -t
+.Pp
+Display total statistics for the system with a one second interval.
+.Pp
+.Dl ctlstat -d -l 5 -C
+.Pp
+Display average DMA time for LUN 5 and omit CPU utilization.
+.Pp
+.Dl ctlstat -n 7 -w 10
+.Pp
+Display statistics for the first 7 LUNs, and display average statistics
+every 10 seconds.
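+.Pp
+.Dl ctlstat -j -c 1
+.Pp
+Dump all available statistics once, in JSON format.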
+.Sh SEE ALSO
+.Xr cam 3 ,
+.Xr cam 4 ,
+.Xr xpt 4 ,
+.Xr camcontrol 8 ,
+.Xr ctladm 8 ,
+.Xr iostat 8
+.Sh AUTHORS
+.An Ken Merry Aq ken@FreeBSD.org
+.An Will Andrews Aq will@FreeBSD.org
diff --git a/usr.bin/ctlstat/ctlstat.c b/usr.bin/ctlstat/ctlstat.c
new file mode 100644
index 0000000..11022ec
--- /dev/null
+++ b/usr.bin/ctlstat/ctlstat.c
@@ -0,0 +1,730 @@
+/*-
+ * Copyright (c) 2004, 2008, 2009 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/usr.bin/ctlstat/ctlstat.c#4 $
+ */
+/*
+ * CAM Target Layer statistics program
+ *
+ * Authors: Ken Merry <ken@FreeBSD.org>, Will Andrews <will@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/sysctl.h>
+#include <sys/resource.h>
+#include <sys/queue.h>
+#include <sys/callout.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <string.h>
+#include <errno.h>
+#include <err.h>
+#include <ctype.h>
+#include <bitstring.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+
+/*
+ * The default number of LUNs we allocate statistics space for.  We
+ * dynamically allocate more if needed.
+ */
+#define CTL_STAT_NUM_LUNS 30
+
+/*
+ * The default number of LUN selection bits we allocate.  This is large
+ * because we do not currently grow the bitmap if the user specifies a
+ * LUN number of 1024 or larger.
+ */
+#define CTL_STAT_LUN_BITS 1024L
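+
+/*
+ * The lun_mask member of struct ctlstat_context below is a bitstring(3)
+ * bitmap sized by CTL_STAT_LUN_BITS.  A sketch of the intended use, with
+ * a hypothetical context "ctx" and LUN number from the -l option:
+ *
+ *	bit_set(ctx.lun_mask, lun_num);
+ *	...
+ *	if (bit_test(ctx.lun_mask, lun) != 0)
+ *		(display statistics for this LUN)
+ */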
+
+static const char *ctlstat_opts = "Cc:Ddhjl:n:tw:";
+static const char *ctlstat_usage = "Usage: ctlstat [-CDdhjt] [-l lunnum]"
+	" [-c count] [-n numdevs] [-w wait]\n";
+
+struct ctl_cpu_stats {
+ uint64_t user;
+ uint64_t nice;
+ uint64_t system;
+ uint64_t intr;
+ uint64_t idle;
+};
+
+typedef enum {
+ CTLSTAT_MODE_STANDARD,
+ CTLSTAT_MODE_DUMP,
+ CTLSTAT_MODE_JSON,
+} ctlstat_mode_types;
+
+#define CTLSTAT_FLAG_CPU (1 << 0)
+#define CTLSTAT_FLAG_HEADER (1 << 1)
+#define CTLSTAT_FLAG_FIRST_RUN (1 << 2)
+#define CTLSTAT_FLAG_TOTALS (1 << 3)
+#define CTLSTAT_FLAG_DMA_TIME (1 << 4)
+#define CTLSTAT_FLAG_LUN_TIME_VALID (1 << 5)
+#define F_CPU(ctx) ((ctx)->flags & CTLSTAT_FLAG_CPU)
+#define F_HDR(ctx) ((ctx)->flags & CTLSTAT_FLAG_HEADER)
+#define F_FIRST(ctx) ((ctx)->flags & CTLSTAT_FLAG_FIRST_RUN)
+#define F_TOTALS(ctx) ((ctx)->flags & CTLSTAT_FLAG_TOTALS)
+#define F_DMA(ctx) ((ctx)->flags & CTLSTAT_FLAG_DMA_TIME)
+#define F_LUNVAL(ctx) ((ctx)->flags & CTLSTAT_FLAG_LUN_TIME_VALID)
+
+struct ctlstat_context {
+ ctlstat_mode_types mode;
+ int flags;
+ struct ctl_lun_io_stats *cur_lun_stats, *prev_lun_stats,
+ *tmp_lun_stats;
+ struct ctl_lun_io_stats cur_total_stats[3], prev_total_stats[3];
+ struct timespec cur_time, prev_time;
+ struct ctl_cpu_stats cur_cpu, prev_cpu;
+ uint64_t cur_total_jiffies, prev_total_jiffies;
+ uint64_t cur_idle, prev_idle;
+ bitstr_t bit_decl(lun_mask, CTL_STAT_LUN_BITS);
+ int num_luns;
+ int numdevs;
+ int header_interval;
+};
+
+#ifndef min
+#define min(x,y) (((x) < (y)) ? (x) : (y))
+#endif
+
+static void usage(int error);
+static int getstats(int fd, int *num_luns, struct ctl_lun_io_stats **xlun_stats,
+		    struct timespec *cur_time, int *flags);
+static int getcpu(struct ctl_cpu_stats *cpu_stats);
+static void compute_stats(struct ctl_lun_io_stats *cur_stats,
+ struct ctl_lun_io_stats *prev_stats,
+ long double etime, long double *mbsec,
+ long double *kb_per_transfer,
+ long double *transfers_per_second,
+ long double *ms_per_transfer,
+ long double *ms_per_dma,
+ long double *dmas_per_second);
+
+static void
+usage(int error)
+{
+	fprintf(error ? stderr : stdout, "%s", ctlstat_usage);
+}
+
+static int
+getstats(int fd, int *num_luns, struct ctl_lun_io_stats **xlun_stats,
+ struct timespec *cur_time, int *flags)
+{
+ struct ctl_lun_io_stats *lun_stats;
+ struct ctl_stats stats;
+ int more_space_count;
+
+ more_space_count = 0;
+
+ if (*num_luns == 0)
+ *num_luns = CTL_STAT_NUM_LUNS;
+
+ lun_stats = *xlun_stats;
+retry:
+
+	if (lun_stats == NULL) {
+		lun_stats = (struct ctl_lun_io_stats *)malloc(
+			sizeof(*lun_stats) * *num_luns);
+		if (lun_stats == NULL)
+			err(1, "unable to allocate %zu bytes",
+			    sizeof(*lun_stats) * *num_luns);
+	}
+
+ memset(&stats, 0, sizeof(stats));
+ stats.alloc_len = *num_luns * sizeof(*lun_stats);
+ memset(lun_stats, 0, stats.alloc_len);
+ stats.lun_stats = lun_stats;
+
+ if (ioctl(fd, CTL_GETSTATS, &stats) == -1)
+ err(1, "error returned from CTL_GETSTATS ioctl");
+
+ switch (stats.status) {
+ case CTL_SS_OK:
+ break;
+ case CTL_SS_ERROR:
+		errx(1, "CTL_SS_ERROR returned from CTL_GETSTATS ioctl");
+ break;
+ case CTL_SS_NEED_MORE_SPACE:
+ if (more_space_count > 0) {
+ errx(1, "CTL_GETSTATS returned NEED_MORE_SPACE again");
+ }
+ *num_luns = stats.num_luns;
+ free(lun_stats);
+ lun_stats = NULL;
+ more_space_count++;
+ goto retry;
+ break; /* NOTREACHED */
+ default:
+ errx(1, "unknown status %d returned from CTL_GETSTATS ioctl",
+ stats.status);
+ break;
+ }
+
+ *xlun_stats = lun_stats;
+ *num_luns = stats.num_luns;
+ cur_time->tv_sec = stats.timestamp.tv_sec;
+ cur_time->tv_nsec = stats.timestamp.tv_nsec;
+ if (stats.flags & CTL_STATS_FLAG_TIME_VALID)
+ *flags |= CTLSTAT_FLAG_LUN_TIME_VALID;
+ else
+ *flags &= ~CTLSTAT_FLAG_LUN_TIME_VALID;
+
+ return (0);
+}
+
+static int
+getcpu(struct ctl_cpu_stats *cpu_stats)
+{
+ long cp_time[CPUSTATES];
+ size_t cplen;
+
+ cplen = sizeof(cp_time);
+
+ if (sysctlbyname("kern.cp_time", &cp_time, &cplen, NULL, 0) == -1) {
+ warn("sysctlbyname(kern.cp_time...) failed");
+ return (1);
+ }
+
+ cpu_stats->user = cp_time[CP_USER];
+ cpu_stats->nice = cp_time[CP_NICE];
+ cpu_stats->system = cp_time[CP_SYS];
+ cpu_stats->intr = cp_time[CP_INTR];
+ cpu_stats->idle = cp_time[CP_IDLE];
+
+ return (0);
+}
+
+static void
+compute_stats(struct ctl_lun_io_stats *cur_stats,
+ struct ctl_lun_io_stats *prev_stats, long double etime,
+ long double *mbsec, long double *kb_per_transfer,
+ long double *transfers_per_second, long double *ms_per_transfer,
+ long double *ms_per_dma, long double *dmas_per_second)
+{
+ uint64_t total_bytes = 0, total_operations = 0, total_dmas = 0;
+ uint32_t port;
+ struct bintime total_time_bt, total_dma_bt;
+ struct timespec total_time_ts, total_dma_ts;
+ int i;
+
+ bzero(&total_time_bt, sizeof(total_time_bt));
+ bzero(&total_dma_bt, sizeof(total_dma_bt));
+ bzero(&total_time_ts, sizeof(total_time_ts));
+ bzero(&total_dma_ts, sizeof(total_dma_ts));
+ for (port = 0; port < CTL_MAX_PORTS; port++) {
+ for (i = 0; i < CTL_STATS_NUM_TYPES; i++) {
+ total_bytes += cur_stats->ports[port].bytes[i];
+ total_operations +=
+ cur_stats->ports[port].operations[i];
+ total_dmas += cur_stats->ports[port].num_dmas[i];
+ bintime_add(&total_time_bt,
+ &cur_stats->ports[port].time[i]);
+ bintime_add(&total_dma_bt,
+ &cur_stats->ports[port].dma_time[i]);
+ if (prev_stats != NULL) {
+ total_bytes -=
+ prev_stats->ports[port].bytes[i];
+ total_operations -=
+ prev_stats->ports[port].operations[i];
+ total_dmas -=
+ prev_stats->ports[port].num_dmas[i];
+ bintime_sub(&total_time_bt,
+ &prev_stats->ports[port].time[i]);
+ bintime_sub(&total_dma_bt,
+ &prev_stats->ports[port].dma_time[i]);
+ }
+ }
+ }
+
+ *mbsec = total_bytes;
+ *mbsec /= 1024 * 1024;
+ if (etime > 0.0)
+ *mbsec /= etime;
+ else
+ *mbsec = 0;
+ *kb_per_transfer = total_bytes;
+ *kb_per_transfer /= 1024;
+ if (total_operations > 0)
+ *kb_per_transfer /= total_operations;
+ else
+ *kb_per_transfer = 0;
+ *transfers_per_second = total_operations;
+ *dmas_per_second = total_dmas;
+ if (etime > 0.0) {
+ *transfers_per_second /= etime;
+ *dmas_per_second /= etime;
+ } else {
+ *transfers_per_second = 0;
+ *dmas_per_second = 0;
+ }
+
+ bintime2timespec(&total_time_bt, &total_time_ts);
+ bintime2timespec(&total_dma_bt, &total_dma_ts);
+ if (total_operations > 0) {
+ /*
+ * Convert the timespec to milliseconds.
+ */
+ *ms_per_transfer = total_time_ts.tv_sec * 1000;
+ *ms_per_transfer += (long double)total_time_ts.tv_nsec / 1000000;
+ *ms_per_transfer /= total_operations;
+ } else
+ *ms_per_transfer = 0;
+
+ if (total_dmas > 0) {
+ /*
+ * Convert the timespec to milliseconds.
+ */
+ *ms_per_dma = total_dma_ts.tv_sec * 1000;
+ *ms_per_dma += (long double)total_dma_ts.tv_nsec / 1000000;
+ *ms_per_dma /= total_dmas;
+ } else
+ *ms_per_dma = 0;
+}
+
+/*
+ * The ctlstat_dump() and ctlstat_json() functions serve essentially the
+ * same purpose, but dump the statistics in different formats. JSON is
+ * more conducive to machine parsing.
+ */
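+/*
+ * Illustrative (abbreviated) shape of the ctlstat_json() output; the
+ * numeric values here are made up:
+ *
+ * {"luns":[{"ports":[{"num":1,"io":[{"type":"READ","bytes":512,
+ * "operations":1,"io time":{"sec":0,"frac":1234},
+ * "dma time":{"sec":0,"frac":5678},"num dmas":1}, ...]}, ...]}, ...]}
+ */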
+
+#define PRINT_BINTIME(prefix, bt) \
+ printf("%s %jd s %ju frac\n", prefix, (intmax_t)(bt).sec, \
+ (uintmax_t)(bt).frac)
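+/*
+ * A bintime stores whole seconds in "sec" and a 64-bit binary fraction
+ * of a second in "frac" (units of 1/2^64 second).
+ */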
+const char *iotypes[] = {"NO IO", "READ", "WRITE"};
+
+static void
+ctlstat_dump(struct ctlstat_context *ctx) {
+ int iotype, lun, port;
+ struct ctl_lun_io_stats *stats = ctx->cur_lun_stats;
+
+ for (lun = 0; lun < ctx->num_luns;lun++) {
+ printf("lun %d\n", lun);
+ for (port = 0; port < CTL_MAX_PORTS; port++) {
+ printf(" port %d\n",
+ stats[lun].ports[port].targ_port);
+ for (iotype = 0; iotype < CTL_STATS_NUM_TYPES;
+ iotype++) {
+ printf(" io type %d (%s)\n", iotype,
+ iotypes[iotype]);
+ printf(" bytes %ju\n", (uintmax_t)
+ stats[lun].ports[port].bytes[iotype]);
+ printf(" operations %ju\n", (uintmax_t)
+ stats[lun].ports[port].operations[iotype]);
+ PRINT_BINTIME(" io time",
+ stats[lun].ports[port].time[iotype]);
+ printf(" num dmas %ju\n", (uintmax_t)
+ stats[lun].ports[port].num_dmas[iotype]);
+ PRINT_BINTIME(" dma time",
+ stats[lun].ports[port].dma_time[iotype]);
+ }
+ }
+ }
+}
+
+#define JSON_BINTIME(prefix, bt) \
+ printf("\"%s\":{\"sec\":%jd,\"frac\":%ju},", \
+ prefix, (intmax_t)(bt).sec, (uintmax_t)(bt).frac)
+
+static void
+ctlstat_json(struct ctlstat_context *ctx) {
+ int iotype, lun, port;
+ struct ctl_lun_io_stats *stats = ctx->cur_lun_stats;
+
+ printf("{\"luns\":[");
+ for (lun = 0; lun < ctx->num_luns; lun++) {
+ printf("{\"ports\":[");
+ for (port = 0; port < CTL_MAX_PORTS;port++) {
+ printf("{\"num\":%d,\"io\":[",
+ stats[lun].ports[port].targ_port);
+ for (iotype = 0; iotype < CTL_STATS_NUM_TYPES;
+ iotype++) {
+ printf("{\"type\":\"%s\",", iotypes[iotype]);
+ printf("\"bytes\":%ju,", (uintmax_t)stats[
+ lun].ports[port].bytes[iotype]);
+ printf("\"operations\":%ju,", (uintmax_t)stats[
+ lun].ports[port].operations[iotype]);
+ JSON_BINTIME("io time",
+ stats[lun].ports[port].time[iotype]);
+ JSON_BINTIME("dma time",
+ stats[lun].ports[port].dma_time[iotype]);
+ printf("\"num dmas\":%ju}", (uintmax_t)
+ stats[lun].ports[port].num_dmas[iotype]);
+ if (iotype < (CTL_STATS_NUM_TYPES - 1))
+ printf(","); /* continue io array */
+ }
+ printf("]}"); /* close port */
+ if (port < (CTL_MAX_PORTS - 1))
+ printf(","); /* continue port array */
+ }
+ printf("]}"); /* close lun */
+ if (lun < (ctx->num_luns - 1))
+ printf(","); /* continue lun array */
+ }
+ printf("]}"); /* close luns and toplevel */
+}
+
+static void
+ctlstat_standard(struct ctlstat_context *ctx) {
+ long double cur_secs, prev_secs, etime;
+ uint64_t delta_jiffies, delta_idle;
+ uint32_t port;
+ long double cpu_percentage;
+ int i;
+ int j;
+
+ cpu_percentage = 0;
+
+ if (F_CPU(ctx) && (getcpu(&ctx->cur_cpu) != 0))
+ errx(1, "error returned from getcpu()");
+
+ cur_secs = ctx->cur_time.tv_sec +
+ ((long double)ctx->cur_time.tv_nsec / 1000000000);
+ prev_secs = ctx->prev_time.tv_sec +
+ ((long double)ctx->prev_time.tv_nsec / 1000000000);
+
+ etime = cur_secs - prev_secs;
+
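+ /*
+ * CPU load is the non-idle share of the kern.cp_time ticks that
+ * accumulated during this interval.
+ */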
+ if (F_CPU(ctx)) {
+ ctx->prev_total_jiffies = ctx->cur_total_jiffies;
+ ctx->cur_total_jiffies = ctx->cur_cpu.user +
+ ctx->cur_cpu.nice + ctx->cur_cpu.system +
+ ctx->cur_cpu.intr + ctx->cur_cpu.idle;
+ delta_jiffies = ctx->cur_total_jiffies;
+ if (F_FIRST(ctx) == 0)
+ delta_jiffies -= ctx->prev_total_jiffies;
+ ctx->prev_idle = ctx->cur_idle;
+ ctx->cur_idle = ctx->cur_cpu.idle;
+ delta_idle = ctx->cur_idle - ctx->prev_idle;
+
+ cpu_percentage = delta_jiffies - delta_idle;
+ cpu_percentage /= delta_jiffies;
+ cpu_percentage *= 100;
+ }
+
+ if (F_HDR(ctx)) {
+ ctx->header_interval--;
+ if (ctx->header_interval <= 0) {
+ int hdr_devs;
+
+ hdr_devs = 0;
+
+ if (F_TOTALS(ctx)) {
+ fprintf(stdout, "%s System Read %s"
+ "System Write %sSystem Total%s\n",
+ (F_LUNVAL(ctx) != 0) ? " " : "",
+ (F_LUNVAL(ctx) != 0) ? " " : "",
+ (F_LUNVAL(ctx) != 0) ? " " : "",
+ (F_CPU(ctx) != 0) ? " CPU" : "");
+ hdr_devs = 3;
+ } else {
+ if (F_CPU(ctx))
+ fprintf(stdout, " CPU ");
+ for (i = 0; i < min(CTL_STAT_LUN_BITS,
+ ctx->num_luns); i++) {
+ int lun;
+
+ /*
+ * Obviously this won't work with
+ * LUN numbers greater than a signed
+ * integer.
+ */
+ lun = (int)ctx->cur_lun_stats[i
+ ].lun_number;
+
+ if (bit_test(ctx->lun_mask, lun) == 0)
+ continue;
+ fprintf(stdout, "%15.6s%d ",
+ "lun", lun);
+ hdr_devs++;
+ }
+ fprintf(stdout, "\n");
+ }
+ for (i = 0; i < hdr_devs; i++)
+ fprintf(stdout, "%s %sKB/t %s MB/s ",
+ ((F_CPU(ctx) != 0) && (i == 0) &&
+ (F_TOTALS(ctx) == 0)) ? " " : "",
+ (F_LUNVAL(ctx) != 0) ? " ms " : "",
+ (F_DMA(ctx) == 0) ? "tps" : "dps");
+ fprintf(stdout, "\n");
+ ctx->header_interval = 20;
+ }
+ }
+
+ if (F_TOTALS(ctx) != 0) {
+ long double mbsec[3];
+ long double kb_per_transfer[3];
+ long double transfers_per_sec[3];
+ long double ms_per_transfer[3];
+ long double ms_per_dma[3];
+ long double dmas_per_sec[3];
+
+ for (i = 0; i < 3; i++)
+ ctx->prev_total_stats[i] = ctx->cur_total_stats[i];
+
+ memset(&ctx->cur_total_stats, 0, sizeof(ctx->cur_total_stats));
+
+ /* Use macros to make the next loop more readable. */
+#define ADD_STATS_BYTES(st, p, i, j) \
+ ctx->cur_total_stats[st].ports[p].bytes[j] += \
+ ctx->cur_lun_stats[i].ports[p].bytes[j]
+#define ADD_STATS_OPERATIONS(st, p, i, j) \
+ ctx->cur_total_stats[st].ports[p].operations[j] += \
+ ctx->cur_lun_stats[i].ports[p].operations[j]
+#define ADD_STATS_NUM_DMAS(st, p, i, j) \
+ ctx->cur_total_stats[st].ports[p].num_dmas[j] += \
+ ctx->cur_lun_stats[i].ports[p].num_dmas[j]
+#define ADD_STATS_TIME(st, p, i, j) \
+ bintime_add(&ctx->cur_total_stats[st].ports[p].time[j], \
+ &ctx->cur_lun_stats[i].ports[p].time[j])
+#define ADD_STATS_DMA_TIME(st, p, i, j) \
+ bintime_add(&ctx->cur_total_stats[st].ports[p].dma_time[j], \
+ &ctx->cur_lun_stats[i].ports[p].dma_time[j])
+
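+ /*
+ * Slot 0 of cur_total_stats accumulates reads, slot 1 writes,
+ * and slot 2 the totals across all I/O types.
+ */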
+ for (i = 0; i < ctx->num_luns; i++) {
+ for (port = 0; port < CTL_MAX_PORTS; port++) {
+ for (j = 0; j < CTL_STATS_NUM_TYPES; j++) {
+ ADD_STATS_BYTES(2, port, i, j);
+ ADD_STATS_OPERATIONS(2, port, i, j);
+ ADD_STATS_NUM_DMAS(2, port, i, j);
+ ADD_STATS_TIME(2, port, i, j);
+ ADD_STATS_DMA_TIME(2, port, i, j);
+ }
+ ADD_STATS_BYTES(0, port, i, CTL_STATS_READ);
+ ADD_STATS_OPERATIONS(0, port, i,
+ CTL_STATS_READ);
+ ADD_STATS_NUM_DMAS(0, port, i, CTL_STATS_READ);
+ ADD_STATS_TIME(0, port, i, CTL_STATS_READ);
+ ADD_STATS_DMA_TIME(0, port, i, CTL_STATS_READ);
+
+ ADD_STATS_BYTES(1, port, i, CTL_STATS_WRITE);
+ ADD_STATS_OPERATIONS(1, port, i,
+ CTL_STATS_WRITE);
+ ADD_STATS_NUM_DMAS(1, port, i, CTL_STATS_WRITE);
+ ADD_STATS_TIME(1, port, i, CTL_STATS_WRITE);
+ ADD_STATS_DMA_TIME(1, port, i, CTL_STATS_WRITE);
+ }
+ }
+
+ for (i = 0; i < 3; i++) {
+ compute_stats(&ctx->cur_total_stats[i],
+ F_FIRST(ctx) ? NULL : &ctx->prev_total_stats[i],
+ etime, &mbsec[i], &kb_per_transfer[i],
+ &transfers_per_sec[i],
+ &ms_per_transfer[i], &ms_per_dma[i],
+ &dmas_per_sec[i]);
+ if (F_DMA(ctx) != 0)
+ fprintf(stdout, " %2.2Lf",
+ ms_per_dma[i]);
+ else if (F_LUNVAL(ctx) != 0)
+ fprintf(stdout, " %2.2Lf",
+ ms_per_transfer[i]);
+ fprintf(stdout, " %5.2Lf %3.0Lf %5.2Lf ",
+ kb_per_transfer[i],
+ (F_DMA(ctx) == 0) ? transfers_per_sec[i] :
+ dmas_per_sec[i], mbsec[i]);
+ }
+ if (F_CPU(ctx))
+ fprintf(stdout, " %5.1Lf%%", cpu_percentage);
+ } else {
+ if (F_CPU(ctx))
+ fprintf(stdout, "%5.1Lf%% ", cpu_percentage);
+
+ for (i = 0; i < min(CTL_STAT_LUN_BITS, ctx->num_luns); i++) {
+ long double mbsec, kb_per_transfer;
+ long double transfers_per_sec;
+ long double ms_per_transfer;
+ long double ms_per_dma;
+ long double dmas_per_sec;
+
+ if (bit_test(ctx->lun_mask,
+ (int)ctx->cur_lun_stats[i].lun_number) == 0)
+ continue;
+ compute_stats(&ctx->cur_lun_stats[i], F_FIRST(ctx) ?
+ NULL : &ctx->prev_lun_stats[i], etime,
+ &mbsec, &kb_per_transfer,
+ &transfers_per_sec, &ms_per_transfer,
+ &ms_per_dma, &dmas_per_sec);
+ if (F_DMA(ctx))
+ fprintf(stdout, " %2.2Lf",
+ ms_per_dma);
+ else if (F_LUNVAL(ctx) != 0)
+ fprintf(stdout, " %2.2Lf",
+ ms_per_transfer);
+ fprintf(stdout, " %5.2Lf %3.0Lf %5.2Lf ",
+ kb_per_transfer, (F_DMA(ctx) == 0) ?
+ transfers_per_sec : dmas_per_sec, mbsec);
+ }
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ int c;
+ int count, waittime;
+ int set_lun;
+ int fd, retval;
+ struct ctlstat_context ctx;
+
+ /* default values */
+ retval = 0;
+ waittime = 1;
+ count = -1;
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.numdevs = 3;
+ ctx.mode = CTLSTAT_MODE_STANDARD;
+ ctx.flags |= CTLSTAT_FLAG_CPU;
+ ctx.flags |= CTLSTAT_FLAG_FIRST_RUN;
+ ctx.flags |= CTLSTAT_FLAG_HEADER;
+
+ while ((c = getopt(argc, argv, ctlstat_opts)) != -1) {
+ switch (c) {
+ case 'C':
+ ctx.flags &= ~CTLSTAT_FLAG_CPU;
+ break;
+ case 'c':
+ count = atoi(optarg);
+ break;
+ case 'd':
+ ctx.flags |= CTLSTAT_FLAG_DMA_TIME;
+ break;
+ case 'D':
+ ctx.mode = CTLSTAT_MODE_DUMP;
+ waittime = 30;
+ break;
+ case 'h':
+ ctx.flags &= ~CTLSTAT_FLAG_HEADER;
+ break;
+ case 'j':
+ ctx.mode = CTLSTAT_MODE_JSON;
+ waittime = 30;
+ break;
+ case 'l': {
+ int cur_lun;
+
+ cur_lun = atoi(optarg);
+ if ((cur_lun < 0) || (cur_lun >= CTL_STAT_LUN_BITS))
+ errx(1, "Invalid LUN number %d", cur_lun);
+
+ bit_ffs(ctx.lun_mask, CTL_STAT_LUN_BITS, &set_lun);
+ if (set_lun == -1)
+ ctx.numdevs = 1;
+ else
+ ctx.numdevs++;
+ bit_set(ctx.lun_mask, cur_lun);
+ break;
+ }
+ case 'n':
+ ctx.numdevs = atoi(optarg);
+ break;
+ case 't':
+ ctx.flags |= CTLSTAT_FLAG_TOTALS;
+ ctx.numdevs = 3;
+ break;
+ case 'w':
+ waittime = atoi(optarg);
+ break;
+ default:
+ retval = 1;
+ usage(retval);
+ exit(retval);
+ break;
+ }
+ }
+
+ bit_ffs(ctx.lun_mask, CTL_STAT_LUN_BITS, &set_lun);
+
+ if ((F_TOTALS(&ctx))
+ && (set_lun != -1)) {
+ errx(1, "Total Mode (-t) is incompatible with individual "
+ "LUN mode (-l)");
+ } else if (set_lun == -1) {
+ /*
+ * Note that this just selects the first N LUNs to display,
+ * but at this point we have no knowledge of which LUN
+ * numbers actually exist. So we may select LUNs that
+ * aren't there.
+ */
+ bit_nset(ctx.lun_mask, 0, min(ctx.numdevs - 1,
+ CTL_STAT_LUN_BITS - 1));
+ }
+
+ if ((fd = open(CTL_DEFAULT_DEV, O_RDWR)) == -1)
+ err(1, "cannot open %s", CTL_DEFAULT_DEV);
+
+ for (;count != 0;) {
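+ /*
+ * Rotate the stats buffers so the sample we just displayed
+ * becomes the baseline for the next interval.
+ */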
+ ctx.tmp_lun_stats = ctx.prev_lun_stats;
+ ctx.prev_lun_stats = ctx.cur_lun_stats;
+ ctx.cur_lun_stats = ctx.tmp_lun_stats;
+ ctx.prev_time = ctx.cur_time;
+ ctx.prev_cpu = ctx.cur_cpu;
+ if (getstats(fd, &ctx.num_luns, &ctx.cur_lun_stats,
+ &ctx.cur_time, &ctx.flags) != 0)
+ errx(1, "error returned from getstats()");
+
+ switch(ctx.mode) {
+ case CTLSTAT_MODE_STANDARD:
+ ctlstat_standard(&ctx);
+ break;
+ case CTLSTAT_MODE_DUMP:
+ ctlstat_dump(&ctx);
+ break;
+ case CTLSTAT_MODE_JSON:
+ ctlstat_json(&ctx);
+ break;
+ default:
+ break;
+ }
+
+ fprintf(stdout, "\n");
+ ctx.flags &= ~CTLSTAT_FLAG_FIRST_RUN;
+ if (count != 1)
+ sleep(waittime);
+ if (count > 0)
+ count--;
+ }
+
+ exit (retval);
+}
+
+/*
+ * vim: ts=8
+ */
diff --git a/usr.sbin/Makefile b/usr.sbin/Makefile
index 2063ea6..b7ea932 100644
--- a/usr.sbin/Makefile
+++ b/usr.sbin/Makefile
@@ -16,6 +16,7 @@ SUBDIR= adduser \
clear_locks \
crashinfo \
cron \
+ ctladm \
daemon \
dconschat \
devinfo \
diff --git a/usr.sbin/ctladm/Makefile b/usr.sbin/ctladm/Makefile
new file mode 100644
index 0000000..88e82f4
--- /dev/null
+++ b/usr.sbin/ctladm/Makefile
@@ -0,0 +1,21 @@
+# $FreeBSD$
+
+PROG= ctladm
+SRCS= ctladm.c util.c ctl_util.c ctl_scsi_all.c
+.PATH: ${.CURDIR}/../../sys/cam/ctl
+SDIR= ${.CURDIR}/../../sys
+CFLAGS+= -I${SDIR}
+# This is necessary because of these warnings:
+# warning: cast increases required alignment of target type
+# The solution is to either upgrade the compiler (preferred), or do void
+# pointer gymnastics to get around the warning. For now, disable the
+# warning instead of doing the void pointer workaround.
+.if ${MACHINE_CPUARCH} == "arm"
+WARNS?= 3
+.endif
+
+DPADD= ${LIBCAM} ${LIBSBUF}
+LDADD= -lcam -lsbuf -lbsdxml
+MAN= ctladm.8
+
+.include <bsd.prog.mk>
diff --git a/usr.sbin/ctladm/ctladm.8 b/usr.sbin/ctladm/ctladm.8
new file mode 100644
index 0000000..027d89e
--- /dev/null
+++ b/usr.sbin/ctladm/ctladm.8
@@ -0,0 +1,963 @@
+.\"
+.\" Copyright (c) 2003 Silicon Graphics International Corp.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions, and the following disclaimer,
+.\" without modification.
+.\" 2. Redistributions in binary form must reproduce at minimum a disclaimer
+.\" substantially similar to the "NO WARRANTY" disclaimer below
+.\" ("Disclaimer") and any redistribution must be conditioned upon
+.\" including a substantially similar Disclaimer requirement for further
+.\" binary redistribution.
+.\"
+.\" NO WARRANTY
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+.\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+.\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+.\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+.\" HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+.\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGES.
+.\"
+.\" ctladm utility man page.
+.\"
+.\" Author: Ken Merry <ken@FreeBSD.org>
+.\"
+.\" $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.8#3 $
+.\" $FreeBSD$
+.\"
+.Dd July 8, 2011
+.Dt CTLADM 8
+.Os
+.Sh NAME
+.Nm ctladm
+.Nd CAM Target Layer control utility
+.Sh SYNOPSIS
+.Nm
+.Aq Ar command
+.Op target:lun
+.Op generic args
+.Op command args
+.Nm
+.Ic tur
+.Aq target:lun
+.Op general options
+.Nm
+.Ic inquiry
+.Aq target:lun
+.Op general options
+.Nm
+.Ic reqsense
+.Aq target:lun
+.Op general options
+.Nm
+.Ic reportluns
+.Aq target:lun
+.Op general options
+.Nm
+.Ic read
+.Aq target:lun
+.Op general options
+.Aq Fl l Ar lba
+.Aq Fl d Ar datalen
+.Aq Fl f Ar file|-
+.Aq Fl b Ar blocksize_bytes
+.Op Fl c Ar cdbsize
+.Op Fl N
+.Nm
+.Ic write
+.Aq target:lun
+.Op general options
+.Aq Fl l Ar lba
+.Aq Fl d Ar datalen
+.Aq Fl f Ar file|-
+.Aq Fl b Ar blocksize_bytes
+.Op Fl c Ar cdbsize
+.Op Fl N
+.Nm
+.Ic bbrread
+.Aq target:lun
+.Op general options
+.Aq Fl l Ar lba
+.Aq Fl d Ar datalen
+.Nm
+.Ic readcap
+.Aq target:lun
+.Op general options
+.Op Fl c Ar cdbsize
+.Nm
+.Ic modesense
+.Aq target:lun
+.Aq Fl m Ar page | Fl l
+.Op Fl P Ar pc
+.Op Fl d
+.Op Fl S Ar subpage
+.Op Fl c Ar size
+.Nm
+.Ic start
+.Aq target:lun
+.Op general options
+.Op Fl i
+.Op Fl o
+.Nm
+.Ic stop
+.Aq target:lun
+.Op general options
+.Op Fl i
+.Op Fl o
+.Nm
+.Ic synccache
+.Aq target:lun
+.Op general options
+.Op Fl l Ar lba
+.Op Fl b Ar blockcount
+.Op Fl r
+.Op Fl i
+.Op Fl c Ar cdbsize
+.Nm
+.Ic shutdown
+.Op general options
+.Nm
+.Ic startup
+.Op general options
+.Nm
+.Ic hardstop
+.Nm
+.Ic hardstart
+.Nm
+.Ic lunlist
+.Nm
+.Ic delay
+.Aq target:lun
+.Aq Fl l Ar datamove|done
+.Aq Fl t Ar secs
+.Op Fl T Ar oneshot|cont
+.Nm
+.Ic realsync Aq on|off|query
+.Nm
+.Ic setsync
+.Aq target:lun
+.Aq Fl i Ar interval
+.Nm
+.Ic getsync
+.Aq target:lun
+.Nm
+.Ic inject
+.Aq Fl i Ar action
+.Aq Fl p Ar pattern
+.Op Fl r Ar lba,len
+.Op Fl s Ar len fmt Op Ar args
+.Op Fl c
+.Op Fl d Ar delete_id
+.Nm
+.Ic create
+.Aq Fl b Ar backend
+.Op Fl B Ar blocksize
+.Op Fl d Ar device_id
+.Op Fl l Ar lun_id
+.Op Fl o Ar name=value
+.Op Fl s Ar size_bytes
+.Op Fl S Ar serial_num
+.Op Fl t Ar device_type
+.Nm
+.Ic remove
+.Aq Fl b Ar backend
+.Aq Fl l Ar lun_id
+.Op Fl o Ar name=value
+.Nm
+.Ic devlist
+.Op Fl b Ar backend
+.Op Fl v
+.Op Fl x
+.Nm
+.Ic port
+.Op Fl l
+.Op Fl o Ar on|off
+.Op Fl w Ar wwpn
+.Op Fl W Ar wwnn
+.Op Fl p Ar targ_port
+.Op Fl t Ar fe_type
+.Op Fl q
+.Op Fl x
+.Nm
+.Ic dumpooa
+.Nm
+.Ic dumpstructs
+.Nm
+.Ic help
+.Sh DESCRIPTION
+The
+.Nm
+utility is designed to provide a way to access and control the CAM Target
+Layer (CTL).
+It provides a way to send
+.Tn SCSI
+commands to the CTL layer, and also provides
+some meta-commands that utilize
+.Tn SCSI
+commands.
+(For instance, the
+.Ic lunlist
+command is implemented using the
+.Tn SCSI
+REPORT LUNS and INQUIRY commands.)
+.Pp
+The
+.Nm
+utility has a number of primary functions, many of which require a device
+identifier.
+The device identifier takes the following form:
+.Bl -tag -width 14n
+.It target:lun
+Specify the target (almost always 0) and LUN number to operate on.
+.El
+Many of the primary functions of the
+.Nm
+utility take the following optional arguments:
+.Pp
+.Bl -tag -width 10n
+.It Fl C Ar retries
+Specify the number of times to retry a command in the event of failure.
+.It Fl D Ar device
+Specify the device to open.
+This allows a device other than the default device,
+.Pa /dev/cam/ctl ,
+to be opened for sending commands.
+.It Fl I Ar id
+Specify the initiator number to use.
+By default,
+.Nm
+will use 7 as the initiator number.
+.El
+.Pp
+Primary commands:
+.Bl -tag -width 11n
+.It Ic tur
+Send the
+.Tn SCSI
+TEST UNIT READY command to the device and report whether or not it is
+ready.
+.It Ic inquiry
+Send the
+.Tn SCSI
+INQUIRY command to the device and display some of the returned inquiry
+data.
+.It Ic reqsense
+Send the
+.Tn SCSI
+REQUEST SENSE command to the device and display the returned sense
+information.
+.It Ic reportluns
+Send the
+.Tn SCSI
+REPORT LUNS command to the device and display supported LUNs.
+.It Ic read
+Send a
+.Tn SCSI
+READ command to the device, and write the requested data to a file or
+stdout.
+.Bl -tag -width 12n
+.It Fl l Ar lba
+Specify the starting Logical Block Address for the READ. This can be
+specified in decimal, octal (starting with 0), hexadecimal (starting with
+0x) or any other base supported by
+.Xr strtoull 3 .
+.It Fl d Ar datalen
+Specify the length, in 512 byte blocks, of the READ request.
+.It Fl f Ar file
+Specify the destination for the data read by the READ command. Either a
+filename or
+.Sq -
+for stdout may be specified.
+.It Fl c Ar cdbsize
+Specify the minimum
+.Tn SCSI
+CDB (Command Descriptor Block) size to be used for the READ request. Allowable
+values are 6, 10, 12 and 16. Depending upon the LBA and amount of data
+requested, a larger CDB size may be used to satisfy the request. (e.g.,
+for LBAs above 0xffffffff, READ(16) must be used to satisfy the request.)
+.It Fl b Ar blocksize
+Specify the blocksize of the underlying
+.Tn SCSI
+device, so the transfer length
+can be calculated accurately. The blocksize can be obtained via the
+.Tn SCSI
+READ CAPACITY command.
+.It Fl N
+Do not copy data from the kernel to
+.Nm
+when doing a read; just execute the command without copying data.
+This is intended for performance testing.
+.El
+.It Ic write
+Read data from a file or stdin, and write the data to the device using the
+.Tn SCSI
+WRITE command.
+.Bl -tag -width 12n
+.It Fl l Ar lba
+Specify the starting Logical Block Address for the WRITE. This can be
+specified in decimal, octal (starting with 0), hexadecimal (starting with
+0x) or any other base supported by
+.Xr strtoull 3 .
+.It Fl d Ar datalen
+Specify the length, in 512 byte blocks, of the WRITE request.
+.It Fl f Ar file
+Specify the source for the data to be written by the WRITE command. Either a
+filename or
+.Sq -
+for stdin may be specified.
+.It Fl c Ar cdbsize
+Specify the minimum
+.Tn SCSI
+CDB (Command Descriptor Block) size to be used for the WRITE request. Allowable
+values are 6, 10, 12 and 16. Depending upon the LBA and amount of data
+written, a larger CDB size may be used to satisfy the request. (e.g.,
+for LBAs above 0xffffffff, WRITE(16) must be used to satisfy the request.)
+.It Fl b Ar blocksize
+Specify the blocksize of the underlying
+.Tn SCSI
+device, so the transfer length
+can be calculated accurately. The blocksize can be obtained via the
+.Tn SCSI
+READ CAPACITY command.
+.It Fl N
+Do not copy data from
+.Nm
+to the kernel when doing a write; just execute the command without copying
+data.
+This is intended for performance testing.
+.El
+.It Ic bbrread
+Issue a SCSI READ command to the logical device to potentially force a bad
+block on a disk in the RAID set to be reconstructed from the other disks in
+the array. This command should only be used on an array that is in the
+normal state. If used on a critical array, it could cause the array to go
+offline if the bad block to be remapped is on one of the disks that is
+still active in the array.
+.Pp
+The data for this particular command will be discarded, and not returned to
+the user.
+.Pp
+In order to determine which LUN to read from, the user should first
+determine which LUN the disk with a bad block belongs to. Then he should
+map the bad disk block back to the logical block address for the array in
+order to determine which LBA to pass in to the
+.Ic bbrread
+command.
+.Pp
+This command is primarily intended for testing. In practice, bad block
+remapping will generally be triggered by the in-kernel Disk Aerobics and
+Disk Scrubbing code.
+.Bl -tag -width 10n
+.It Fl l Ar lba
+Specify the starting Logical Block Address.
+.It Fl d Ar datalen
+Specify the amount of data in bytes to read from the LUN. This must be a
+multiple of the LUN blocksize.
+.El
+.It Ic readcap
+Send the
+.Tn SCSI
+READ CAPACITY command to the device and display the device size and device
+block size. By default, READ CAPACITY(10) is
+used. If the device returns a maximum LBA of 0xffffffff, however,
+.Nm
+will automatically issue a READ CAPACITY(16), which is implemented as a
+service action of the SERVICE ACTION IN(16) opcode. The user can specify
+the minimum CDB size with the
+.Fl c
+argument. Valid values for the
+.Fl c
+option are 10 and 16. If a 10 byte CDB is specified, the request will be
+automatically reissued with a 16 byte CDB if the maximum LBA returned is
+0xffffffff.
+.It Ic modesense
+Send a
+.Tn SCSI
+MODE SENSE command to the device, and display the requested mode page(s) or
+page list.
+.Bl -tag -width 10n
+.It Fl m Ar page
+Specify the mode page to display. This option and the
+.Fl l
+option are mutually exclusive. One of the two must be specified, though.
+Mode page numbers may be specified in decimal or hexadecimal.
+.It Fl l
+Request that the list of mode pages supported by the device be returned.
+This option and the
+.Fl m
+option are mutually exclusive. One of the two must be specified, though.
+.It Fl P Ar pc
+Specify the mode page page control value. Possible values are:
+.Bl -tag -width 2n -compact
+.It 0
+Current values.
+.It 1
+Changeable value bitmask.
+.It 2
+Default values.
+.It 3
+Saved values.
+.El
+.It Fl d
+Disable block descriptors when sending the mode sense request.
+.It Fl S Ar subpage
+Specify the subpage used with the mode sense request.
+.It Fl c Ar cdbsize
+Specify the CDB size used for the mode sense request. Supported values are
+6 and 10.
+.El
+.It Ic start
+Send the
+.Tn SCSI
+START STOP UNIT command to the specified LUN with the start
+bit set.
+.Bl -tag -width 4n
+.It Fl i
+Set the immediate bit in the CDB. Note that CTL does not support the
+immediate bit, so this is primarily useful for making sure that CTL returns
+the proper error.
+.It Fl o
+Set the Copan proprietary on/offline bit in the CDB. When this flag is
+used, the LUN will be marked online again (see the description of the
+.Ic shutdown
+and
+.Ic startup
+commands). When this flag is used with a
+start command, the LUN will NOT be spun up. You need to use a start
+command without the
+.Fl o
+flag to spin up the disks in the LUN.
+.El
+.It Ic stop
+Send the
+.Tn SCSI
+START STOP UNIT command to the specified LUN with the start
+bit cleared. We use an ordered tag to stop the LUN, so we can guarantee
+that all pending I/O executes before it is stopped. (CTL guarantees this
+anyway, but
+.Nm
+sends an ordered tag for completeness.)
+.Bl -tag -width 4n
+.It Fl i
+Set the immediate bit in the CDB. Note that CTL does not support the
+immediate bit, so this is primarily useful for making sure that CTL returns
+the proper error.
+.It Fl o
+Set the Copan proprietary on/offline bit in the CDB. When this flag is
+used, the LUN will be spun down and taken offline ("Logical unit not ready,
+manual intervention required"). See the description of the
+.Ic shutdown
+and
+.Ic startup
+options.
+.El
+.It Ic synccache
+Send the
+.Tn SCSI
+SYNCHRONIZE CACHE command to the device. By default, SYNCHRONIZE
+CACHE(10) is used. If the specified starting LBA is greater than
+0xffffffff or the length is greater than 0xffff, though,
+SYNCHRONIZE CACHE(16) will be used. The 16 byte command will also be used
+if the user specifies a 16 byte CDB with the
+.Fl c
+argument.
+.Bl -tag -width 14n
+.It Fl l Ar lba
+Specify the starting LBA of the cache region to synchronize. This option is a
+no-op for CTL. If you send a SYNCHRONIZE CACHE command, it will sync the
+cache for the entire LUN.
+.It Fl b Ar blockcount
+Specify the length of the cache region to synchronize. This option is a
+no-op for CTL. If you send a SYNCHRONIZE CACHE command, it will sync the
+cache for the entire LUN.
+.It Fl r
+Specify relative addressing for the starting LBA. CTL does not support
+relative addressing, since it only works for linked commands, and CTL
+doesn't support linked commands.
+.It Fl i
+Tell the target to return status immediately after issuing the SYNCHRONIZE CACHE
+command rather than waiting for the cache to finish syncing. CTL does not
+support this bit.
+.It Fl c Ar cdbsize
+Specify the minimum CDB size. Valid values are 10 and 16 bytes.
+.El
+.It Ic shutdown
+Issue a
+.Tn SCSI
+START STOP UNIT command with the start bit cleared and the on/offline bit
+set to all direct access LUNs. This will spin down all direct access LUNs,
+and mark them offline ("Logical unit not ready, manual intervention
+required"). Once marked offline, the state can only be cleared by sending
+a START STOP UNIT command with the start bit set and the on/offline bit
+set. The
+.Nm
+commands
+.Ic startup
+and
+.Ic start
+will accomplish this. Note that the
+on/offline bit is a non-standard Copan extension to the
+.Tn SCSI
+START STOP UNIT command, so merely sending a normal start command from an
+initiator will not clear the condition. (This is by design.)
+.It Ic startup
+Issue a
+.Tn SCSI
+START STOP UNIT command with the start bit set and the on/offline bit set
+to all direct access LUNs. This will mark all direct access LUNs "online"
+again. It will not cause any LUNs to start up. A separate start command
+without the on/offline bit set is necessary for that.
+.It Ic hardstop
+Use the kernel facility for stopping all direct access LUNs and setting the
+offline bit. Unlike the
+.Ic shutdown
+command above, this command allows shutting down LUNs with I/O active. It
+will also issue a LUN reset to any reserved LUNs to break the reservation
+so that the LUN can be stopped.
+Because outstanding I/O may be aborted, when possible use the
+.Ic shutdown
+command instead.
+.It Ic hardstart
+This command is functionally identical to the
+.Ic startup
+command described above. The primary difference is that the LUNs are
+enumerated and commands sent by the in-kernel Front End Target Driver
+instead of by
+.Nm .
+.It Ic lunlist
+List all LUNs registered with CTL.
+Because this command uses the ioctl port, it will only work when the FETDs
+(Front End Target Drivers) are enabled.
+This command is the equivalent of doing a REPORT LUNS on one LUN and then
+an INQUIRY on each LUN in the system.
+.It Ic delay
+Delay commands at the given location. There are two places where commands
+may be delayed currently: before data is transferred
+.Pq Dq datamove
+and just prior to sending status to the host
+.Pq Dq done .
+One of the two must be supplied as an argument to the
+.Fl l
+option. The
+.Fl t
+option must also be specified.
+.Bl -tag -width 12n
+.It Fl l Ar delayloc
+Delay command(s) at the specified location.
+This can either be at the data movement stage (datamove) or prior to
+command completion (done).
+.It Fl t Ar delaytime
+Delay command(s) for the specified number of seconds. This must be
+specified. If set to 0, it will clear out any previously set delay for
+this particular location (datamove or done).
+.It Fl T Ar delaytype
+Specify the delay type.
+By default, the
+.Ic delay
+option will delay the next command sent to the given LUN.
+With the
+.Fl T Ar cont
+option, every command will be delayed by the specified period of time.
+With the
+.Fl T Ar oneshot
+option, the next command sent to the given LUN will be delayed and all
+subsequent commands will be completed normally.
+This is the default.
+.El
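+.Pp
+For example, the following would delay completion of the next command sent
+to LUN 1 by 5 seconds:
+.Pp
+.Dl ctladm delay 0:1 -l done -t 5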
+.It Ic realsync
+Query and control CTL's SYNCHRONIZE CACHE behavior. The
+.Sq query
+argument
+will show whether SYNCHRONIZE CACHE commands are being sent to the backend
+or not.
+The default is to send SYNCHRONIZE CACHE commands to the backend.
+The
+.Sq on
+argument will cause all SYNCHRONIZE CACHE commands sent to all LUNs to be
+sent to the backend.
+The
+.Sq off
+argument will cause all SYNCHRONIZE CACHE commands sent to all LUNs to be
+immediately returned to the initiator with successful status.
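+.Pp
+For example, to display the current behavior:
+.Pp
+.Dl ctladm realsync query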
+.It Ic setsync
+For a given LUN, only actually service every Nth SYNCHRONIZE CACHE command
+that is sent. This can be used to determine the optimal interval for
+sending SYNCHRONIZE CACHE commands. An interval of 0 means that the cache
+will be flushed for this LUN every time a SYNCHRONIZE CACHE command is
+received.
+.Pp
+You must specify the target and LUN you want to modify.
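+.Pp
+For example, to service only every 10th SYNCHRONIZE CACHE command sent to
+LUN 1:
+.Pp
+.Dl ctladm setsync 0:1 -i 10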
+.It Ic getsync
+Get the interval at which we actually service the SYNCHRONIZE CACHE
+command, as set by the
+.Ic setsync
+command above.
+The reported number means that we will actually flush the cache on every
+Nth SYNCHRONIZE CACHE command. A value of 0 means that we will flush the
+cache every time.
+.Pp
+You must specify the target and LUN you want to query.
+.It Ic inject
+Inject the specified type of error for the LUN specified, when a command
+that matches the given pattern is seen.
+The sense data returned is in either fixed or descriptor format, depending
+upon the status of the D_SENSE bit in the control mode page (page 0xa) for
+the LUN.
+.Pp
+Errors are only injected for commands that have not already failed for
+other reasons.
+By default, only the first command matching the pattern specified is
+returned with the supplied error.
+.Pp
+If the
+.Fl c
+flag is specified, all commands matching the pattern will be returned with
+the specified error until the error injection command is deleted with
+.Fl d
+flag.
+.Bl -tag -width 17n
+.It Fl i Ar action
+Specify the error to return:
+.Bl -tag -width 10n
+.It aborted
+Return the next matching command on the specified LUN with the sense key
+ABORTED COMMAND (0x0b), and the ASC/ASCQ 0x45,0x00 ("Select or reselect
+failure").
+.It mediumerr
+Return the next matching command on the specified LUN with the sense key
+MEDIUM ERROR (0x03) and the ASC/ASCQ 0x11,0x00 ("Unrecovered read error") for
+reads, or ASC/ASCQ 0x0c,0x02 ("Write error - auto reallocation failed")
+for write errors.
+.It ua
+Return the next matching command on the specified LUN with the sense key
+UNIT ATTENTION (0x06) and the ASC/ASCQ 0x29,0x00 ("POWER ON, RESET, OR BUS
+DEVICE RESET OCCURRED").
+.It custom
+Return the next matching command on the specified LUN with the supplied
+sense data.
+The
+.Fl s
+argument must be specified.
+.El
+.It Fl p Ar pattern
+Specify which commands should be returned with the given error.
+.Bl -tag -width 10n
+.It read
+The error should apply to READ(6), READ(10), READ(12), READ(16), etc.
+.It write
+The error should apply to WRITE(6), WRITE(10), WRITE(12), WRITE(16), WRITE
+AND VERIFY(10), etc.
+.It rw
+The error should apply to both read and write type commands.
+.It readcap
+The error should apply to READ CAPACITY(10) and READ CAPACITY(16) commands.
+.It tur
+The error should apply to TEST UNIT READY commands.
+.It any
+The error should apply to any command.
+.El
+.It Fl r Ar lba,len
+Specify the starting lba and length of the range of LBAs which should
+trigger an error.
+This option only applies when read and/or write patterns are specified.
+If used with other command types, the error will never be triggered.
+.It Fl s Ar len fmt Op Ar args
+Specify the sense data that is to be returned for custom actions.
+If the format is
+.Sq - ,
+len bytes of sense data will be read from standard input and written to the
+sense buffer.
+If len is longer than 252 bytes (the maximum allowable
+.Tn SCSI
+sense data length), it will be truncated to that length.
+The sense data format is described in
+.Xr cam_cdbparse 3 .
+.It Fl c
+The error injection should be persistent, instead of happening once.
+Persistent errors must be deleted with the
+.Fl d
+argument.
+.It Fl d Ar delete_id
+Delete the specified error injection serial number.
+The serial number is returned when the error is injected.
+.El
+.It Ic port
+Perform one of several CTL frontend port operations.
+Either get a list of frontend ports
+.Pq Fl l ,
+turn one or more frontends on
+or off
+.Pq Fl o Ar on|off ,
+or set the World Wide Node Name
+.Pq Fl w Ar wwnn
+or World Wide Port Name
+.Pq Fl W Ar wwpn
+for a given port.
+One of
+.Fl l ,
+.Fl o ,
+.Fl w ,
+or
+.Fl W
+must be specified.
+The WWNN and WWPN may both be specified at the same time, but cannot be
+combined with enabling/disabling or listing ports.
+.Bl -tag -width 12n
+.It Fl l
+List all CTL frontend ports or a specific port type or number.
+.It Fl o Ar on|off
+Turn the specified CTL frontend ports off or on.
+If no port number or port type is specified, all ports are turned on or
+off.
+.It Fl p Ar targ_port
+Specify the frontend port number.
+The port numbers can be found in the frontend port list.
+.It Fl q
+Omit the header in the port list output.
+.It Fl t Ar fe_type
+Specify the frontend type.
+Currently defined port types are
+.Dq fc
+(Fibre Channel),
+.Dq scsi
+(Parallel SCSI),
+.Dq ioctl
+(CTL ioctl interface),
+and
+.Dq internal
+(CTL CAM SIM).
+.It Fl w Ar wwnn
+Set the World Wide Node Name for the given port.
+The
+.Fl p
+argument must be specified, since this is only possible to implement on a
+single port.
+As a general rule, the WWNN should be the same across all ports on the
+system.
+.It Fl W Ar wwpn
+Set the World Wide Port Name for the given port.
+The
+.Fl p
+argument must be specified, since this is only possible to implement on a
+single port.
+As a general rule, the WWPN must be different for every port in the system.
+.It Fl x
+Output the port list in XML format.
+.El
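+.Pp
+For example, to take all Fibre Channel frontend ports offline:
+.Pp
+.Dl ctladm port -o off -t fc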
+.It Ic dumpooa
+Dump the OOA (Order Of Arrival) queue for each LUN registered with CTL.
+.It Ic dumpstructs
+Dump the CTL structures to the console.
+.It Ic create
+Create a new LUN.
+The backend must be specified, and depending upon the backend requested,
+some of the other options may be required.
+If the LUN is created successfully, the LUN configuration will be
+displayed.
+If LUN creation fails, a message will be displayed describing the failure.
+.Bl -tag -width 14n
+.It Fl b Ar backend
+The
+.Fl b
+flag is required.
+This specifies the name of the backend to use when creating the LUN.
+Examples are
+.Dq ramdisk
+and
+.Dq block .
+.It Fl B Ar blocksize
+Specify the blocksize of the backend in bytes.
+.It Fl d Ar device_id
+Specify the LUN-associated string to use in the
+.Tn SCSI
+INQUIRY VPD page 0x83 data.
+.It Fl l Ar lun_id
+Request that a particular LUN number be assigned.
+If the requested LUN number is not available, the request will fail.
+.It Fl o Ar name=value
+Specify a backend-specific name/value pair.
+Multiple
+.Fl o
+arguments may be specified.
+Refer to the backend documentation for arguments that may be used.
+.It Fl s Ar size_bytes
+Specify the size of the LUN in bytes.
+Some backends may allow setting the size (e.g. the ramdisk backend) and for
+others the size may be implicit (e.g. the block backend).
+.It Fl S Ar serial_num
+Specify the serial number to be used in the
+.Tn SCSI
+INQUIRY VPD page 0x80 data.
+.It Fl t Ar device_type
+Specify the numeric SCSI device type to use when creating the LUN.
+For example, the Direct Access type is 0.
+If this flag is not used, the type of LUN created is backend-specific.
+Not all LUN types are supported.
+Currently CTL only supports Direct Access (type 0) and Processor (type 3)
+LUNs.
+The backend requested may or may not support all of the LUN types that CTL
+supports.
+.El
+.It Ic remove
+Remove a LUN.
+The backend must be specified, and the LUN number must also be specified.
+Backend-specific options may also be specified with the
+.Fl o
+flag.
+.Bl -tag -width 14n
+.It Fl b Ar backend
+Specify the backend that owns the LUN to be removed.
+Examples are
+.Dq ramdisk
+and
+.Dq block .
+.It Fl l Ar lun_id
+Specify the LUN number to remove.
+.It Fl o Ar name=value
+Specify a backend-specific name/value pair.
+Multiple
+.Fl o
+arguments may be specified.
+Refer to the backend documentation for arguments that may be used.
+.El
+.It Ic devlist
+Get a list of all configured LUNs.
+This also includes the LUN size and blocksize, serial number and device ID.
+.Bl -tag -width 11n
+.It Fl b Ar backend
+Specify the backend.
+This restricts the LUN list to the named backend.
+Examples are
+.Dq ramdisk
+and
+.Dq block .
+.It Fl v
+Be verbose.
+This will also display any backend-specific LUN attributes in addition to
+the standard per-LUN information.
+.It Fl x
+Dump the raw XML.
+The LUN list information from the kernel comes in XML format, and this
+option allows the display of the raw XML data.
+This option and the
+.Fl v
+and
+.Fl b
+options are mutually exclusive.
+If you specify
+.Fl x ,
+the entire LUN database is displayed in XML format.
+.El
+.It Ic help
+Display
+.Nm
+usage information.
+.El
+.Sh EXAMPLES
+.Dl ctladm tur 0:1
+.Pp
+Send a
+.Tn SCSI
+TEST UNIT READY command to LUN 1.
+.Pp
+.Dl ctladm modesense 0:1 -l
+.Pp
+Display the list of mode pages supported by LUN 1.
+.Pp
+.Dl ctladm modesense 0:0 -m 10 -P 3 -d -c 10
+.Pp
+Display the saved version of the Control mode page (page 10) on LUN 0.
+Disable fetching block descriptors, and use a 10 byte MODE SENSE command
+instead of the default 6 byte command.
+.Pp
+.Bd -literal
+ctladm read 0:2 -l 0 -d 1 -b 512 -f - > foo
+.Ed
+.Pp
+Read the first 512 byte block from LUN 2 and dump it to the file
+.Pa foo .
+.Bd -literal
+ctladm write 0:3 -l 0xff432140 -d 20 -b 512 -f /tmp/bar
+.Ed
+.Pp
+Read 10240 bytes from the file
+.Pa /tmp/bar
+and write it to target 0, LUN 3,
+starting at LBA 0xff432140.
+.Pp
+.Dl ctladm create -b ramdisk -s 10485760000000000
+.Pp
+Create a LUN with the
+.Dq fake
+ramdisk as a backing store.
+The LUN will claim to have a size of approximately 10 petabytes.
+.Pp
+.Dl ctladm create -b block -o file=src/usr.sbin/ctladm/ctladm.8
+.Pp
+Create a LUN using the block backend, and specify the file
+.Pa src/usr.sbin/ctladm/ctladm.8
+as the backing store.
+The size of the LUN will be derived from the size of the file.
+.Pp
+.Dl ctladm create -b block -o file=src/usr.sbin/ctladm/ctladm.8 -S MYSERIAL321 -d MYDEVID123
+.Pp
+Create a LUN using the block backend, specify the file
+.Pa src/usr.sbin/ctladm/ctladm.8
+as the backing store, and specify the
+.Tn SCSI
+VPD page 0x80 serial number
+.Pq Fl S
+and page 0x83 device ID
+.Pq Fl d .
+.Pp
+.Dl ctladm remove -b block -l 12
+.Pp
+Remove LUN 12, which is handled by the block backend, from the system.
+.Pp
+.Dl ctladm devlist
+.Pp
+List configured LUNs in the system, along with their backend and serial
+number.
+This works when the Front End Target Drivers are enabled or disabled.
+.Pp
+.Dl ctladm lunlist
+.Pp
+List all LUNs in the system, along with their inquiry data and device type.
+This only works when the FETDs are enabled, since the commands go through the
+ioctl port.
+.Pp
+.Dl ctladm inject 0:6 -i mediumerr -p read -r 0,512 -c
+.Pp
+Inject a medium error on LUN 6 for every read that covers the first 512
+blocks of the LUN.
+.Pp
+.Bd -literal -offset indent
+ctladm inject 0:6 -i custom -p tur -s 18 "f0 0 02 s12 04 02"
+.Ed
+.Pp
+Inject a custom error on LUN 6 for the next TEST UNIT READY command only.
+This will result in a sense key of NOT READY (0x02), and an ASC/ASCQ of
+0x04,0x02 ("Logical unit not ready, initializing command required").
+.Sh SEE ALSO
+.Xr cam 3 ,
+.Xr cam_cdbparse 3 ,
+.Xr cam 4 ,
+.Xr xpt 4 ,
+.Xr camcontrol 8
+.Sh HISTORY
+The
+.Nm
+utility was originally written during the Winter/Spring of 2003 as an
+interface to CTL.
+.Sh AUTHORS
+.An Ken Merry Aq ken@FreeBSD.org
diff --git a/usr.sbin/ctladm/ctladm.c b/usr.sbin/ctladm/ctladm.c
new file mode 100644
index 0000000..77e7b02
--- /dev/null
+++ b/usr.sbin/ctladm/ctladm.c
@@ -0,0 +1,4005 @@
+/*-
+ * Copyright (c) 2003, 2004 Silicon Graphics International Corp.
+ * Copyright (c) 1997-2007 Kenneth D. Merry
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.c#4 $
+ */
+/*
+ * CAM Target Layer exercise program.
+ *
+ * Author: Ken Merry <ken@FreeBSD.org>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/callout.h>
+#include <sys/sbuf.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <string.h>
+#include <errno.h>
+#include <err.h>
+#include <ctype.h>
+#include <bsdxml.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_backend_block.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <camlib.h>
+#include "ctladm.h"
+
+#ifdef min
+#undef min
+#endif
+#define min(x,y) (((x) < (y)) ? (x) : (y))
+
+typedef enum {
+ CTLADM_CMD_TUR,
+ CTLADM_CMD_INQUIRY,
+ CTLADM_CMD_REQ_SENSE,
+ CTLADM_CMD_ARRAYLIST,
+ CTLADM_CMD_REPORT_LUNS,
+ CTLADM_CMD_HELP,
+ CTLADM_CMD_DEVLIST,
+ CTLADM_CMD_ADDDEV,
+ CTLADM_CMD_RM,
+ CTLADM_CMD_CREATE,
+ CTLADM_CMD_READ,
+ CTLADM_CMD_WRITE,
+ CTLADM_CMD_PORT,
+ CTLADM_CMD_READCAPACITY,
+ CTLADM_CMD_MODESENSE,
+ CTLADM_CMD_DUMPOOA,
+ CTLADM_CMD_DUMPSTRUCTS,
+ CTLADM_CMD_START,
+ CTLADM_CMD_STOP,
+ CTLADM_CMD_SYNC_CACHE,
+ CTLADM_CMD_SHUTDOWN,
+ CTLADM_CMD_STARTUP,
+ CTLADM_CMD_LUNLIST,
+ CTLADM_CMD_HARDSTOP,
+ CTLADM_CMD_HARDSTART,
+ CTLADM_CMD_DELAY,
+ CTLADM_CMD_REALSYNC,
+ CTLADM_CMD_SETSYNC,
+ CTLADM_CMD_GETSYNC,
+ CTLADM_CMD_ERR_INJECT,
+ CTLADM_CMD_BBRREAD,
+ CTLADM_CMD_PRES_IN,
+ CTLADM_CMD_PRES_OUT,
+ CTLADM_CMD_INQ_VPD_DEVID,
+ CTLADM_CMD_RTPG
+} ctladm_cmdfunction;
+
+typedef enum {
+ CTLADM_ARG_NONE = 0x0000000,
+ CTLADM_ARG_AUTOSENSE = 0x0000001,
+ CTLADM_ARG_DEVICE = 0x0000002,
+ CTLADM_ARG_ARRAYSIZE = 0x0000004,
+ CTLADM_ARG_BACKEND = 0x0000008,
+ CTLADM_ARG_CDBSIZE = 0x0000010,
+ CTLADM_ARG_DATALEN = 0x0000020,
+ CTLADM_ARG_FILENAME = 0x0000040,
+ CTLADM_ARG_LBA = 0x0000080,
+ CTLADM_ARG_PC = 0x0000100,
+ CTLADM_ARG_PAGE_CODE = 0x0000200,
+ CTLADM_ARG_PAGE_LIST = 0x0000400,
+ CTLADM_ARG_SUBPAGE = 0x0000800,
+ CTLADM_ARG_PAGELIST = 0x0001000,
+ CTLADM_ARG_DBD = 0x0002000,
+ CTLADM_ARG_TARG_LUN = 0x0004000,
+ CTLADM_ARG_BLOCKSIZE = 0x0008000,
+ CTLADM_ARG_IMMED = 0x0010000,
+ CTLADM_ARG_RELADR = 0x0020000,
+ CTLADM_ARG_RETRIES = 0x0040000,
+ CTLADM_ARG_ONOFFLINE = 0x0080000,
+ CTLADM_ARG_ONESHOT = 0x0100000,
+ CTLADM_ARG_TIMEOUT = 0x0200000,
+ CTLADM_ARG_INITIATOR = 0x0400000,
+ CTLADM_ARG_NOCOPY = 0x0800000,
+ CTLADM_ARG_NEED_TL = 0x1000000
+} ctladm_cmdargs;
+
+struct ctladm_opts {
+ const char *optname;
+ uint32_t cmdnum;
+ ctladm_cmdargs argnum;
+ const char *subopt;
+};
+
+typedef enum {
+ CC_OR_NOT_FOUND,
+ CC_OR_AMBIGUOUS,
+ CC_OR_FOUND
+} ctladm_optret;
+
+static const char rw_opts[] = "Nb:c:d:f:l:";
+static const char startstop_opts[] = "io";
+
+struct ctladm_opts option_table[] = {
+ {"adddev", CTLADM_CMD_ADDDEV, CTLADM_ARG_NONE, NULL},
+ {"bbrread", CTLADM_CMD_BBRREAD, CTLADM_ARG_NEED_TL, "d:l:"},
+ {"create", CTLADM_CMD_CREATE, CTLADM_ARG_NONE, "b:B:d:l:o:s:S:t:"},
+ {"delay", CTLADM_CMD_DELAY, CTLADM_ARG_NEED_TL, "T:l:t:"},
+ {"devid", CTLADM_CMD_INQ_VPD_DEVID, CTLADM_ARG_NEED_TL, NULL},
+ {"devlist", CTLADM_CMD_DEVLIST, CTLADM_ARG_NONE, "b:vx"},
+ {"dumpooa", CTLADM_CMD_DUMPOOA, CTLADM_ARG_NONE, NULL},
+ {"dumpstructs", CTLADM_CMD_DUMPSTRUCTS, CTLADM_ARG_NONE, NULL},
+ {"getsync", CTLADM_CMD_GETSYNC, CTLADM_ARG_NEED_TL, NULL},
+ {"hardstart", CTLADM_CMD_HARDSTART, CTLADM_ARG_NONE, NULL},
+ {"hardstop", CTLADM_CMD_HARDSTOP, CTLADM_ARG_NONE, NULL},
+ {"help", CTLADM_CMD_HELP, CTLADM_ARG_NONE, NULL},
+ {"inject", CTLADM_CMD_ERR_INJECT, CTLADM_ARG_NEED_TL, "cd:i:p:r:s:"},
+ {"inquiry", CTLADM_CMD_INQUIRY, CTLADM_ARG_NEED_TL, NULL},
+ {"lunlist", CTLADM_CMD_LUNLIST, CTLADM_ARG_NONE, NULL},
+ {"modesense", CTLADM_CMD_MODESENSE, CTLADM_ARG_NEED_TL, "P:S:dlm:c:"},
+ {"port", CTLADM_CMD_PORT, CTLADM_ARG_NONE, "lo:p:qt:w:W:x"},
+ {"prin", CTLADM_CMD_PRES_IN, CTLADM_ARG_NEED_TL, "a:"},
+ {"prout", CTLADM_CMD_PRES_OUT, CTLADM_ARG_NEED_TL, "a:k:r:s:"},
+ {"read", CTLADM_CMD_READ, CTLADM_ARG_NEED_TL, rw_opts},
+ {"readcapacity", CTLADM_CMD_READCAPACITY, CTLADM_ARG_NEED_TL, "c:"},
+ {"realsync", CTLADM_CMD_REALSYNC, CTLADM_ARG_NONE, NULL},
+ {"remove", CTLADM_CMD_RM, CTLADM_ARG_NONE, "b:l:o:"},
+ {"reportluns", CTLADM_CMD_REPORT_LUNS, CTLADM_ARG_NEED_TL, NULL},
+ {"reqsense", CTLADM_CMD_REQ_SENSE, CTLADM_ARG_NEED_TL, NULL},
+ {"rtpg", CTLADM_CMD_RTPG, CTLADM_ARG_NEED_TL, NULL},
+ {"setsync", CTLADM_CMD_SETSYNC, CTLADM_ARG_NEED_TL, "i:"},
+ {"shutdown", CTLADM_CMD_SHUTDOWN, CTLADM_ARG_NONE, NULL},
+ {"start", CTLADM_CMD_START, CTLADM_ARG_NEED_TL, startstop_opts},
+ {"startup", CTLADM_CMD_STARTUP, CTLADM_ARG_NONE, NULL},
+ {"stop", CTLADM_CMD_STOP, CTLADM_ARG_NEED_TL, startstop_opts},
+ {"synccache", CTLADM_CMD_SYNC_CACHE, CTLADM_ARG_NEED_TL, "b:c:il:r"},
+ {"tur", CTLADM_CMD_TUR, CTLADM_ARG_NEED_TL, NULL},
+ {"write", CTLADM_CMD_WRITE, CTLADM_ARG_NEED_TL, rw_opts},
+ {"-?", CTLADM_CMD_HELP, CTLADM_ARG_NONE, NULL},
+ {"-h", CTLADM_CMD_HELP, CTLADM_ARG_NONE, NULL},
+ {NULL, 0, 0, NULL}
+};
+
+
+ctladm_optret getoption(struct ctladm_opts *table, char *arg, uint32_t *cmdnum,
+ ctladm_cmdargs *argnum, const char **subopt);
+static int cctl_parse_tl(char *str, int *target, int *lun);
+static int cctl_dump_ooa(int fd, int argc, char **argv);
+static int cctl_port_dump(int fd, int quiet, int xml, int32_t fe_num,
+ ctl_port_type port_type);
+static int cctl_port(int fd, int argc, char **argv, char *combinedopt);
+static int cctl_do_io(int fd, int retries, union ctl_io *io, const char *func);
+static int cctl_delay(int fd, int target, int lun, int argc, char **argv,
+ char *combinedopt);
+static int cctl_lunlist(int fd);
+static void cctl_cfi_mt_statusstr(cfi_mt_status status, char *str, int str_len);
+static void cctl_cfi_bbr_statusstr(cfi_bbrread_status, char *str, int str_len);
+static int cctl_hardstopstart(int fd, ctladm_cmdfunction command);
+static int cctl_bbrread(int fd, int target, int lun, int iid, int argc,
+ char **argv, char *combinedopt);
+static int cctl_startup_shutdown(int fd, int target, int lun, int iid,
+ ctladm_cmdfunction command);
+static int cctl_sync_cache(int fd, int target, int lun, int iid, int retries,
+ int argc, char **argv, char *combinedopt);
+static int cctl_start_stop(int fd, int target, int lun, int iid, int retries,
+ int start, int argc, char **argv, char *combinedopt);
+static int cctl_mode_sense(int fd, int target, int lun, int iid, int retries,
+ int argc, char **argv, char *combinedopt);
+static int cctl_read_capacity(int fd, int target, int lun, int iid,
+ int retries, int argc, char **argv,
+ char *combinedopt);
+static int cctl_read_write(int fd, int target, int lun, int iid, int retries,
+ int argc, char **argv, char *combinedopt,
+ ctladm_cmdfunction command);
+static int cctl_get_luns(int fd, int target, int lun, int iid, int retries,
+ struct scsi_report_luns_data **lun_data,
+ uint32_t *num_luns);
+static int cctl_report_luns(int fd, int target, int lun, int iid, int retries);
+static int cctl_tur(int fd, int target, int lun, int iid, int retries);
+static int cctl_get_inquiry(int fd, int target, int lun, int iid, int retries,
+ char *path_str, int path_len,
+ struct scsi_inquiry_data *inq_data);
+static int cctl_inquiry(int fd, int target, int lun, int iid, int retries);
+static int cctl_req_sense(int fd, int target, int lun, int iid, int retries);
+static int cctl_persistent_reserve_in(int fd, int target, int lun,
+ int initiator, int argc, char **argv,
+ char *combinedopt, int retry_count);
+static int cctl_persistent_reserve_out(int fd, int target, int lun,
+ int initiator, int argc, char **argv,
+ char *combinedopt, int retry_count);
+static int cctl_create_lun(int fd, int argc, char **argv, char *combinedopt);
+static int cctl_inquiry_vpd_devid(int fd, int target, int lun, int initiator);
+static int cctl_report_target_port_group(int fd, int target, int lun,
+ int initiator);
+
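+/*
+ * Look up "arg" in an option table. Unambiguous prefixes are accepted;
+ * for example, "devl" selects "devlist", while "dev" matches both
+ * "devid" and "devlist" and is reported as ambiguous. An exact match
+ * wins even when it is also a prefix of another entry.
+ */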
+ctladm_optret
+getoption(struct ctladm_opts *table, char *arg, uint32_t *cmdnum,
+ ctladm_cmdargs *argnum, const char **subopt)
+{
+ struct ctladm_opts *opts;
+ int num_matches = 0;
+
+ for (opts = table; (opts != NULL) && (opts->optname != NULL);
+ opts++) {
+ if (strncmp(opts->optname, arg, strlen(arg)) == 0) {
+ *cmdnum = opts->cmdnum;
+ *argnum = opts->argnum;
+ *subopt = opts->subopt;
+
+ if (strcmp(opts->optname, arg) == 0)
+ return (CC_OR_FOUND);
+
+ if (++num_matches > 1)
+ return(CC_OR_AMBIGUOUS);
+ }
+ }
+
+ if (num_matches > 0)
+ return(CC_OR_FOUND);
+ else
+ return(CC_OR_NOT_FOUND);
+}
+
+
+static int
+cctl_parse_tl(char *str, int *target, int *lun)
+{
+ char *tmpstr;
+ int retval;
+
+ retval = 0;
+
+ while (isspace(*str) && (*str != '\0'))
+ str++;
+
+ tmpstr = (char *)strtok(str, ":");
+ if ((tmpstr != NULL) && (*tmpstr != '\0')) {
+ *target = strtol(tmpstr, NULL, 0);
+ tmpstr = (char *)strtok(NULL, ":");
+ if ((tmpstr != NULL) && (*tmpstr != '\0')) {
+ *lun = strtol(tmpstr, NULL, 0);
+ } else
+ retval = -1;
+ } else
+ retval = -1;
+
+ return (retval);
+}
+
+static int
+cctl_dump_ooa(int fd, int argc, char **argv)
+{
+ struct ctl_ooa ooa;
+ long double cmd_latency;
+ int num_entries, len;
+ int target = -1, lun = -1;
+ int retval = 0;
+ unsigned int i;
+
+ num_entries = 104;
+
+ if ((argc > 2)
+ && (isdigit(argv[2][0]))) {
+ retval = cctl_parse_tl(argv[2], &target, &lun);
+ if (retval != 0)
+ warnx("invalid target:lun argument %s", argv[2]);
+ }
+retry:
+
+ len = num_entries * sizeof(struct ctl_ooa_entry);
+
+ bzero(&ooa, sizeof(ooa));
+
+ ooa.entries = malloc(len);
+
+ if (ooa.entries == NULL) {
+ warn("%s: error mallocing %d bytes", __func__, len);
+ return (1);
+ }
+
+ if (argc > 2) {
+ ooa.lun_num = lun;
+ } else
+ ooa.flags |= CTL_OOA_FLAG_ALL_LUNS;
+
+ ooa.alloc_len = len;
+ ooa.alloc_num = num_entries;
+ if (ioctl(fd, CTL_GET_OOA, &ooa) == -1) {
+ warn("%s: CTL_GET_OOA ioctl failed", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (ooa.status == CTL_OOA_NEED_MORE_SPACE) {
+ num_entries = num_entries * 2;
+ free(ooa.entries);
+ ooa.entries = NULL;
+ goto retry;
+ }
+
+ if (ooa.status != CTL_OOA_OK) {
+ warnx("%s: CTL_GET_OOA ioctl returned error %d", __func__,
+ ooa.status);
+ retval = 1;
+ goto bailout;
+ }
+
+ fprintf(stdout, "Dumping OOA queues\n");
+ for (i = 0; i < ooa.fill_num; i++) {
+ struct ctl_ooa_entry *entry;
+ char cdb_str[(SCSI_MAX_CDBLEN * 3) +1];
+ struct bintime delta_bt;
+ struct timespec ts;
+
+ entry = &ooa.entries[i];
+
+ delta_bt = ooa.cur_bt;
+ bintime_sub(&delta_bt, &entry->start_bt);
+ bintime2timespec(&delta_bt, &ts);
+ cmd_latency = ts.tv_sec * 1000;
+ if (ts.tv_nsec > 0)
+ cmd_latency += ts.tv_nsec / 1000000;
+
+ fprintf(stdout, "LUN %jd tag 0x%04x%s%s%s%s%s: %s. CDB: %s "
+ "(%0.0Lf ms)\n",
+ (intmax_t)entry->lun_num, entry->tag_num,
+ (entry->cmd_flags & CTL_OOACMD_FLAG_BLOCKED) ?
+ " BLOCKED" : "",
+ (entry->cmd_flags & CTL_OOACMD_FLAG_DMA) ? " DMA" : "",
+ (entry->cmd_flags & CTL_OOACMD_FLAG_DMA_QUEUED) ?
+ " DMAQUEUED" : "",
+ (entry->cmd_flags & CTL_OOACMD_FLAG_ABORT) ?
+ " ABORT" : "",
+ (entry->cmd_flags & CTL_OOACMD_FLAG_RTR) ? " RTR" :"",
+ scsi_op_desc(entry->cdb[0], NULL),
+ scsi_cdb_string(entry->cdb, cdb_str, sizeof(cdb_str)),
+ cmd_latency);
+ }
+ fprintf(stdout, "OOA queues dump done\n");
+#if 0
+ if (ioctl(fd, CTL_DUMP_OOA) == -1) {
+ warn("%s: CTL_DUMP_OOA ioctl failed", __func__);
+ return (1);
+ }
+#endif
+
+bailout:
+ free(ooa.entries);
+
+ return (retval);
+}
+
+static int
+cctl_dump_structs(int fd, ctladm_cmdargs cmdargs __unused)
+{
+ if (ioctl(fd, CTL_DUMP_STRUCTS) == -1) {
+ warn(__func__);
+ return (1);
+ }
+ return (0);
+}
+
+static int
+cctl_port_dump(int fd, int quiet, int xml, int32_t targ_port,
+ ctl_port_type port_type)
+{
+ struct ctl_port_list port_list;
+ struct ctl_port_entry *entries;
+ struct sbuf *sb = NULL;
+ int num_entries;
+ int did_print = 0;
+ unsigned int i;
+
+ num_entries = 16;
+
+retry:
+
+ entries = malloc(sizeof(*entries) * num_entries);
+ if (entries == NULL) {
+ warn("%s: error allocating %zu bytes", __func__,
+ sizeof(*entries) * num_entries);
+ return (1);
+ }
+ bzero(&port_list, sizeof(port_list));
+ port_list.entries = entries;
+ port_list.alloc_num = num_entries;
+ port_list.alloc_len = num_entries * sizeof(*entries);
+ if (ioctl(fd, CTL_GET_PORT_LIST, &port_list) != 0) {
+ warn("%s: CTL_GET_PORT_LIST ioctl failed", __func__);
+ free(entries);
+ return (1);
+ }
+ if (port_list.status == CTL_PORT_LIST_NEED_MORE_SPACE) {
+ printf("%s: allocated %d, need %d, retrying\n", __func__,
+ num_entries, port_list.fill_num + port_list.dropped_num);
+ free(entries);
+ num_entries = port_list.fill_num + port_list.dropped_num;
+ goto retry;
+ }
+
+ if ((quiet == 0)
+ && (xml == 0))
+ printf("Port Online Type Name pp vp %-18s %-18s\n",
+ "WWNN", "WWPN");
+
+ if (xml != 0) {
+ sb = sbuf_new_auto();
+ sbuf_printf(sb, "<ctlfelist>\n");
+ }
+ for (i = 0; i < port_list.fill_num; i++) {
+ struct ctl_port_entry *entry;
+ const char *type;
+
+ entry = &entries[i];
+
+ switch (entry->port_type) {
+ case CTL_PORT_FC:
+ type = "FC";
+ break;
+ case CTL_PORT_SCSI:
+ type = "SCSI";
+ break;
+ case CTL_PORT_IOCTL:
+ type = "IOCTL";
+ break;
+ case CTL_PORT_INTERNAL:
+ type = "INTERNAL";
+ break;
+ case CTL_PORT_ISC:
+ type = "ISC";
+ break;
+ default:
+ type = "UNKNOWN";
+ break;
+ }
+
+ /*
+ * If the user specified a frontend number or a particular
+ * frontend type, only print out that particular frontend
+ * or frontend type.
+ */
+ if ((targ_port != -1)
+ && (targ_port != entry->targ_port))
+ continue;
+ else if ((port_type != CTL_PORT_NONE)
+ && ((port_type & entry->port_type) == 0))
+ continue;
+
+ did_print = 1;
+
+#if 0
+ printf("Num: %ju Type: %s (%#x) Name: %s Physical Port: %d "
+ "Virtual Port: %d\n", (uintmax_t)entry->fe_num, type,
+ entry->port_type, entry->fe_name, entry->physical_port,
+ entry->virtual_port);
+ printf("WWNN %#jx WWPN %#jx Online: %s\n",
+ (uintmax_t)entry->wwnn, (uintmax_t)entry->wwpn,
+ (entry->online) ? "YES" : "NO" );
+#endif
+ if (xml == 0) {
+ printf("%-4d %-6s %-8s %-12s %-2d %-2d %#-18jx "
+ "%#-18jx\n",
+ entry->targ_port, (entry->online) ? "YES" : "NO",
+ type, entry->port_name, entry->physical_port,
+ entry->virtual_port, (uintmax_t)entry->wwnn,
+ (uintmax_t)entry->wwpn);
+ } else {
+ sbuf_printf(sb, "<targ_port id=\"%d\">\n",
+ entry->targ_port);
+ sbuf_printf(sb, "<online>%s</online>\n",
+ (entry->online) ? "YES" : "NO");
+ sbuf_printf(sb, "<port_type>%s</port_type>\n", type);
+ sbuf_printf(sb, "<port_name>%s</port_name>\n",
+ entry->port_name);
+ sbuf_printf(sb, "<physical_port>%d</physical_port>\n",
+ entry->physical_port);
+ sbuf_printf(sb, "<virtual_port>%d</virtual_port>\n",
+ entry->virtual_port);
+ sbuf_printf(sb, "<wwnn>%#jx</wwnn>\n",
+ (uintmax_t)entry->wwnn);
+ sbuf_printf(sb, "<wwpn>%#jx</wwpn>\n",
+ (uintmax_t)entry->wwpn);
+ sbuf_printf(sb, "</targ_port>\n");
+ }
+
+ }
+ if (xml != 0) {
+ sbuf_printf(sb, "</ctlfelist>\n");
+ sbuf_finish(sb);
+ printf("%s", sbuf_data(sb));
+ sbuf_delete(sb);
+ }
+
+	free(entries);
+
+ /*
+ * Give some indication that we didn't find the frontend or
+ * frontend type requested by the user. We could print something
+ * out, but it would probably be better to hide that behind a
+ * verbose flag.
+ */
+ if ((did_print == 0)
+ && ((targ_port != -1)
+ || (port_type != CTL_PORT_NONE)))
+ return (1);
+ else
+ return (0);
+}
+
+typedef enum {
+ CCTL_PORT_MODE_NONE,
+ CCTL_PORT_MODE_LIST,
+ CCTL_PORT_MODE_SET,
+ CCTL_PORT_MODE_ON,
+ CCTL_PORT_MODE_OFF
+} cctl_port_mode;
+
+struct ctladm_opts cctl_fe_table[] = {
+ {"fc", CTL_PORT_FC, CTLADM_ARG_NONE, NULL},
+ {"scsi", CTL_PORT_SCSI, CTLADM_ARG_NONE, NULL},
+ {"internal", CTL_PORT_INTERNAL, CTLADM_ARG_NONE, NULL},
+ {"all", CTL_PORT_ALL, CTLADM_ARG_NONE, NULL},
+ {NULL, 0, 0, NULL}
+};
+
+static int
+cctl_port(int fd, int argc, char **argv, char *combinedopt)
+{
+ int c;
+ int32_t targ_port = -1;
+ int retval = 0;
+ int wwnn_set = 0, wwpn_set = 0;
+ uint64_t wwnn = 0, wwpn = 0;
+ cctl_port_mode port_mode = CCTL_PORT_MODE_NONE;
+ struct ctl_port_entry entry;
+ ctl_port_type port_type = CTL_PORT_NONE;
+ int quiet = 0, xml = 0;
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'l':
+ if (port_mode != CCTL_PORT_MODE_NONE)
+ goto bailout_badarg;
+
+ port_mode = CCTL_PORT_MODE_LIST;
+ break;
+ case 'o':
+ if (port_mode != CCTL_PORT_MODE_NONE)
+ goto bailout_badarg;
+
+ if (strcasecmp(optarg, "on") == 0)
+ port_mode = CCTL_PORT_MODE_ON;
+ else if (strcasecmp(optarg, "off") == 0)
+ port_mode = CCTL_PORT_MODE_OFF;
+ else {
+ warnx("Invalid -o argument %s, \"on\" or "
+ "\"off\" are the only valid args",
+ optarg);
+ retval = 1;
+ goto bailout;
+ }
+ break;
+ case 'p':
+ targ_port = strtol(optarg, NULL, 0);
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 't': {
+ ctladm_optret optret;
+ ctladm_cmdargs argnum;
+ const char *subopt;
+ ctl_port_type tmp_port_type;
+
+ optret = getoption(cctl_fe_table, optarg, &tmp_port_type,
+ &argnum, &subopt);
+ if (optret == CC_OR_AMBIGUOUS) {
+ warnx("%s: ambiguous frontend type %s",
+ __func__, optarg);
+ retval = 1;
+ goto bailout;
+ } else if (optret == CC_OR_NOT_FOUND) {
+ warnx("%s: invalid frontend type %s",
+ __func__, optarg);
+ retval = 1;
+ goto bailout;
+ }
+
+ port_type |= tmp_port_type;
+ break;
+ }
+ case 'w':
+ if ((port_mode != CCTL_PORT_MODE_NONE)
+ && (port_mode != CCTL_PORT_MODE_SET))
+ goto bailout_badarg;
+
+ port_mode = CCTL_PORT_MODE_SET;
+
+ wwnn = strtoull(optarg, NULL, 0);
+ wwnn_set = 1;
+ break;
+ case 'W':
+ if ((port_mode != CCTL_PORT_MODE_NONE)
+ && (port_mode != CCTL_PORT_MODE_SET))
+ goto bailout_badarg;
+
+ port_mode = CCTL_PORT_MODE_SET;
+
+ wwpn = strtoull(optarg, NULL, 0);
+ wwpn_set = 1;
+ break;
+ case 'x':
+ xml = 1;
+ break;
+ }
+ }
+
+ /*
+ * The user can specify either one or more frontend types (-t), or
+ * a specific frontend, but not both.
+ *
+ * If the user didn't specify a frontend type or number, set it to
+ * all. This is primarily needed for the enable/disable ioctls.
+ * This will be a no-op for the listing code. For the set ioctl,
+ * we'll throw an error, since that only works on one port at a time.
+ */
+ if ((port_type != CTL_PORT_NONE) && (targ_port != -1)) {
+		warnx("%s: can only specify one of -t or -p", __func__);
+ retval = 1;
+ goto bailout;
+ } else if ((targ_port == -1) && (port_type == CTL_PORT_NONE))
+ port_type = CTL_PORT_ALL;
+
+	bzero(&entry, sizeof(entry));
+
+ /*
+ * These are needed for all but list/dump mode.
+ */
+ entry.port_type = port_type;
+ entry.targ_port = targ_port;
+
+ switch (port_mode) {
+ case CCTL_PORT_MODE_LIST:
+		retval = cctl_port_dump(fd, quiet, xml, targ_port, port_type);
+ break;
+ case CCTL_PORT_MODE_SET:
+ if (targ_port == -1) {
+			warnx("%s: -w and -W require -p", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (wwnn_set) {
+ entry.flags |= CTL_PORT_WWNN_VALID;
+ entry.wwnn = wwnn;
+ }
+ if (wwpn_set) {
+ entry.flags |= CTL_PORT_WWPN_VALID;
+ entry.wwpn = wwpn;
+ }
+
+ if (ioctl(fd, CTL_SET_PORT_WWNS, &entry) == -1) {
+ warn("%s: CTL_SET_PORT_WWNS ioctl failed", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ break;
+ case CCTL_PORT_MODE_ON:
+ if (ioctl(fd, CTL_ENABLE_PORT, &entry) == -1) {
+ warn("%s: CTL_ENABLE_PORT ioctl failed", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ fprintf(stdout, "Front End Ports enabled\n");
+ break;
+ case CCTL_PORT_MODE_OFF:
+ if (ioctl(fd, CTL_DISABLE_PORT, &entry) == -1) {
+ warn("%s: CTL_DISABLE_PORT ioctl failed", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ fprintf(stdout, "Front End Ports disabled\n");
+ break;
+ default:
+ warnx("%s: one of -l, -o or -w/-W must be specified", __func__);
+ retval = 1;
+ goto bailout;
+ break;
+ }
+
+bailout:
+
+ return (retval);
+
+bailout_badarg:
+ warnx("%s: only one of -l, -o or -w/-W may be specified", __func__);
+ return (1);
+}
+
+static int
+cctl_do_io(int fd, int retries, union ctl_io *io, const char *func)
+{
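+	/*
+	 * Submit the I/O to CTL; if CTL reports a non-success status for
+	 * the command, resubmit it until it succeeds or the retry count
+	 * is exhausted.  ioctl(2) failures are returned immediately.
+	 */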
+ do {
+ if (ioctl(fd, CTL_IO, io) == -1) {
+ warn("%s: error sending CTL_IO ioctl", func);
+ return (-1);
+ }
+ } while (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+ && (retries-- > 0));
+
+ return (0);
+}
+
+static int
+cctl_delay(int fd, int target, int lun, int argc, char **argv,
+ char *combinedopt)
+{
+ struct ctl_io_delay_info delay_info;
+ char *delayloc = NULL;
+ char *delaytype = NULL;
+ int delaytime = -1;
+ int retval;
+ int c;
+
+ retval = 0;
+
+ memset(&delay_info, 0, sizeof(delay_info));
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'T':
+ delaytype = strdup(optarg);
+ break;
+ case 'l':
+ delayloc = strdup(optarg);
+ break;
+ case 't':
+ delaytime = strtoul(optarg, NULL, 0);
+ break;
+ }
+ }
+
+ if (delaytime == -1) {
+ warnx("%s: you must specify the delaytime with -t", __func__);
+ retval = 1;
+ goto bailout;
+ }
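+
+	/*
+	 * A delay location is required: the delay can be injected either
+	 * just before the data movement phase ("datamove") or just before
+	 * final command completion ("done").
+	 */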
+	if (delayloc == NULL) {
+		warnx("%s: you must specify the delay location with -l",
+		      __func__);
+		retval = 1;
+		goto bailout;
+	}
+
+	if (strcasecmp(delayloc, "datamove") == 0)
+ delay_info.delay_loc = CTL_DELAY_LOC_DATAMOVE;
+ else if (strcasecmp(delayloc, "done") == 0)
+ delay_info.delay_loc = CTL_DELAY_LOC_DONE;
+ else {
+ warnx("%s: invalid delay location %s", __func__, delayloc);
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((delaytype == NULL)
+ || (strcmp(delaytype, "oneshot") == 0))
+ delay_info.delay_type = CTL_DELAY_TYPE_ONESHOT;
+ else if (strcmp(delaytype, "cont") == 0)
+ delay_info.delay_type = CTL_DELAY_TYPE_CONT;
+ else {
+ warnx("%s: invalid delay type %s", __func__, delaytype);
+ retval = 1;
+ goto bailout;
+ }
+
+ delay_info.target_id = target;
+ delay_info.lun_id = lun;
+ delay_info.delay_secs = delaytime;
+
+ if (ioctl(fd, CTL_DELAY_IO, &delay_info) == -1) {
+ warn("%s: CTL_DELAY_IO ioctl failed", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ switch (delay_info.status) {
+ case CTL_DELAY_STATUS_NONE:
+ warnx("%s: no delay status??", __func__);
+ retval = 1;
+ break;
+ case CTL_DELAY_STATUS_OK:
+ break;
+ case CTL_DELAY_STATUS_INVALID_LUN:
+ warnx("%s: invalid lun %d", __func__, lun);
+ retval = 1;
+ break;
+ case CTL_DELAY_STATUS_INVALID_TYPE:
+ warnx("%s: invalid delay type %d", __func__,
+ delay_info.delay_type);
+ retval = 1;
+ break;
+ case CTL_DELAY_STATUS_INVALID_LOC:
+ warnx("%s: delay location %s not implemented?", __func__,
+ delayloc);
+ retval = 1;
+ break;
+ case CTL_DELAY_STATUS_NOT_IMPLEMENTED:
+ warnx("%s: delay not implemented in the kernel", __func__);
+ warnx("%s: recompile with the CTL_IO_DELAY flag set", __func__);
+ retval = 1;
+ break;
+ default:
+ warnx("%s: unknown delay return status %d", __func__,
+ delay_info.status);
+ retval = 1;
+ break;
+ }
+bailout:
+
+	/* free(NULL) is a no-op, so unconditional frees are safe here. */
+	free(delayloc);
+	free(delaytype);
+
+ return (retval);
+}
+
+static int
+cctl_realsync(int fd, int argc, char **argv)
+{
+ int syncstate;
+ int retval;
+ char *syncarg;
+
+ retval = 0;
+
+ if (argc != 3) {
+ warnx("%s %s takes exactly one argument", argv[0], argv[1]);
+ retval = 1;
+ goto bailout;
+ }
+
+ syncarg = argv[2];
+
+ if (strncasecmp(syncarg, "query", min(strlen(syncarg),
+ strlen("query"))) == 0) {
+ if (ioctl(fd, CTL_REALSYNC_GET, &syncstate) == -1) {
+ warn("%s: CTL_REALSYNC_GET ioctl failed", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ fprintf(stdout, "SYNCHRONIZE CACHE support is: ");
+ switch (syncstate) {
+ case 0:
+ fprintf(stdout, "OFF\n");
+ break;
+ case 1:
+ fprintf(stdout, "ON\n");
+ break;
+ default:
+ fprintf(stdout, "unknown (%d)\n", syncstate);
+ break;
+ }
+ goto bailout;
+ } else if (strcasecmp(syncarg, "on") == 0) {
+ syncstate = 1;
+ } else if (strcasecmp(syncarg, "off") == 0) {
+ syncstate = 0;
+ } else {
+ warnx("%s: invalid realsync argument %s", __func__, syncarg);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (ioctl(fd, CTL_REALSYNC_SET, &syncstate) == -1) {
+ warn("%s: CTL_REALSYNC_SET ioctl failed", __func__);
+ retval = 1;
+ goto bailout;
+ }
+bailout:
+ return (retval);
+}
+
+static int
+cctl_getsetsync(int fd, int target, int lun, ctladm_cmdfunction command,
+ int argc, char **argv, char *combinedopt)
+{
+ struct ctl_sync_info sync_info;
+ uint32_t ioctl_cmd;
+ int sync_interval = -1;
+ int retval;
+ int c;
+
+ retval = 0;
+
+ memset(&sync_info, 0, sizeof(sync_info));
+ sync_info.target_id = target;
+ sync_info.lun_id = lun;
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'i':
+ sync_interval = strtoul(optarg, NULL, 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (command == CTLADM_CMD_SETSYNC) {
+ if (sync_interval == -1) {
+ warnx("%s: you must specify the sync interval with -i",
+ __func__);
+ retval = 1;
+ goto bailout;
+ }
+ sync_info.sync_interval = sync_interval;
+ ioctl_cmd = CTL_SETSYNC;
+ } else {
+ ioctl_cmd = CTL_GETSYNC;
+ }
+
+ if (ioctl(fd, ioctl_cmd, &sync_info) == -1) {
+ warn("%s: CTL_%sSYNC ioctl failed", __func__,
+ (command == CTLADM_CMD_SETSYNC) ? "SET" : "GET");
+ retval = 1;
+ goto bailout;
+ }
+
+ switch (sync_info.status) {
+ case CTL_GS_SYNC_OK:
+ if (command == CTLADM_CMD_GETSYNC) {
+ fprintf(stdout, "%d:%d: sync interval: %d\n",
+ target, lun, sync_info.sync_interval);
+ }
+ break;
+ case CTL_GS_SYNC_NO_LUN:
+ warnx("%s: unknown target:LUN %d:%d", __func__, target, lun);
+ retval = 1;
+ break;
+ case CTL_GS_SYNC_NONE:
+ default:
+ warnx("%s: unknown CTL_%sSYNC status %d", __func__,
+ (command == CTLADM_CMD_SETSYNC) ? "SET" : "GET",
+ sync_info.status);
+ retval = 1;
+ break;
+ }
+bailout:
+ return (retval);
+}
+
+struct ctladm_opts cctl_err_types[] = {
+ {"aborted", CTL_LUN_INJ_ABORTED, CTLADM_ARG_NONE, NULL},
+ {"mediumerr", CTL_LUN_INJ_MEDIUM_ERR, CTLADM_ARG_NONE, NULL},
+ {"ua", CTL_LUN_INJ_UA, CTLADM_ARG_NONE, NULL},
+ {"custom", CTL_LUN_INJ_CUSTOM, CTLADM_ARG_NONE, NULL},
+ {NULL, 0, 0, NULL}
+};
+
+struct ctladm_opts cctl_err_patterns[] = {
+ {"read", CTL_LUN_PAT_READ, CTLADM_ARG_NONE, NULL},
+ {"write", CTL_LUN_PAT_WRITE, CTLADM_ARG_NONE, NULL},
+ {"rw", CTL_LUN_PAT_READWRITE, CTLADM_ARG_NONE, NULL},
+ {"readwrite", CTL_LUN_PAT_READWRITE, CTLADM_ARG_NONE, NULL},
+ {"readcap", CTL_LUN_PAT_READCAP, CTLADM_ARG_NONE, NULL},
+ {"tur", CTL_LUN_PAT_TUR, CTLADM_ARG_NONE, NULL},
+ {"any", CTL_LUN_PAT_ANY, CTLADM_ARG_NONE, NULL},
+#if 0
+ {"cmd", CTL_LUN_PAT_CMD, CTLADM_ARG_NONE, NULL},
+#endif
+ {NULL, 0, 0, NULL}
+};
+
+static int
+cctl_error_inject(int fd, uint32_t target, uint32_t lun, int argc, char **argv,
+ char *combinedopt)
+{
+	int retval = 0;
+ struct ctl_error_desc err_desc;
+ uint64_t lba = 0;
+ uint32_t len = 0;
+ uint64_t delete_id = 0;
+ int delete_id_set = 0;
+ int continuous = 0;
+ int sense_len = 0;
+ int fd_sense = 0;
+ int c;
+
+ bzero(&err_desc, sizeof(err_desc));
+ err_desc.target_id = target;
+ err_desc.lun_id = lun;
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'c':
+ continuous = 1;
+ break;
+ case 'd':
+ delete_id = strtoull(optarg, NULL, 0);
+ delete_id_set = 1;
+ break;
+ case 'i':
+ case 'p': {
+ ctladm_optret optret;
+ ctladm_cmdargs argnum;
+ const char *subopt;
+
+ if (c == 'i') {
+ ctl_lun_error err_type;
+
+ if (err_desc.lun_error != CTL_LUN_INJ_NONE) {
+ warnx("%s: can't specify multiple -i "
+ "arguments", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ optret = getoption(cctl_err_types, optarg,
+ &err_type, &argnum, &subopt);
+ err_desc.lun_error = err_type;
+ } else {
+ ctl_lun_error_pattern pattern;
+
+ optret = getoption(cctl_err_patterns, optarg,
+ &pattern, &argnum, &subopt);
+ err_desc.error_pattern |= pattern;
+ }
+
+ if (optret == CC_OR_AMBIGUOUS) {
+ warnx("%s: ambiguous argument %s", __func__,
+ optarg);
+ retval = 1;
+ goto bailout;
+ } else if (optret == CC_OR_NOT_FOUND) {
+ warnx("%s: argument %s not found", __func__,
+ optarg);
+ retval = 1;
+ goto bailout;
+ }
+ break;
+ }
+ case 'r': {
+ char *tmpstr, *tmpstr2;
+
+ tmpstr = strdup(optarg);
+ if (tmpstr == NULL) {
+ warn("%s: error duplicating string %s",
+ __func__, optarg);
+ retval = 1;
+ goto bailout;
+ }
+
+ tmpstr2 = strsep(&tmpstr, ",");
+ if (tmpstr2 == NULL) {
+ warnx("%s: invalid -r argument %s", __func__,
+ optarg);
+ retval = 1;
+ free(tmpstr);
+ goto bailout;
+ }
+ lba = strtoull(tmpstr2, NULL, 0);
+ tmpstr2 = strsep(&tmpstr, ",");
+ if (tmpstr2 == NULL) {
+ warnx("%s: no len argument for -r lba,len, got"
+ " %s", __func__, optarg);
+ retval = 1;
+ free(tmpstr);
+ goto bailout;
+ }
+ len = strtoul(tmpstr2, NULL, 0);
+ free(tmpstr);
+ break;
+ }
+ case 's': {
+ struct get_hook hook;
+ char *sensestr;
+
+ sense_len = strtol(optarg, NULL, 0);
+ if (sense_len <= 0) {
+ warnx("invalid number of sense bytes %d",
+ sense_len);
+ retval = 1;
+ goto bailout;
+ }
+
+ sense_len = MIN(sense_len, SSD_FULL_SIZE);
+
+ hook.argc = argc - optind;
+ hook.argv = argv + optind;
+ hook.got = 0;
+
+ sensestr = cget(&hook, NULL);
+ if ((sensestr != NULL)
+ && (sensestr[0] == '-')) {
+ fd_sense = 1;
+ } else {
+ buff_encode_visit(
+ (uint8_t *)&err_desc.custom_sense,
+ sense_len, sensestr, iget, &hook);
+ }
+ optind += hook.got;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (delete_id_set != 0) {
+ err_desc.serial = delete_id;
+ if (ioctl(fd, CTL_ERROR_INJECT_DELETE, &err_desc) == -1) {
+ warn("%s: error issuing CTL_ERROR_INJECT_DELETE ioctl",
+ __func__);
+ retval = 1;
+ }
+ goto bailout;
+ }
+
+ if (err_desc.lun_error == CTL_LUN_INJ_NONE) {
+ warnx("%s: error injection command (-i) needed",
+ __func__);
+ retval = 1;
+ goto bailout;
+ } else if ((err_desc.lun_error == CTL_LUN_INJ_CUSTOM)
+ && (sense_len == 0)) {
+ warnx("%s: custom error requires -s", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (continuous != 0)
+ err_desc.lun_error |= CTL_LUN_INJ_CONTINUOUS;
+
+ /*
+ * If fd_sense is set, we need to read the sense data the user
+ * wants returned from stdin.
+ */
+ if (fd_sense == 1) {
+ ssize_t amt_read;
+ int amt_to_read = sense_len;
+ u_int8_t *buf_ptr = (uint8_t *)&err_desc.custom_sense;
+
+		while (amt_to_read > 0) {
+			amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read);
+			if (amt_read == -1) {
+				warn("error reading sense data from stdin");
+				retval = 1;
+				goto bailout;
+			}
+			if (amt_read == 0) {
+				warnx("premature EOF reading sense data "
+				      "from stdin");
+				retval = 1;
+				goto bailout;
+			}
+			amt_to_read -= amt_read;
+			buf_ptr += amt_read;
+		}
+ }
+
+ if (err_desc.error_pattern == CTL_LUN_PAT_NONE) {
+ warnx("%s: command pattern (-p) needed", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (len != 0) {
+ err_desc.error_pattern |= CTL_LUN_PAT_RANGE;
+ /*
+ * We could check here to see whether it's a read/write
+ * command, but that will be pointless once we allow
+ * custom patterns. At that point, the user could specify
+ * a READ(6) CDB type, and we wouldn't have an easy way here
+ * to verify whether range checking is possible there. The
+ * user will just figure it out when his error never gets
+ * executed.
+ */
+#if 0
+ if ((err_desc.pattern & CTL_LUN_PAT_READWRITE) == 0) {
+ warnx("%s: need read and/or write pattern if range "
+ "is specified", __func__);
+ retval = 1;
+ goto bailout;
+ }
+#endif
+ err_desc.lba_range.lba = lba;
+ err_desc.lba_range.len = len;
+ }
+
+ if (ioctl(fd, CTL_ERROR_INJECT, &err_desc) == -1) {
+ warn("%s: error issuing CTL_ERROR_INJECT ioctl", __func__);
+ retval = 1;
+ } else {
+ printf("Error injection succeeded, serial number is %ju\n",
+ (uintmax_t)err_desc.serial);
+ }
+bailout:
+
+ return (retval);
+}
+
+static int
+cctl_lunlist(int fd)
+{
+ struct scsi_report_luns_data *lun_data;
+ struct scsi_inquiry_data *inq_data;
+ uint32_t num_luns;
+ int target;
+ int initid;
+ unsigned int i;
+ int retval;
+
+	retval = 0;
+	lun_data = NULL;
+	inq_data = NULL;
+
+ target = 6;
+ initid = 7;
+
+ /*
+ * XXX KDM assuming LUN 0 is fine, but we may need to change this
+ * if we ever acquire the ability to have multiple targets.
+ */
+ if ((retval = cctl_get_luns(fd, target, /*lun*/ 0, initid,
+ /*retries*/ 2, &lun_data, &num_luns)) != 0)
+ goto bailout;
+
+ inq_data = malloc(sizeof(*inq_data));
+ if (inq_data == NULL) {
+		warn("%s: couldn't allocate memory for inquiry data",
+		     __func__);
+ retval = 1;
+ goto bailout;
+ }
+ for (i = 0; i < num_luns; i++) {
+ char scsi_path[40];
+ int lun_val;
+
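+		/*
+		 * Decode the SAM LUN address.  Peripheral addressing puts
+		 * the LUN in the second byte; flat addressing encodes a
+		 * larger LUN number across the first two bytes.
+		 */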
+ switch (lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) {
+ case RPL_LUNDATA_ATYP_PERIPH:
+ lun_val = lun_data->luns[i].lundata[1];
+ break;
+ case RPL_LUNDATA_ATYP_FLAT:
+ lun_val = (lun_data->luns[i].lundata[0] &
+ RPL_LUNDATA_FLAT_LUN_MASK) |
+ (lun_data->luns[i].lundata[1] <<
+ RPL_LUNDATA_FLAT_LUN_BITS);
+ break;
+ case RPL_LUNDATA_ATYP_LUN:
+ case RPL_LUNDATA_ATYP_EXTLUN:
+ default:
+ fprintf(stdout, "Unsupported LUN format %d\n",
+ lun_data->luns[i].lundata[0] &
+ RPL_LUNDATA_ATYP_MASK);
+ lun_val = -1;
+ break;
+ }
+ if (lun_val == -1)
+ continue;
+
+ if ((retval = cctl_get_inquiry(fd, target, lun_val, initid,
+ /*retries*/ 2, scsi_path,
+ sizeof(scsi_path),
+ inq_data)) != 0) {
+ goto bailout;
+ }
+ printf("%s", scsi_path);
+ scsi_print_inquiry(inq_data);
+ }
+bailout:
+
+ if (lun_data != NULL)
+ free(lun_data);
+
+ if (inq_data != NULL)
+ free(inq_data);
+
+ return (retval);
+}
+
+static void
+cctl_cfi_mt_statusstr(cfi_mt_status status, char *str, int str_len)
+{
+ switch (status) {
+ case CFI_MT_PORT_OFFLINE:
+ snprintf(str, str_len, "Port Offline");
+ break;
+ case CFI_MT_ERROR:
+ snprintf(str, str_len, "Error");
+ break;
+ case CFI_MT_SUCCESS:
+ snprintf(str, str_len, "Success");
+ break;
+ case CFI_MT_NONE:
+ snprintf(str, str_len, "None??");
+ break;
+ default:
+ snprintf(str, str_len, "Unknown status: %d", status);
+ break;
+ }
+}
+
+static void
+cctl_cfi_bbr_statusstr(cfi_bbrread_status status, char *str, int str_len)
+{
+ switch (status) {
+ case CFI_BBR_SUCCESS:
+ snprintf(str, str_len, "Success");
+ break;
+ case CFI_BBR_LUN_UNCONFIG:
+ snprintf(str, str_len, "LUN not configured");
+ break;
+ case CFI_BBR_NO_LUN:
+ snprintf(str, str_len, "LUN does not exist");
+ break;
+ case CFI_BBR_NO_MEM:
+ snprintf(str, str_len, "Memory allocation error");
+ break;
+ case CFI_BBR_BAD_LEN:
+ snprintf(str, str_len, "Length is not a multiple of blocksize");
+ break;
+ case CFI_BBR_RESERV_CONFLICT:
+ snprintf(str, str_len, "Reservation conflict");
+ break;
+ case CFI_BBR_LUN_STOPPED:
+ snprintf(str, str_len, "LUN is powered off");
+ break;
+ case CFI_BBR_LUN_OFFLINE_CTL:
+ snprintf(str, str_len, "LUN is offline");
+ break;
+ case CFI_BBR_LUN_OFFLINE_RC:
+ snprintf(str, str_len, "RAIDCore array is offline (double "
+ "failure?)");
+ break;
+ case CFI_BBR_SCSI_ERROR:
+ snprintf(str, str_len, "SCSI Error");
+ break;
+ case CFI_BBR_ERROR:
+ snprintf(str, str_len, "Error");
+ break;
+ default:
+ snprintf(str, str_len, "Unknown status: %d", status);
+ break;
+ }
+}
+
+static int
+cctl_hardstopstart(int fd, ctladm_cmdfunction command)
+{
+ struct ctl_hard_startstop_info hs_info;
+ char error_str[256];
+ int do_start;
+ int retval;
+
+ retval = 0;
+
+ if (command == CTLADM_CMD_HARDSTART)
+ do_start = 1;
+ else
+ do_start = 0;
+
+ if (ioctl(fd, (do_start == 1) ? CTL_HARD_START : CTL_HARD_STOP,
+ &hs_info) == -1) {
+ warn("%s: CTL_HARD_%s ioctl failed", __func__,
+ (do_start == 1) ? "START" : "STOP");
+ retval = 1;
+ goto bailout;
+ }
+
+ fprintf(stdout, "Hard %s Status: ", (command == CTLADM_CMD_HARDSTOP) ?
+ "Stop" : "Start");
+ cctl_cfi_mt_statusstr(hs_info.status, error_str, sizeof(error_str));
+ fprintf(stdout, "%s\n", error_str);
+ fprintf(stdout, "Total LUNs: %d\n", hs_info.total_luns);
+ fprintf(stdout, "LUNs complete: %d\n", hs_info.luns_complete);
+ fprintf(stdout, "LUNs failed: %d\n", hs_info.luns_failed);
+
+bailout:
+ return (retval);
+}
+
+static int
+cctl_bbrread(int fd, int target __unused, int lun, int iid __unused,
+ int argc, char **argv, char *combinedopt)
+{
+ struct ctl_bbrread_info bbr_info;
+ char error_str[256];
+ int datalen = -1;
+ uint64_t lba = 0;
+ int lba_set = 0;
+ int retval;
+ int c;
+
+ retval = 0;
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'd':
+ datalen = strtoul(optarg, NULL, 0);
+ break;
+ case 'l':
+ lba = strtoull(optarg, NULL, 0);
+ lba_set = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (lba_set == 0) {
+ warnx("%s: you must specify an LBA with -l", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (datalen == -1) {
+ warnx("%s: you must specify a length with -d", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ bbr_info.lun_num = lun;
+ bbr_info.lba = lba;
+ /*
+ * XXX KDM get the blocksize first??
+ */
+ if ((datalen % 512) != 0) {
+ warnx("%s: data length %d is not a multiple of 512 bytes",
+ __func__, datalen);
+ retval = 1;
+ goto bailout;
+ }
+ bbr_info.len = datalen;
+
+ if (ioctl(fd, CTL_BBRREAD, &bbr_info) == -1) {
+ warn("%s: CTL_BBRREAD ioctl failed", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ cctl_cfi_mt_statusstr(bbr_info.status, error_str, sizeof(error_str));
+ fprintf(stdout, "BBR Read Overall Status: %s\n", error_str);
+ cctl_cfi_bbr_statusstr(bbr_info.bbr_status, error_str,
+ sizeof(error_str));
+ fprintf(stdout, "BBR Read Status: %s\n", error_str);
+ /*
+ * XXX KDM should we bother printing out SCSI status if we get
+ * CFI_BBR_SCSI_ERROR back?
+ *
+ * Return non-zero if this fails?
+ */
+bailout:
+ return (retval);
+}
+
+static int
+cctl_startup_shutdown(int fd, int target, int lun, int iid,
+ ctladm_cmdfunction command)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ struct scsi_report_luns_data *lun_data;
+ struct scsi_inquiry_data *inq_data;
+ uint32_t num_luns;
+ unsigned int i;
+ int retval;
+
+	retval = 0;
+	lun_data = NULL;
+	inq_data = NULL;
+
+ /*
+ * - report luns
+ * - step through each lun, do an inquiry
+ * - check OOA queue on direct access luns
+ * - send stop with offline bit to each direct access device with a
+ * clear OOA queue
+ * - if we get a reservation conflict, reset the LUN to clear it
+ * and reissue the stop with the offline bit set
+ */
+
+ id.id = iid;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ warnx("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ if ((retval = cctl_get_luns(fd, target, lun, iid, /*retries*/ 2,
+ &lun_data, &num_luns)) != 0)
+ goto bailout;
+
+ inq_data = malloc(sizeof(*inq_data));
+ if (inq_data == NULL) {
+		warn("%s: couldn't allocate memory for inquiry data",
+		     __func__);
+ retval = 1;
+ goto bailout;
+ }
+ for (i = 0; i < num_luns; i++) {
+ char scsi_path[40];
+ int lun_val;
+
+ /*
+ * XXX KDM figure out a way to share this code with
+ * cctl_lunlist()?
+ */
+ switch (lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) {
+ case RPL_LUNDATA_ATYP_PERIPH:
+ lun_val = lun_data->luns[i].lundata[1];
+ break;
+ case RPL_LUNDATA_ATYP_FLAT:
+ lun_val = (lun_data->luns[i].lundata[0] &
+ RPL_LUNDATA_FLAT_LUN_MASK) |
+ (lun_data->luns[i].lundata[1] <<
+ RPL_LUNDATA_FLAT_LUN_BITS);
+ break;
+ case RPL_LUNDATA_ATYP_LUN:
+ case RPL_LUNDATA_ATYP_EXTLUN:
+ default:
+ fprintf(stdout, "Unsupported LUN format %d\n",
+ lun_data->luns[i].lundata[0] &
+ RPL_LUNDATA_ATYP_MASK);
+ lun_val = -1;
+ break;
+ }
+ if (lun_val == -1)
+ continue;
+
+ if ((retval = cctl_get_inquiry(fd, target, lun_val, iid,
+ /*retries*/ 2, scsi_path,
+ sizeof(scsi_path),
+ inq_data)) != 0) {
+ goto bailout;
+ }
+ printf("%s", scsi_path);
+ scsi_print_inquiry(inq_data);
+ /*
+ * We only want to shutdown direct access devices.
+ */
+ if (SID_TYPE(inq_data) != T_DIRECT) {
+ printf("%s LUN is not direct access, skipped\n",
+ scsi_path);
+ continue;
+ }
+
+ if (command == CTLADM_CMD_SHUTDOWN) {
+ struct ctl_ooa_info ooa_info;
+
+ ooa_info.target_id = target;
+ ooa_info.lun_id = lun_val;
+
+ if (ioctl(fd, CTL_CHECK_OOA, &ooa_info) == -1) {
+ printf("%s CTL_CHECK_OOA ioctl failed\n",
+ scsi_path);
+ continue;
+ }
+
+ if (ooa_info.status != CTL_OOA_SUCCESS) {
+ printf("%s CTL_CHECK_OOA returned status %d\n",
+ scsi_path, ooa_info.status);
+ continue;
+ }
+ if (ooa_info.num_entries != 0) {
+ printf("%s %d entr%s in the OOA queue, "
+ "skipping shutdown\n", scsi_path,
+ ooa_info.num_entries,
+ (ooa_info.num_entries > 1)?"ies" : "y" );
+ continue;
+ }
+ }
+
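+		/*
+		 * Use an ordered tag for shutdown so that any queued I/O
+		 * completes before the stop; a simple tag is sufficient
+		 * for startup.
+		 */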
+ ctl_scsi_start_stop(/*io*/ io,
+ /*start*/(command == CTLADM_CMD_STARTUP) ?
+ 1 : 0,
+ /*load_eject*/ 0,
+ /*immediate*/ 0,
+ /*power_conditions*/ SSS_PC_START_VALID,
+ /*onoffline*/ 1,
+ /*ctl_tag_type*/
+ (command == CTLADM_CMD_STARTUP) ?
+ CTL_TAG_SIMPLE :CTL_TAG_ORDERED,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun_val;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, /*retries*/ 3, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+ ctl_io_error_print(io, inq_data, stderr);
+ else {
+ printf("%s LUN is now %s\n", scsi_path,
+ (command == CTLADM_CMD_STARTUP) ? "online" :
+ "offline");
+ }
+ }
+bailout:
+ if (lun_data != NULL)
+ free(lun_data);
+
+ if (inq_data != NULL)
+ free(inq_data);
+
+ if (io != NULL)
+ ctl_scsi_free_io(io);
+
+ return (retval);
+}
+
+static int
+cctl_sync_cache(int fd, int target, int lun, int iid, int retries,
+ int argc, char **argv, char *combinedopt)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ int cdb_size = -1;
+ int retval;
+ uint64_t our_lba = 0;
+ uint32_t our_block_count = 0;
+ int reladr = 0, immed = 0;
+ int c;
+
+ id.id = iid;
+ retval = 0;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ warnx("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'b':
+ our_block_count = strtoul(optarg, NULL, 0);
+ break;
+ case 'c':
+ cdb_size = strtol(optarg, NULL, 0);
+ break;
+ case 'i':
+ immed = 1;
+ break;
+ case 'l':
+ our_lba = strtoull(optarg, NULL, 0);
+ break;
+ case 'r':
+ reladr = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (cdb_size != -1) {
+ switch (cdb_size) {
+ case 10:
+ case 16:
+ break;
+ default:
+ warnx("%s: invalid cdbsize %d, valid sizes are 10 "
+ "and 16", __func__, cdb_size);
+ retval = 1;
+ goto bailout;
+ break; /* NOTREACHED */
+ }
+ } else
+ cdb_size = 10;
+
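+	/*
+	 * A block count of zero asks the target to synchronize everything
+	 * from the starting LBA through the last block on the medium.
+	 */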
+ ctl_scsi_sync_cache(/*io*/ io,
+ /*immed*/ immed,
+ /*reladr*/ reladr,
+ /*minimum_cdb_size*/ cdb_size,
+ /*starting_lba*/ our_lba,
+ /*block_count*/ our_block_count,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, retries, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ fprintf(stdout, "Cache synchronized successfully\n");
+ } else
+ ctl_io_error_print(io, NULL, stderr);
+bailout:
+ ctl_scsi_free_io(io);
+
+ return (retval);
+}
+
+static int
+cctl_start_stop(int fd, int target, int lun, int iid, int retries, int start,
+ int argc, char **argv, char *combinedopt)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ char scsi_path[40];
+ int immed = 0, onoffline = 0;
+ int retval, c;
+
+ id.id = iid;
+ retval = 0;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ warnx("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'i':
+ immed = 1;
+ break;
+ case 'o':
+ onoffline = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * Use an ordered tag for the stop command, to guarantee that any
+ * pending I/O will finish before the stop command executes. This
+ * would normally be the case anyway, since CTL will basically
+ * treat the start/stop command as an ordered command with respect
+ * to any other command except an INQUIRY. (See ctl_ser_table.c.)
+ */
+ ctl_scsi_start_stop(/*io*/ io,
+ /*start*/ start,
+ /*load_eject*/ 0,
+ /*immediate*/ immed,
+ /*power_conditions*/ SSS_PC_START_VALID,
+ /*onoffline*/ onoffline,
+ /*ctl_tag_type*/ start ? CTL_TAG_SIMPLE :
+ CTL_TAG_ORDERED,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, retries, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ ctl_scsi_path_string(io, scsi_path, sizeof(scsi_path));
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ fprintf(stdout, "%s LUN %s successfully\n", scsi_path,
+ (start) ? "started" : "stopped");
+ } else
+ ctl_io_error_print(io, NULL, stderr);
+
+bailout:
+ ctl_scsi_free_io(io);
+
+ return (retval);
+}
+
+static int
+cctl_mode_sense(int fd, int target, int lun, int iid, int retries,
+ int argc, char **argv, char *combinedopt)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ uint32_t datalen;
+ uint8_t *dataptr;
+ int pc = -1, cdbsize, retval, dbd = 0, subpage = -1;
+ int list = 0;
+ int page_code = -1;
+ int c;
+
+ id.id = iid;
+ cdbsize = 0;
+ retval = 0;
+ dataptr = NULL;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ warn("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'P':
+ pc = strtoul(optarg, NULL, 0);
+ break;
+ case 'S':
+ subpage = strtoul(optarg, NULL, 0);
+ break;
+ case 'd':
+ dbd = 1;
+ break;
+ case 'l':
+ list = 1;
+ break;
+ case 'm':
+ page_code = strtoul(optarg, NULL, 0);
+ break;
+ case 'c':
+ cdbsize = strtol(optarg, NULL, 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (((list == 0) && (page_code == -1))
+ || ((list != 0) && (page_code != -1))) {
+		warnx("%s: you must specify exactly one of -m or -l",
+		      __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((page_code != -1)
+ && ((page_code > SMS_ALL_PAGES_PAGE)
+ || (page_code < 0))) {
+ warnx("%s: page code %d is out of range", __func__,
+ page_code);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (list == 1) {
+ page_code = SMS_ALL_PAGES_PAGE;
+ if (pc != -1) {
+ warnx("%s: arg -P makes no sense with -l",
+ __func__);
+ retval = 1;
+ goto bailout;
+ }
+ if (subpage != -1) {
+ warnx("%s: arg -S makes no sense with -l", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ }
+
+ if (pc == -1)
+ pc = SMS_PAGE_CTRL_CURRENT;
+ else {
+ if ((pc > 3)
+ || (pc < 0)) {
+ warnx("%s: page control value %d is out of range: 0-3",
+ __func__, pc);
+ retval = 1;
+ goto bailout;
+ }
+ }
+
+ if ((subpage != -1)
+ && ((subpage > 255)
+ || (subpage < 0))) {
+ warnx("%s: subpage code %d is out of range: 0-255", __func__,
+ subpage);
+ retval = 1;
+ goto bailout;
+ }
+ if (cdbsize != 0) {
+ switch (cdbsize) {
+ case 6:
+ case 10:
+ break;
+ default:
+ warnx("%s: invalid cdbsize %d, valid sizes are 6 "
+ "and 10", __func__, cdbsize);
+ retval = 1;
+ goto bailout;
+ break;
+ }
+ } else
+ cdbsize = 6;
+
+ if (subpage == -1)
+ subpage = 0;
+
+ if (cdbsize == 6)
+ datalen = 255;
+ else
+ datalen = 65535;
+
+ dataptr = (uint8_t *)malloc(datalen);
+ if (dataptr == NULL) {
+ warn("%s: can't allocate %d bytes", __func__, datalen);
+ retval = 1;
+ goto bailout;
+ }
+
+ memset(dataptr, 0, datalen);
+
+ ctl_scsi_mode_sense(io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/ datalen,
+ /*dbd*/ dbd,
+ /*llbaa*/ 0,
+ /*page_code*/ page_code,
+ /*pc*/ pc << 6,
+ /*subpage*/ subpage,
+ /*minimum_cdb_size*/ cdbsize,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, retries, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ int pages_len, used_len;
+ uint32_t returned_len;
+ uint8_t *ndataptr;
+
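+		/*
+		 * The mode data length field does not include itself, so
+		 * add its size (1 byte for MODE SENSE(6), 2 bytes for
+		 * MODE SENSE(10)) to get the total returned length, then
+		 * skip the header and any block descriptors to find the
+		 * start of the mode pages.
+		 */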
+ if (io->scsiio.cdb[0] == MODE_SENSE_6) {
+ struct scsi_mode_hdr_6 *hdr6;
+ int bdlen;
+
+ hdr6 = (struct scsi_mode_hdr_6 *)dataptr;
+
+ returned_len = hdr6->datalen + 1;
+ bdlen = hdr6->block_descr_len;
+
+ ndataptr = (uint8_t *)((uint8_t *)&hdr6[1] + bdlen);
+ } else {
+ struct scsi_mode_hdr_10 *hdr10;
+ int bdlen;
+
+ hdr10 = (struct scsi_mode_hdr_10 *)dataptr;
+
+ returned_len = scsi_2btoul(hdr10->datalen) + 2;
+ bdlen = scsi_2btoul(hdr10->block_descr_len);
+
+ ndataptr = (uint8_t *)((uint8_t *)&hdr10[1] + bdlen);
+ }
+ /* just in case they can give us more than we allocated for */
+ returned_len = min(returned_len, datalen);
+ pages_len = returned_len - (ndataptr - dataptr);
+#if 0
+ fprintf(stdout, "returned_len = %d, pages_len = %d\n",
+ returned_len, pages_len);
+#endif
+ if (list == 1) {
+ fprintf(stdout, "Supported mode pages:\n");
+ for (used_len = 0; used_len < pages_len;) {
+ struct scsi_mode_page_header *header;
+
+ header = (struct scsi_mode_page_header *)
+ &ndataptr[used_len];
+ fprintf(stdout, "%d\n", header->page_code);
+ used_len += header->page_length + 2;
+ }
+ } else {
+ for (used_len = 0; used_len < pages_len; used_len++) {
+ fprintf(stdout, "0x%x ", ndataptr[used_len]);
+ if (((used_len+1) % 16) == 0)
+ fprintf(stdout, "\n");
+ }
+ fprintf(stdout, "\n");
+ }
+ } else
+ ctl_io_error_print(io, NULL, stderr);
+bailout:
+
+ ctl_scsi_free_io(io);
+
+ if (dataptr != NULL)
+ free(dataptr);
+
+ return (retval);
+}
+
+static int
+cctl_read_capacity(int fd, int target, int lun, int iid, int retries,
+ int argc, char **argv, char *combinedopt)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ struct scsi_read_capacity_data *data;
+ struct scsi_read_capacity_data_long *longdata;
+ int cdbsize = -1, retval;
+ uint8_t *dataptr;
+ int c;
+
+ dataptr = NULL;
+ retval = 0;
+ id.id = iid;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+		warn("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'c':
+ cdbsize = strtol(optarg, NULL, 0);
+ break;
+ default:
+ break;
+ }
+ }
+ if (cdbsize != -1) {
+ switch (cdbsize) {
+ case 10:
+ case 16:
+ break;
+ default:
+ warnx("%s: invalid cdbsize %d, valid sizes are 10 "
+ "and 16", __func__, cdbsize);
+ retval = 1;
+ goto bailout;
+ break; /* NOTREACHED */
+ }
+ } else
+ cdbsize = 10;
+
+ dataptr = (uint8_t *)malloc(sizeof(*longdata));
+ if (dataptr == NULL) {
+		warn("%s: can't allocate %zd bytes", __func__,
+		     sizeof(*longdata));
+ retval = 1;
+ goto bailout;
+ }
+ memset(dataptr, 0, sizeof(*longdata));
+
+retry:
+
+ switch (cdbsize) {
+ case 10:
+ ctl_scsi_read_capacity(io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/ sizeof(*longdata),
+ /*addr*/ 0,
+ /*reladr*/ 0,
+ /*pmi*/ 0,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+ break;
+ case 16:
+ ctl_scsi_read_capacity_16(io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/ sizeof(*longdata),
+ /*addr*/ 0,
+ /*reladr*/ 0,
+ /*pmi*/ 0,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+ break;
+ }
+
+ io->io_hdr.nexus.initid = id;
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+
+ if (cctl_do_io(fd, retries, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ uint64_t maxlba;
+ uint32_t blocksize;
+
+ if (cdbsize == 10) {
+
+ data = (struct scsi_read_capacity_data *)dataptr;
+
+ maxlba = scsi_4btoul(data->addr);
+ blocksize = scsi_4btoul(data->length);
+
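+			/*
+			 * A returned LBA of 0xffffffff means the capacity
+			 * won't fit in READ CAPACITY(10)'s 32-bit field,
+			 * so retry with READ CAPACITY(16).
+			 */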
+ if (maxlba == 0xffffffff) {
+ cdbsize = 16;
+ goto retry;
+ }
+ } else {
+			longdata =
+			    (struct scsi_read_capacity_data_long *)dataptr;
+
+ maxlba = scsi_8btou64(longdata->addr);
+ blocksize = scsi_4btoul(longdata->length);
+ }
+
+ fprintf(stdout, "Disk Capacity: %ju, Blocksize: %d\n",
+ (uintmax_t)maxlba, blocksize);
+ } else {
+ ctl_io_error_print(io, NULL, stderr);
+ }
+bailout:
+ ctl_scsi_free_io(io);
+
+ if (dataptr != NULL)
+ free(dataptr);
+
+ return (retval);
+}
+
+static int
+cctl_read_write(int fd, int target, int lun, int iid, int retries,
+ int argc, char **argv, char *combinedopt,
+ ctladm_cmdfunction command)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ int file_fd, do_stdio;
+ int cdbsize = -1, databytes;
+ uint8_t *dataptr;
+ char *filename = NULL;
+ int datalen = -1, blocksize = -1;
+ uint64_t lba = 0;
+ int lba_set = 0;
+ int retval;
+ int c;
+
+ retval = 0;
+ do_stdio = 0;
+ dataptr = NULL;
+ file_fd = -1;
+ id.id = iid;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+		warn("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'N':
+ io->io_hdr.flags |= CTL_FLAG_NO_DATAMOVE;
+ break;
+ case 'b':
+ blocksize = strtoul(optarg, NULL, 0);
+ break;
+ case 'c':
+ cdbsize = strtoul(optarg, NULL, 0);
+ break;
+ case 'd':
+ datalen = strtoul(optarg, NULL, 0);
+ break;
+ case 'f':
+ filename = strdup(optarg);
+ break;
+ case 'l':
+ lba = strtoull(optarg, NULL, 0);
+ lba_set = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ if (filename == NULL) {
+ warnx("%s: you must supply a filename using -f", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (datalen == -1) {
+ warnx("%s: you must specify the data length with -d", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (lba_set == 0) {
+ warnx("%s: you must specify the LBA with -l", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (blocksize == -1) {
+ warnx("%s: you must specify the blocksize with -b", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (cdbsize != -1) {
+ switch (cdbsize) {
+ case 6:
+ case 10:
+ case 12:
+ case 16:
+ break;
+ default:
+ warnx("%s: invalid cdbsize %d, valid sizes are 6, "
+ "10, 12 or 16", __func__, cdbsize);
+ retval = 1;
+ goto bailout;
+ break; /* NOTREACHED */
+ }
+ } else
+ cdbsize = 6;
+
+ databytes = datalen * blocksize;
+ dataptr = (uint8_t *)malloc(databytes);
+
+ if (dataptr == NULL) {
+		warn("%s: can't allocate %d bytes", __func__, databytes);
+ retval = 1;
+ goto bailout;
+ }
+ if (strcmp(filename, "-") == 0) {
+ if (command == CTLADM_CMD_READ)
+ file_fd = STDOUT_FILENO;
+ else
+ file_fd = STDIN_FILENO;
+ do_stdio = 1;
+ } else {
+ file_fd = open(filename, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
+ if (file_fd == -1) {
+ warn("%s: can't open file %s", __func__, filename);
+ retval = 1;
+ goto bailout;
+ }
+ }
+
+ memset(dataptr, 0, databytes);
+
+ if (command == CTLADM_CMD_WRITE) {
+ int bytes_read;
+
+ bytes_read = read(file_fd, dataptr, databytes);
+ if (bytes_read == -1) {
+ warn("%s: error reading file %s", __func__, filename);
+ retval = 1;
+ goto bailout;
+ }
+ if (bytes_read != databytes) {
+ warnx("%s: only read %d bytes from file %s",
+ __func__, bytes_read, filename);
+ retval = 1;
+ goto bailout;
+ }
+ }
+ ctl_scsi_read_write(io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/ databytes,
+ /*read_op*/ (command == CTLADM_CMD_READ) ? 1 : 0,
+ /*byte2*/ 0,
+ /*minimum_cdb_size*/ cdbsize,
+ /*lba*/ lba,
+ /*num_blocks*/ datalen,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, retries, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
+ && (command == CTLADM_CMD_READ)) {
+ int bytes_written;
+
+ bytes_written = write(file_fd, dataptr, databytes);
+		if (bytes_written == -1) {
+			warn("%s: can't write to %s", __func__, filename);
+			retval = 1;
+			goto bailout;
+		}
+ } else if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+ ctl_io_error_print(io, NULL, stderr);
+
+bailout:
+
+ ctl_scsi_free_io(io);
+
+	free(filename);
+
+	if (dataptr != NULL)
+		free(dataptr);
+
+ if ((do_stdio == 0)
+ && (file_fd != -1))
+ close(file_fd);
+
+ return (retval);
+}
+
+static int
+cctl_get_luns(int fd, int target, int lun, int iid, int retries, struct
+ scsi_report_luns_data **lun_data, uint32_t *num_luns)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ uint32_t nluns;
+ int lun_datalen;
+ int retval;
+
+ retval = 0;
+ id.id = iid;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ warnx("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ /*
+ * lun_data includes space for 1 lun, allocate space for 4 initially.
+ * If that isn't enough, we'll allocate more.
+ */
+ nluns = 4;
+retry:
+	lun_datalen = sizeof(**lun_data) +
+		(nluns * sizeof(struct scsi_report_luns_lundata));
+ *lun_data = malloc(lun_datalen);
+
+ if (*lun_data == NULL) {
+ warnx("%s: can't allocate memory", __func__);
+ ctl_scsi_free_io(io);
+ return (1);
+ }
+
+ ctl_scsi_report_luns(io,
+ /*data_ptr*/ (uint8_t *)*lun_data,
+ /*data_len*/ lun_datalen,
+ /*select_report*/ RPL_REPORT_ALL,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.initid = id;
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+
+ if (cctl_do_io(fd, retries, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ uint32_t returned_len, returned_luns;
+
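+		/*
+		 * The REPORT LUNS length field is in bytes, and each LUN
+		 * entry is 8 bytes long.
+		 */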
+ returned_len = scsi_4btoul((*lun_data)->length);
+ returned_luns = returned_len / 8;
+ if (returned_luns > nluns) {
+ nluns = returned_luns;
+ free(*lun_data);
+ goto retry;
+ }
+ /* These should be the same */
+ *num_luns = MIN(returned_luns, nluns);
+ } else {
+ ctl_io_error_print(io, NULL, stderr);
+ retval = 1;
+ }
+bailout:
+ ctl_scsi_free_io(io);
+
+ return (retval);
+}
+
+static int
+cctl_report_luns(int fd, int target, int lun, int iid, int retries)
+{
+ struct scsi_report_luns_data *lun_data;
+ uint32_t num_luns, i;
+ int retval;
+
+ lun_data = NULL;
+
+ if ((retval = cctl_get_luns(fd, target, lun, iid, retries, &lun_data,
+ &num_luns)) != 0)
+ goto bailout;
+
+ fprintf(stdout, "%u LUNs returned\n", num_luns);
+ for (i = 0; i < num_luns; i++) {
+ int lun_val;
+
+ /*
+ * XXX KDM figure out a way to share this code with
+ * cctl_lunlist()?
+ */
+ switch (lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) {
+ case RPL_LUNDATA_ATYP_PERIPH:
+ lun_val = lun_data->luns[i].lundata[1];
+ break;
+ case RPL_LUNDATA_ATYP_FLAT:
+ lun_val = (lun_data->luns[i].lundata[0] &
+ RPL_LUNDATA_FLAT_LUN_MASK) |
+ (lun_data->luns[i].lundata[1] <<
+ RPL_LUNDATA_FLAT_LUN_BITS);
+ break;
+ case RPL_LUNDATA_ATYP_LUN:
+ case RPL_LUNDATA_ATYP_EXTLUN:
+ default:
+ fprintf(stdout, "Unsupported LUN format %d\n",
+ lun_data->luns[i].lundata[0] &
+ RPL_LUNDATA_ATYP_MASK);
+ lun_val = -1;
+ break;
+ }
+ if (lun_val == -1)
+ continue;
+
+ fprintf(stdout, "%d\n", lun_val);
+ }
+
+bailout:
+ if (lun_data != NULL)
+ free(lun_data);
+
+ return (retval);
+}
+
+static int
+cctl_tur(int fd, int target, int lun, int iid, int retries)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+
+ id.id = iid;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ fprintf(stderr, "can't allocate memory\n");
+ return (1);
+ }
+
+ ctl_scsi_tur(io,
+ /* tag_type */ CTL_TAG_SIMPLE,
+ /* control */ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, retries, io, __func__) != 0) {
+ ctl_scsi_free_io(io);
+ return (1);
+ }
+
+	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
+		fprintf(stdout, "Unit is ready\n");
+	else
+		ctl_io_error_print(io, NULL, stderr);
+
+	ctl_scsi_free_io(io);
+
+	return (0);
+}
+
+static int
+cctl_get_inquiry(int fd, int target, int lun, int iid, int retries,
+ char *path_str, int path_len,
+ struct scsi_inquiry_data *inq_data)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ int retval;
+
+ retval = 0;
+
+ id.id = iid;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+		warnx("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ ctl_scsi_inquiry(/*io*/ io,
+ /*data_ptr*/ (uint8_t *)inq_data,
+ /*data_len*/ sizeof(*inq_data),
+ /*byte2*/ 0,
+ /*page_code*/ 0,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, retries, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
+ retval = 1;
+ ctl_io_error_print(io, NULL, stderr);
+ } else if (path_str != NULL)
+ ctl_scsi_path_string(io, path_str, path_len);
+
+bailout:
+ ctl_scsi_free_io(io);
+
+ return (retval);
+}
+
+static int
+cctl_inquiry(int fd, int target, int lun, int iid, int retries)
+{
+ struct scsi_inquiry_data *inq_data;
+ char scsi_path[40];
+ int retval;
+
+ retval = 0;
+
+ inq_data = malloc(sizeof(*inq_data));
+ if (inq_data == NULL) {
+ warnx("%s: can't allocate inquiry data", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((retval = cctl_get_inquiry(fd, target, lun, iid, retries, scsi_path,
+ sizeof(scsi_path), inq_data)) != 0)
+ goto bailout;
+
+ printf("%s", scsi_path);
+ scsi_print_inquiry(inq_data);
+
+bailout:
+ if (inq_data != NULL)
+ free(inq_data);
+
+ return (retval);
+}
+
+static int
+cctl_req_sense(int fd, int target, int lun, int iid, int retries)
+{
+ union ctl_io *io;
+ struct scsi_sense_data *sense_data;
+ struct ctl_id id;
+ int retval;
+
+ retval = 0;
+
+ id.id = iid;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+		warnx("%s: can't allocate memory", __func__);
+ return (1);
+ }
+	sense_data = malloc(sizeof(*sense_data));
+	if (sense_data == NULL) {
+		warn("%s: can't allocate %zd bytes", __func__,
+		     sizeof(*sense_data));
+		ctl_scsi_free_io(io);
+		return (1);
+	}
+	memset(sense_data, 0, sizeof(*sense_data));
+
+ ctl_scsi_request_sense(/*io*/ io,
+ /*data_ptr*/ (uint8_t *)sense_data,
+ /*data_len*/ sizeof(*sense_data),
+ /*byte2*/ 0,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, retries, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ bcopy(sense_data, &io->scsiio.sense_data, sizeof(*sense_data));
+ io->scsiio.sense_len = sizeof(*sense_data);
+ ctl_scsi_sense_print(&io->scsiio, NULL, stdout);
+ } else
+ ctl_io_error_print(io, NULL, stderr);
+
+bailout:
+
+ ctl_scsi_free_io(io);
+ free(sense_data);
+
+ return (retval);
+}
+
+static int
+cctl_report_target_port_group(int fd, int target, int lun, int initiator)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ uint32_t datalen;
+ uint8_t *dataptr;
+ int retval;
+
+ id.id = initiator;
+ dataptr = NULL;
+ retval = 0;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ warn("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ datalen = 64;
+ dataptr = (uint8_t *)malloc(datalen);
+ if (dataptr == NULL) {
+ warn("%s: can't allocate %d bytes", __func__, datalen);
+ retval = 1;
+ goto bailout;
+ }
+
+ memset(dataptr, 0, datalen);
+
+ ctl_scsi_maintenance_in(/*io*/ io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/ datalen,
+ /*action*/ SA_RPRT_TRGT_GRP,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, 0, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ int returned_len, used_len;
+
+		returned_len = scsi_4btoul(&dataptr[0]) + 4;
+		/* Don't print past the end of our 64 byte buffer. */
+		returned_len = min(returned_len, (int)datalen);
+
+ for (used_len = 0; used_len < returned_len; used_len++) {
+ fprintf(stdout, "0x%02x ", dataptr[used_len]);
+ if (((used_len+1) % 8) == 0)
+ fprintf(stdout, "\n");
+ }
+ fprintf(stdout, "\n");
+ } else
+ ctl_io_error_print(io, NULL, stderr);
+
+bailout:
+ ctl_scsi_free_io(io);
+
+ if (dataptr != NULL)
+ free(dataptr);
+
+ return (retval);
+}
+
+static int
+cctl_inquiry_vpd_devid(int fd, int target, int lun, int initiator)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ uint32_t datalen;
+ uint8_t *dataptr;
+ int retval;
+
+ id.id = initiator;
+ retval = 0;
+ dataptr = NULL;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ warn("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ datalen = 256;
+ dataptr = (uint8_t *)malloc(datalen);
+ if (dataptr == NULL) {
+ warn("%s: can't allocate %d bytes", __func__, datalen);
+ retval = 1;
+ goto bailout;
+ }
+
+ memset(dataptr, 0, datalen);
+
+ ctl_scsi_inquiry(/*io*/ io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/ datalen,
+ /*byte2*/ SI_EVPD,
+ /*page_code*/ SVPD_DEVICE_ID,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, 0, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ int returned_len, used_len;
+
+		returned_len = scsi_2btoul(&dataptr[2]) + 4;
+		/* Don't print past the end of our 256 byte buffer. */
+		returned_len = min(returned_len, (int)datalen);
+
+ for (used_len = 0; used_len < returned_len; used_len++) {
+ fprintf(stdout, "0x%02x ", dataptr[used_len]);
+ if (((used_len+1) % 8) == 0)
+ fprintf(stdout, "\n");
+ }
+ fprintf(stdout, "\n");
+ } else
+ ctl_io_error_print(io, NULL, stderr);
+
+bailout:
+ ctl_scsi_free_io(io);
+
+ if (dataptr != NULL)
+ free(dataptr);
+
+ return (retval);
+}
+
+static int
+cctl_persistent_reserve_in(int fd, int target, int lun, int initiator,
+ int argc, char **argv, char *combinedopt,
+ int retry_count)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ uint32_t datalen;
+ uint8_t *dataptr;
+ int action = -1;
+ int retval;
+ int c;
+
+ id.id = initiator;
+ retval = 0;
+ dataptr = NULL;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ warn("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'a':
+ action = strtol(optarg, NULL, 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (action < 0 || action > 2) {
+		warnx("action (-a) must be specified and in the range: 0-2");
+ retval = 1;
+ goto bailout;
+ }
+
+
+ datalen = 256;
+ dataptr = (uint8_t *)malloc(datalen);
+ if (dataptr == NULL) {
+ warn("%s: can't allocate %d bytes", __func__, datalen);
+ retval = 1;
+ goto bailout;
+ }
+
+ memset(dataptr, 0, datalen);
+
+ ctl_scsi_persistent_res_in(io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/ datalen,
+ /*action*/ action,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, retry_count, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ int returned_len, used_len;
+
+ returned_len = 0;
+
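+		/*
+		 * Each PERSISTENT RESERVE IN service action returns a
+		 * different amount of data: READ KEYS (0) and READ
+		 * RESERVATION (1) report an additional length at offset 4,
+		 * while REPORT CAPABILITIES (2) returns a fixed 8 bytes.
+		 */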
+ switch (action) {
+ case 0:
+ returned_len = scsi_4btoul(&dataptr[4]) + 8;
+ returned_len = min(returned_len, 256);
+ break;
+ case 1:
+ returned_len = scsi_4btoul(&dataptr[4]) + 8;
+ break;
+ case 2:
+ returned_len = 8;
+ break;
+		default:
+			warnx("%s: invalid action %d", __func__, action);
+			retval = 1;
+			goto bailout;
+			break; /* NOTREACHED */
+ }
+
+ for (used_len = 0; used_len < returned_len; used_len++) {
+ fprintf(stdout, "0x%02x ", dataptr[used_len]);
+ if (((used_len+1) % 8) == 0)
+ fprintf(stdout, "\n");
+ }
+ fprintf(stdout, "\n");
+ } else
+ ctl_io_error_print(io, NULL, stderr);
+
+bailout:
+ ctl_scsi_free_io(io);
+
+ if (dataptr != NULL)
+ free(dataptr);
+
+ return (retval);
+}
+
+static int
+cctl_persistent_reserve_out(int fd, int target, int lun, int initiator,
+ int argc, char **argv, char *combinedopt,
+ int retry_count)
+{
+ union ctl_io *io;
+ struct ctl_id id;
+ uint32_t datalen;
+ uint64_t key = 0, sa_key = 0;
+ int action = -1, restype = -1;
+ uint8_t *dataptr;
+ int retval;
+ int c;
+
+ id.id = initiator;
+ retval = 0;
+ dataptr = NULL;
+
+ io = ctl_scsi_alloc_io(id);
+ if (io == NULL) {
+ warn("%s: can't allocate memory", __func__);
+ return (1);
+ }
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'a':
+ action = strtol(optarg, NULL, 0);
+ break;
+ case 'k':
+ key = strtoull(optarg, NULL, 0);
+ break;
+ case 'r':
+ restype = strtol(optarg, NULL, 0);
+ break;
+ case 's':
+ sa_key = strtoull(optarg, NULL, 0);
+ break;
+ default:
+ break;
+ }
+ }
+ if (action < 0 || action > 5) {
+		warnx("action (-a) must be specified and in the range: 0-5");
+ retval = 1;
+ goto bailout;
+ }
+
+ if (restype < 0 || restype > 5) {
+		if (action != 0 && action != 5 && action != 3) {
+			warnx("restype (-r) must be specified and in the "
+			      "range: 0-5");
+ retval = 1;
+ goto bailout;
+ }
+ }
+
+ datalen = 24;
+ dataptr = (uint8_t *)malloc(datalen);
+ if (dataptr == NULL) {
+ warn("%s: can't allocate %d bytes", __func__, datalen);
+ retval = 1;
+ goto bailout;
+ }
+
+ memset(dataptr, 0, datalen);
+
+ ctl_scsi_persistent_res_out(io,
+ /*data_ptr*/ dataptr,
+ /*data_len*/ datalen,
+ /*action*/ action,
+ /*type*/ restype,
+ /*key*/ key,
+ /*sa key*/ sa_key,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+
+ io->io_hdr.nexus.targ_target.id = target;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.initid = id;
+
+ if (cctl_do_io(fd, retry_count, io, __func__) != 0) {
+ retval = 1;
+ goto bailout;
+ }
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
+ char scsi_path[40];
+ ctl_scsi_path_string(io, scsi_path, sizeof(scsi_path));
+		fprintf(stdout, "%sPERSISTENT RESERVE OUT executed "
+			"successfully\n", scsi_path);
+ } else
+ ctl_io_error_print(io, NULL, stderr);
+
+bailout:
+ ctl_scsi_free_io(io);
+
+ if (dataptr != NULL)
+ free(dataptr);
+
+ return (retval);
+}
+
+struct cctl_req_option {
+ char *name;
+ int namelen;
+ char *value;
+ int vallen;
+ STAILQ_ENTRY(cctl_req_option) links;
+};
+
+static int
+cctl_create_lun(int fd, int argc, char **argv, char *combinedopt)
+{
+ struct ctl_lun_req req;
+ int device_type = -1;
+ uint64_t lun_size = 0;
+ uint32_t blocksize = 0, req_lun_id = 0;
+ char *serial_num = NULL;
+ char *device_id = NULL;
+ int lun_size_set = 0, blocksize_set = 0, lun_id_set = 0;
+ char *backend_name = NULL;
+ STAILQ_HEAD(, cctl_req_option) option_list;
+ int num_options = 0;
+ int retval = 0, c;
+
+ STAILQ_INIT(&option_list);
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'b':
+ backend_name = strdup(optarg);
+ break;
+ case 'B':
+ blocksize = strtoul(optarg, NULL, 0);
+ blocksize_set = 1;
+ break;
+ case 'd':
+ device_id = strdup(optarg);
+ break;
+ case 'l':
+ req_lun_id = strtoul(optarg, NULL, 0);
+ lun_id_set = 1;
+ break;
+ case 'o': {
+ struct cctl_req_option *option;
+ char *tmpstr;
+ char *name, *value;
+
+ tmpstr = strdup(optarg);
+ name = strsep(&tmpstr, "=");
+ if (name == NULL) {
+				warnx("%s: option -o takes \"name=value\" "
+				      "argument", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ value = strsep(&tmpstr, "=");
+ if (value == NULL) {
+				warnx("%s: option -o takes \"name=value\" "
+				      "argument", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ option = malloc(sizeof(*option));
+ if (option == NULL) {
+ warn("%s: error allocating %zd bytes",
+ __func__, sizeof(*option));
+ retval = 1;
+ goto bailout;
+ }
+ option->name = strdup(name);
+ option->namelen = strlen(name) + 1;
+ option->value = strdup(value);
+ option->vallen = strlen(value) + 1;
+			/*
+			 * strsep() has advanced tmpstr past the tokens;
+			 * name still points at the start of the strdup()ed
+			 * buffer, so free via name to avoid a leak.
+			 */
+			free(name);
+
+ STAILQ_INSERT_TAIL(&option_list, option, links);
+ num_options++;
+ break;
+ }
+ case 's':
+ lun_size = strtoull(optarg, NULL, 0);
+ lun_size_set = 1;
+ break;
+ case 'S':
+ serial_num = strdup(optarg);
+ break;
+ case 't':
+ device_type = strtoul(optarg, NULL, 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (backend_name == NULL) {
+ warnx("%s: backend name (-b) must be specified", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ bzero(&req, sizeof(req));
+
+ strlcpy(req.backend, backend_name, sizeof(req.backend));
+ req.reqtype = CTL_LUNREQ_CREATE;
+
+ if (blocksize_set != 0)
+ req.reqdata.create.blocksize_bytes = blocksize;
+
+ if (lun_size_set != 0)
+ req.reqdata.create.lun_size_bytes = lun_size;
+
+ if (lun_id_set != 0) {
+ req.reqdata.create.flags |= CTL_LUN_FLAG_ID_REQ;
+ req.reqdata.create.req_lun_id = req_lun_id;
+ }
+
+ req.reqdata.create.flags |= CTL_LUN_FLAG_DEV_TYPE;
+
+ if (device_type != -1)
+ req.reqdata.create.device_type = device_type;
+ else
+ req.reqdata.create.device_type = T_DIRECT;
+
+ if (serial_num != NULL) {
+ strlcpy(req.reqdata.create.serial_num, serial_num,
+ sizeof(req.reqdata.create.serial_num));
+ req.reqdata.create.flags |= CTL_LUN_FLAG_SERIAL_NUM;
+ }
+
+ if (device_id != NULL) {
+ strlcpy(req.reqdata.create.device_id, device_id,
+ sizeof(req.reqdata.create.device_id));
+ req.reqdata.create.flags |= CTL_LUN_FLAG_DEVID;
+ }
+
+ req.num_be_args = num_options;
+ if (num_options > 0) {
+ struct cctl_req_option *option, *next_option;
+ int i;
+
+ req.be_args = malloc(num_options * sizeof(*req.be_args));
+ if (req.be_args == NULL) {
+ warn("%s: error allocating %zd bytes", __func__,
+ num_options * sizeof(*req.be_args));
+ retval = 1;
+ goto bailout;
+ }
+
+ for (i = 0, option = STAILQ_FIRST(&option_list);
+ i < num_options; i++, option = next_option) {
+ next_option = STAILQ_NEXT(option, links);
+
+ req.be_args[i].namelen = option->namelen;
+ req.be_args[i].name = strdup(option->name);
+ req.be_args[i].vallen = option->vallen;
+ req.be_args[i].value = strdup(option->value);
+ /*
+ * XXX KDM do we want a way to specify a writeable
+ * flag of some sort? Do we want a way to specify
+ * binary data?
+ */
+ req.be_args[i].flags = CTL_BEARG_ASCII | CTL_BEARG_RD;
+
+ STAILQ_REMOVE(&option_list, option, cctl_req_option,
+ links);
+ free(option->name);
+ free(option->value);
+ free(option);
+ }
+ }
+
+ if (ioctl(fd, CTL_LUN_REQ, &req) == -1) {
+ warn("%s: error issuing CTL_LUN_REQ ioctl", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (req.status == CTL_LUN_ERROR) {
+ warnx("%s: error returned from LUN creation request:\n%s",
+ __func__, req.error_str);
+ retval = 1;
+ goto bailout;
+ } else if (req.status != CTL_LUN_OK) {
+ warnx("%s: unknown LUN creation request status %d",
+ __func__, req.status);
+ retval = 1;
+ goto bailout;
+ }
+
+ fprintf(stdout, "LUN created successfully\n");
+ fprintf(stdout, "backend: %s\n", req.backend);
+	fprintf(stdout, "device type: %d\n", req.reqdata.create.device_type);
+	fprintf(stdout, "LUN size: %ju bytes\n",
+	    (uintmax_t)req.reqdata.create.lun_size_bytes);
+	fprintf(stdout, "blocksize: %u bytes\n",
+	    req.reqdata.create.blocksize_bytes);
+ fprintf(stdout, "LUN ID: %d\n", req.reqdata.create.req_lun_id);
+ fprintf(stdout, "Serial Number: %s\n", req.reqdata.create.serial_num);
+	fprintf(stdout, "Device ID: %s\n", req.reqdata.create.device_id);
+
+bailout:
+ return (retval);
+}
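+
+/*
+ * Illustrative create invocation (a sketch, not a captured session):
+ * create a 10 MB LUN on the "ramdisk" backend with a requested LUN
+ * number of 0:
+ *
+ *	ctladm create -b ramdisk -s 10485760 -l 0
+ */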
+
+static int
+cctl_rm_lun(int fd, int argc, char **argv, char *combinedopt)
+{
+ struct ctl_lun_req req;
+ uint32_t lun_id = 0;
+ int lun_id_set = 0;
+ char *backend_name = NULL;
+ STAILQ_HEAD(, cctl_req_option) option_list;
+ int num_options = 0;
+ int retval = 0, c;
+
+ STAILQ_INIT(&option_list);
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'b':
+ backend_name = strdup(optarg);
+ break;
+ case 'l':
+ lun_id = strtoul(optarg, NULL, 0);
+ lun_id_set = 1;
+ break;
+ case 'o': {
+ struct cctl_req_option *option;
+ char *tmpstr;
+ char *name, *value;
+
+ tmpstr = strdup(optarg);
+ name = strsep(&tmpstr, "=");
+ if (name == NULL) {
+				warnx("%s: option -o takes \"name=value\" "
+ "argument", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ value = strsep(&tmpstr, "=");
+ if (value == NULL) {
+				warnx("%s: option -o takes \"name=value\" "
+ "argument", __func__);
+ retval = 1;
+ goto bailout;
+ }
+ option = malloc(sizeof(*option));
+ if (option == NULL) {
+ warn("%s: error allocating %zd bytes",
+ __func__, sizeof(*option));
+ retval = 1;
+ goto bailout;
+ }
+ option->name = strdup(name);
+ option->namelen = strlen(name) + 1;
+ option->value = strdup(value);
+ option->vallen = strlen(value) + 1;
+			/*
+			 * strsep() has advanced tmpstr past the tokens;
+			 * name still points at the start of the strdup()ed
+			 * buffer, so free via name to avoid a leak.
+			 */
+			free(name);
+
+ STAILQ_INSERT_TAIL(&option_list, option, links);
+ num_options++;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (backend_name == NULL)
+ errx(1, "%s: backend name (-b) must be specified", __func__);
+
+ if (lun_id_set == 0)
+ errx(1, "%s: LUN id (-l) must be specified", __func__);
+
+ bzero(&req, sizeof(req));
+
+ strlcpy(req.backend, backend_name, sizeof(req.backend));
+ req.reqtype = CTL_LUNREQ_RM;
+
+ req.reqdata.rm.lun_id = lun_id;
+
+ req.num_be_args = num_options;
+ if (num_options > 0) {
+ struct cctl_req_option *option, *next_option;
+ int i;
+
+ req.be_args = malloc(num_options * sizeof(*req.be_args));
+ if (req.be_args == NULL) {
+ warn("%s: error allocating %zd bytes", __func__,
+ num_options * sizeof(*req.be_args));
+ retval = 1;
+ goto bailout;
+ }
+
+ for (i = 0, option = STAILQ_FIRST(&option_list);
+ i < num_options; i++, option = next_option) {
+ next_option = STAILQ_NEXT(option, links);
+
+ req.be_args[i].namelen = option->namelen;
+ req.be_args[i].name = strdup(option->name);
+ req.be_args[i].vallen = option->vallen;
+ req.be_args[i].value = strdup(option->value);
+ /*
+ * XXX KDM do we want a way to specify a writeable
+ * flag of some sort? Do we want a way to specify
+ * binary data?
+ */
+ req.be_args[i].flags = CTL_BEARG_ASCII | CTL_BEARG_RD;
+
+ STAILQ_REMOVE(&option_list, option, cctl_req_option,
+ links);
+ free(option->name);
+ free(option->value);
+ free(option);
+ }
+ }
+
+ if (ioctl(fd, CTL_LUN_REQ, &req) == -1) {
+ warn("%s: error issuing CTL_LUN_REQ ioctl", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (req.status == CTL_LUN_ERROR) {
+		warnx("%s: error returned from LUN removal request:\n%s",
+ __func__, req.error_str);
+ retval = 1;
+ goto bailout;
+ } else if (req.status != CTL_LUN_OK) {
+		warnx("%s: unknown LUN removal request status %d",
+ __func__, req.status);
+ retval = 1;
+ goto bailout;
+ }
+
+ printf("LUN %d deleted successfully\n", lun_id);
+
+bailout:
+ return (retval);
+}
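+
+/*
+ * Illustrative remove invocation (a sketch): delete the LUN created
+ * above from the "ramdisk" backend:
+ *
+ *	ctladm remove -b ramdisk -l 0
+ */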
+
+/*
+ * Name/value pair used for per-LUN attributes.
+ */
+struct cctl_lun_nv {
+ char *name;
+ char *value;
+ STAILQ_ENTRY(cctl_lun_nv) links;
+};
+
+/*
+ * Backend LUN information.
+ */
+struct cctl_lun {
+ uint64_t lun_id;
+ char *backend_type;
+ uint64_t size_blocks;
+ uint32_t blocksize;
+ char *serial_number;
+ char *device_id;
+ STAILQ_HEAD(,cctl_lun_nv) attr_list;
+ STAILQ_ENTRY(cctl_lun) links;
+};
+
+struct cctl_devlist_data {
+ int num_luns;
+ STAILQ_HEAD(,cctl_lun) lun_list;
+ struct cctl_lun *cur_lun;
+ int level;
+ struct sbuf *cur_sb[32];
+};
+
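+/*
+ * Sketch of the LUN list XML these expat handlers expect, inferred from
+ * the element names handled below (not a normative schema; any other
+ * element inside <lun> becomes a name/value attribute pair):
+ *
+ *	<ctllunlist>
+ *	  <lun id="0">
+ *	    <backend_type>block</backend_type>
+ *	    <size>2097152</size>
+ *	    <blocksize>512</blocksize>
+ *	    <serial_number>MYSERIAL0</serial_number>
+ *	    <device_id>MYDEVID0</device_id>
+ *	  </lun>
+ *	</ctllunlist>
+ */
+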
+static void
+cctl_start_element(void *user_data, const char *name, const char **attr)
+{
+ int i;
+ struct cctl_devlist_data *devlist;
+ struct cctl_lun *cur_lun;
+
+ devlist = (struct cctl_devlist_data *)user_data;
+ cur_lun = devlist->cur_lun;
+ devlist->level++;
+	if ((u_int)devlist->level >= (sizeof(devlist->cur_sb) /
+ sizeof(devlist->cur_sb[0])))
+ errx(1, "%s: too many nesting levels, %zd max", __func__,
+ sizeof(devlist->cur_sb) / sizeof(devlist->cur_sb[0]));
+
+ devlist->cur_sb[devlist->level] = sbuf_new_auto();
+ if (devlist->cur_sb[devlist->level] == NULL)
+ err(1, "%s: Unable to allocate sbuf", __func__);
+
+ if (strcmp(name, "lun") == 0) {
+ if (cur_lun != NULL)
+ errx(1, "%s: improper lun element nesting", __func__);
+
+ cur_lun = calloc(1, sizeof(*cur_lun));
+ if (cur_lun == NULL)
+ err(1, "%s: cannot allocate %zd bytes", __func__,
+ sizeof(*cur_lun));
+
+ devlist->num_luns++;
+ devlist->cur_lun = cur_lun;
+
+ STAILQ_INIT(&cur_lun->attr_list);
+ STAILQ_INSERT_TAIL(&devlist->lun_list, cur_lun, links);
+
+ for (i = 0; attr[i] != NULL; i += 2) {
+ if (strcmp(attr[i], "id") == 0) {
+ cur_lun->lun_id = strtoull(attr[i+1], NULL, 0);
+ } else {
+ errx(1, "%s: invalid LUN attribute %s = %s",
+ __func__, attr[i], attr[i+1]);
+ }
+ }
+ }
+}
+
+static void
+cctl_end_element(void *user_data, const char *name)
+{
+ struct cctl_devlist_data *devlist;
+ struct cctl_lun *cur_lun;
+ char *str;
+
+ devlist = (struct cctl_devlist_data *)user_data;
+ cur_lun = devlist->cur_lun;
+
+ if ((cur_lun == NULL)
+ && (strcmp(name, "ctllunlist") != 0))
+ errx(1, "%s: cur_lun == NULL! (name = %s)", __func__, name);
+
+ if (devlist->cur_sb[devlist->level] == NULL)
+ errx(1, "%s: no valid sbuf at level %d (name %s)", __func__,
+ devlist->level, name);
+
+ sbuf_finish(devlist->cur_sb[devlist->level]);
+ str = strdup(sbuf_data(devlist->cur_sb[devlist->level]));
+ if (str == NULL)
+		err(1, "%s: can't allocate %zd bytes for string", __func__,
+ sbuf_len(devlist->cur_sb[devlist->level]));
+
+ if (strlen(str) == 0) {
+ free(str);
+ str = NULL;
+ }
+
+ sbuf_delete(devlist->cur_sb[devlist->level]);
+ devlist->cur_sb[devlist->level] = NULL;
+ devlist->level--;
+
+ if (strcmp(name, "backend_type") == 0) {
+ cur_lun->backend_type = str;
+ str = NULL;
+ } else if (strcmp(name, "size") == 0) {
+ cur_lun->size_blocks = strtoull(str, NULL, 0);
+ } else if (strcmp(name, "blocksize") == 0) {
+ cur_lun->blocksize = strtoul(str, NULL, 0);
+ } else if (strcmp(name, "serial_number") == 0) {
+ cur_lun->serial_number = str;
+ str = NULL;
+ } else if (strcmp(name, "device_id") == 0) {
+ cur_lun->device_id = str;
+ str = NULL;
+ } else if (strcmp(name, "lun") == 0) {
+ devlist->cur_lun = NULL;
+ } else if (strcmp(name, "ctllunlist") == 0) {
+
+ } else {
+ struct cctl_lun_nv *nv;
+
+ nv = calloc(1, sizeof(*nv));
+ if (nv == NULL)
+ err(1, "%s: can't allocate %zd bytes for nv pair",
+ __func__, sizeof(*nv));
+
+ nv->name = strdup(name);
+ if (nv->name == NULL)
+			err(1, "%s: can't allocate %zd bytes for string",
+ __func__, strlen(name));
+
+ nv->value = str;
+ str = NULL;
+ STAILQ_INSERT_TAIL(&cur_lun->attr_list, nv, links);
+ }
+
+ free(str);
+}
+
+static void
+cctl_char_handler(void *user_data, const XML_Char *str, int len)
+{
+ struct cctl_devlist_data *devlist;
+
+ devlist = (struct cctl_devlist_data *)user_data;
+
+ sbuf_bcat(devlist->cur_sb[devlist->level], str, len);
+}
+
+static int
+cctl_devlist(int fd, int argc, char **argv, char *combinedopt)
+{
+ struct ctl_lun_list list;
+ struct cctl_devlist_data devlist;
+ struct cctl_lun *lun;
+ XML_Parser parser;
+ char *lun_str;
+ int lun_len;
+ int dump_xml = 0;
+ int retval, c;
+ char *backend = NULL;
+ int verbose = 0;
+
+ retval = 0;
+ lun_len = 4096;
+
+ bzero(&devlist, sizeof(devlist));
+ STAILQ_INIT(&devlist.lun_list);
+
+ while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'b':
+ backend = strdup(optarg);
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 'x':
+ dump_xml = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+retry:
+	lun_str = malloc(lun_len);
+	if (lun_str == NULL) {
+		warn("%s: can't allocate %d bytes", __func__, lun_len);
+		retval = 1;
+		goto bailout;
+	}
+
+ bzero(&list, sizeof(list));
+ list.alloc_len = lun_len;
+ list.status = CTL_LUN_LIST_NONE;
+ list.lun_xml = lun_str;
+
+ if (ioctl(fd, CTL_LUN_LIST, &list) == -1) {
+ warn("%s: error issuing CTL_LUN_LIST ioctl", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ if (list.status == CTL_LUN_LIST_ERROR) {
+ warnx("%s: error returned from CTL_LUN_LIST ioctl:\n%s",
+ __func__, list.error_str);
+	} else if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE) {
+		free(lun_str);
+		lun_len = lun_len << 1;
+		goto retry;
+ }
+
+ if (dump_xml != 0) {
+ printf("%s", lun_str);
+ goto bailout;
+ }
+
+ parser = XML_ParserCreate(NULL);
+ if (parser == NULL) {
+ warn("%s: Unable to create XML parser", __func__);
+ retval = 1;
+ goto bailout;
+ }
+
+ XML_SetUserData(parser, &devlist);
+ XML_SetElementHandler(parser, cctl_start_element, cctl_end_element);
+ XML_SetCharacterDataHandler(parser, cctl_char_handler);
+
+	retval = XML_Parse(parser, lun_str, strlen(lun_str), 1);
+	XML_ParserFree(parser);
+	if (retval != 1) {
+		warnx("%s: error parsing LUN list XML", __func__);
+		retval = 1;
+		goto bailout;
+	}
+	/* XML_Parse() returns 1 on success; convert that to 0 for our caller. */
+	retval = 0;
+
+ printf("LUN Backend %18s %4s %-16s %-16s\n", "Size (Blocks)", "BS",
+ "Serial Number", "Device ID");
+ STAILQ_FOREACH(lun, &devlist.lun_list, links) {
+ struct cctl_lun_nv *nv;
+
+ if ((backend != NULL)
+ && (strcmp(lun->backend_type, backend) != 0))
+ continue;
+
+ printf("%3ju %-8s %18ju %4u %-16s %-16s\n",
+ (uintmax_t)lun->lun_id,
+ lun->backend_type, (uintmax_t)lun->size_blocks,
+ lun->blocksize, lun->serial_number, lun->device_id);
+
+ if (verbose == 0)
+ continue;
+
+ STAILQ_FOREACH(nv, &lun->attr_list, links) {
+ printf(" %s=%s\n", nv->name, nv->value);
+ }
+ }
+bailout:
+ free(lun_str);
+
+ return (retval);
+}
+
+void
+usage(int error)
+{
+ fprintf(error ? stderr : stdout,
+"Usage:\n"
+"Primary commands:\n"
+" ctladm tur [dev_id][general options]\n"
+" ctladm inquiry [dev_id][general options]\n"
+" ctladm devid [dev_id][general options]\n"
+" ctladm reqsense [dev_id][general options]\n"
+" ctladm reportluns [dev_id][general options]\n"
+" ctladm read [dev_id][general options] <-l lba> <-d len>\n"
+" <-f file|-> <-b blocksize> [-c cdbsize][-N]\n"
+" ctladm write [dev_id][general options] <-l lba> <-d len>\n"
+" <-f file|-> <-b blocksize> [-c cdbsize][-N]\n"
+" ctladm readcap [dev_id][general options] [-c cdbsize]\n"
+" ctladm modesense [dev_id][general options] <-m page|-l> [-P pc]\n"
+" [-d] [-S subpage] [-c cdbsize]\n"
+" ctladm prin [dev_id][general options] <-a action>\n"
+" ctladm prout [dev_id][general options] <-a action>\n"
+" <-r restype] [-k key] [-s sa_key]\n"
+" ctladm rtpg [dev_id][general options]\n"
+" ctladm start [dev_id][general options] [-i] [-o]\n"
+" ctladm stop [dev_id][general options] [-i] [-o]\n"
+" ctladm synccache [dev_id][general options] [-l lba]\n"
+" [-b blockcount] [-r] [-i] [-c cdbsize]\n"
+" ctladm create <-b backend> [-B blocksize] [-d device_id]\n"
+" [-l lun_id] [-o name=value] [-s size_bytes]\n"
+" [-S serial_num] [-t dev_type]\n"
+" ctladm remove <-b backend> <-l lun_id> [-o name=value]\n"
+" ctladm devlist [-b][-v][-x]\n"
+" ctladm shutdown\n"
+" ctladm startup\n"
+" ctladm hardstop\n"
+" ctladm hardstart\n"
+" ctladm lunlist\n"
+" ctladm bbrread [dev_id] <-l lba> <-d datalen>\n"
+" ctladm delay [dev_id] <-l datamove|done> [-T oneshot|cont]\n"
+" [-t secs]\n"
+" ctladm realsync <on|off|query>\n"
+" ctladm setsync [dev_id] <-i interval>\n"
+" ctladm getsync [dev_id]\n"
+" ctladm inject [dev_id] <-i action> <-p pattern> [-r lba,len]\n"
+" [-s len fmt [args]] [-c] [-d delete_id]\n"
+" ctladm port <-l | -o <on|off> | [-w wwnn][-W wwpn]>\n"
+" [-p targ_port] [-t port_type] [-q] [-x]\n"
+" ctladm dumpooa\n"
+" ctladm dumpstructs\n"
+" ctladm help\n"
+"General Options:\n"
+"-I intiator_id : defaults to 7, used to change the initiator id\n"
+"-C retries : specify the number of times to retry this command\n"
+"-D devicename : specify the device to operate on\n"
+" : (default is %s)\n"
+"read/write options:\n"
+"-l lba : logical block address\n"
+"-d len : read/write length, in blocks\n"
+"-f file|- : write/read data to/from file or stdout/stdin\n"
+"-b blocksize : block size, in bytes\n"
+"-c cdbsize : specify minimum cdb size: 6, 10, 12 or 16\n"
+"-N : do not copy data to/from userland\n"
+"readcapacity options:\n"
+"-c cdbsize : specify minimum cdb size: 10 or 16\n"
+"modesense options:\n"
+"-m page : specify the mode page to view\n"
+"-l : request a list of supported pages\n"
+"-P pc : specify the page control value: 0-3 (current,\n"
+" changeable, default, saved, respectively)\n"
+"-d : disable block descriptors for mode sense\n"
+"-S subpage : specify a subpage\n"
+"-c cdbsize : specify minimum cdb size: 6 or 10\n"
+"persistent reserve in options:\n"
+"-a action : specify the action value: 0-2 (read key, read\n"
+" reservation, read capabilities, respectively)\n"
+"persistent reserve out options:\n"
+"-a action : specify the action value: 0-5 (register, reserve,\n"
+" release, clear, preempt, register and ignore)\n"
+"-k key : key value\n"
+"-s sa_key : service action value\n"
+"-r restype : specify the reservation type: 0-5(wr ex, ex ac,\n"
+" wr ex ro, ex ac ro, wr ex ar, ex ac ar)\n"
+"start/stop options:\n"
+"-i : set the immediate bit (CTL does not support this)\n"
+"-o : set the on/offline bit\n"
+"synccache options:\n"
+"-l lba : set the starting LBA\n"
+"-b blockcount : set the length to sync in blocks\n"
+"-r : set the relative addressing bit\n"
+"-i : set the immediate bit\n"
+"-c cdbsize : specify minimum cdb size: 10 or 16\n"
+"create options:\n"
+"-b backend : backend name (\"block\", \"ramdisk\", etc.)\n"
+"-B blocksize : LUN blocksize in bytes (some backends)\n"
+"-d device_id : SCSI VPD page 0x83 ID\n"
+"-l lun_id : requested LUN number\n"
+"-o name=value : backend-specific options, multiple allowed\n"
+"-s size_bytes : LUN size in bytes (some backends)\n"
+"-S serial_num : SCSI VPD page 0x80 serial number\n"
+"-t dev_type : SCSI device type (0=disk, 3=processor)\n"
+"remove options:\n"
+"-b backend : backend name (\"block\", \"ramdisk\", etc.)\n"
+"-l lun_id : LUN number to delete\n"
+"-o name=value : backend-specific options, multiple allowed\n"
+"devlist options:\n"
+"-b backend : list devices from specified backend only\n"
+"-v : be verbose, show backend attributes\n"
+"-x : dump raw XML\n"
+"delay options:\n"
+"-l datamove|done : delay command at datamove or done phase\n"
+"-T oneshot : delay one command, then resume normal completion\n"
+"-T cont : delay all commands\n"
+"-t secs : number of seconds to delay\n"
+"inject options:\n"
+"-i error_action : action to perform\n"
+"-p pattern : command pattern to look for\n"
+"-r lba,len : LBA range for pattern\n"
+"-s len fmt [args] : sense data for custom sense action\n"
+"-c : continuous operation\n"
+"-d delete_id : error id to delete\n"
+"port options:\n"
+"-l : list frontend ports\n"
+"-o on|off : turn frontend ports on or off\n"
+"-w wwnn : set WWNN for one frontend\n"
+"-W wwpn : set WWPN for one frontend\n"
+"-t port_type : specify fc, scsi, ioctl, internal frontend type\n"
+"-p targ_port : specify target port number\n"
+"-q : omit header in list output\n"
+"-x : output port list in XML format\n"
+"bbrread options:\n"
+"-l lba : starting LBA\n"
+"-d datalen : length, in bytes, to read\n",
+CTL_DEFAULT_DEV);
+}
+
+int
+main(int argc, char **argv)
+{
+ int option_index, c;
+ ctladm_cmdfunction command;
+ ctladm_cmdargs cmdargs;
+ ctladm_optret optreturn;
+ char *device;
+ const char *mainopt = "C:D:I:";
+ const char *subopt = NULL;
+ char combinedopt[256];
+ int target, lun;
+ int optstart = 2;
+ int retval, fd;
+ int retries, timeout;
+ int initid;
+
+ option_index = 0;
+ retval = 0;
+ cmdargs = CTLADM_ARG_NONE;
+ command = CTLADM_CMD_HELP;
+ device = NULL;
+ fd = -1;
+ retries = 0;
+ target = 0;
+ lun = 0;
+ timeout = 0;
+ initid = 7;
+
+ if (argc < 2) {
+ usage(1);
+ retval = 1;
+ goto bailout;
+ }
+
+ /*
+ * Get the base option.
+ */
+	optreturn = getoption(option_table, argv[1], &command, &cmdargs,
+	    &subopt);
+
+ if (optreturn == CC_OR_AMBIGUOUS) {
+ warnx("ambiguous option %s", argv[1]);
+ usage(0);
+ exit(1);
+ } else if (optreturn == CC_OR_NOT_FOUND) {
+ warnx("option %s not found", argv[1]);
+ usage(0);
+ exit(1);
+ }
+
+ if (cmdargs & CTLADM_ARG_NEED_TL) {
+ if ((argc < 3)
+ || (!isdigit(argv[2][0]))) {
+ warnx("option %s requires a target:lun argument",
+ argv[1]);
+ usage(0);
+ exit(1);
+ }
+ retval = cctl_parse_tl(argv[2], &target, &lun);
+ if (retval != 0)
+ errx(1, "invalid target:lun argument %s", argv[2]);
+
+ cmdargs |= CTLADM_ARG_TARG_LUN;
+ optstart++;
+ }
+
+ /*
+ * Ahh, getopt(3) is a pain.
+ *
+ * This is a gross hack. There really aren't many other good
+ * options (excuse the pun) for parsing options in a situation like
+ * this. getopt is kinda braindead, so you end up having to run
+ * through the options twice, and give each invocation of getopt
+ * the option string for the other invocation.
+ *
+ * You would think that you could just have two groups of options.
+ * The first group would get parsed by the first invocation of
+ * getopt, and the second group would get parsed by the second
+ * invocation of getopt. It doesn't quite work out that way. When
+ * the first invocation of getopt finishes, it leaves optind pointing
+ * to the argument _after_ the first argument in the second group.
+ * So when the second invocation of getopt comes around, it doesn't
+ * recognize the first argument it gets and then bails out.
+ *
+ * A nice alternative would be to have a flag for getopt that says
+ * "just keep parsing arguments even when you encounter an unknown
+ * argument", but there isn't one. So there's no real clean way to
+ * easily parse two sets of arguments without having one invocation
+ * of getopt know about the other.
+ *
+ * Without this hack, the first invocation of getopt would work as
+ * long as the generic arguments are first, but the second invocation
+ * (in the subfunction) would fail in one of two ways. In the case
+ * where you don't set optreset, it would fail because optind may be
+ * pointing to the argument after the one it should be pointing at.
+ * In the case where you do set optreset, and reset optind, it would
+ * fail because getopt would run into the first set of options, which
+ * it doesn't understand.
+ *
+ * All of this would "sort of" work if you could somehow figure out
+ * whether optind had been incremented one option too far. The
+ * mechanics of that, however, are more daunting than just giving
+	 * both invocations all of the expected options for either invocation.
+ *
+ * Needless to say, I wouldn't mind if someone invented a better
+ * (non-GPL!) command line parsing interface than getopt. I
+ * wouldn't mind if someone added more knobs to getopt to make it
+ * work better. Who knows, I may talk myself into doing it someday,
+ * if the standards weenies let me. As it is, it just leads to
+ * hackery like this and causes people to avoid it in some cases.
+ *
+ * KDM, September 8th, 1998
+ */
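+	/*
+	 * In short: combinedopt below holds both option sets, the loop
+	 * that follows consumes only the generic -C/-D/-I options, and
+	 * optind/optreset are then rewound so each subfunction can re-run
+	 * getopt() over the same argv with the same combined string.
+	 */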
+ if (subopt != NULL)
+ sprintf(combinedopt, "%s%s", mainopt, subopt);
+ else
+ sprintf(combinedopt, "%s", mainopt);
+
+ /*
+ * Start getopt processing at argv[2/3], since we've already
+ * accepted argv[1..2] as the command name, and as a possible
+ * device name.
+ */
+ optind = optstart;
+
+ /*
+ * Now we run through the argument list looking for generic
+ * options, and ignoring options that possibly belong to
+ * subfunctions.
+ */
+	while ((c = getopt(argc, argv, combinedopt)) != -1) {
+ switch (c) {
+ case 'C':
+ cmdargs |= CTLADM_ARG_RETRIES;
+ retries = strtol(optarg, NULL, 0);
+ break;
+ case 'D':
+ device = strdup(optarg);
+ cmdargs |= CTLADM_ARG_DEVICE;
+ break;
+ case 'I':
+ cmdargs |= CTLADM_ARG_INITIATOR;
+ initid = strtol(optarg, NULL, 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if ((cmdargs & CTLADM_ARG_INITIATOR) == 0)
+ initid = 7;
+
+ optind = optstart;
+ optreset = 1;
+
+ /*
+ * Default to opening the CTL device for now.
+ */
+ if (((cmdargs & CTLADM_ARG_DEVICE) == 0)
+ && (command != CTLADM_CMD_HELP)) {
+ device = strdup(CTL_DEFAULT_DEV);
+ cmdargs |= CTLADM_ARG_DEVICE;
+ }
+
+ if ((cmdargs & CTLADM_ARG_DEVICE)
+ && (command != CTLADM_CMD_HELP)) {
+ fd = open(device, O_RDWR);
+ if (fd == -1) {
+ fprintf(stderr, "%s: error opening %s: %s\n",
+ argv[0], device, strerror(errno));
+ retval = 1;
+ goto bailout;
+ }
+ } else if ((command != CTLADM_CMD_HELP)
+ && ((cmdargs & CTLADM_ARG_DEVICE) == 0)) {
+ fprintf(stderr, "%s: you must specify a device with the "
+ "--device argument for this command\n", argv[0]);
+ command = CTLADM_CMD_HELP;
+ retval = 1;
+ }
+
+ switch (command) {
+ case CTLADM_CMD_TUR:
+ retval = cctl_tur(fd, target, lun, initid, retries);
+ break;
+ case CTLADM_CMD_INQUIRY:
+ retval = cctl_inquiry(fd, target, lun, initid, retries);
+ break;
+ case CTLADM_CMD_REQ_SENSE:
+ retval = cctl_req_sense(fd, target, lun, initid, retries);
+ break;
+ case CTLADM_CMD_REPORT_LUNS:
+ retval = cctl_report_luns(fd, target, lun, initid, retries);
+ break;
+ case CTLADM_CMD_CREATE:
+ retval = cctl_create_lun(fd, argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_RM:
+ retval = cctl_rm_lun(fd, argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_DEVLIST:
+ retval = cctl_devlist(fd, argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_READ:
+ case CTLADM_CMD_WRITE:
+ retval = cctl_read_write(fd, target, lun, initid, retries,
+ argc, argv, combinedopt, command);
+ break;
+ case CTLADM_CMD_PORT:
+ retval = cctl_port(fd, argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_READCAPACITY:
+ retval = cctl_read_capacity(fd, target, lun, initid, retries,
+ argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_MODESENSE:
+ retval = cctl_mode_sense(fd, target, lun, initid, retries,
+ argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_START:
+ case CTLADM_CMD_STOP:
+ retval = cctl_start_stop(fd, target, lun, initid, retries,
+ (command == CTLADM_CMD_START) ? 1 : 0,
+ argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_SYNC_CACHE:
+ retval = cctl_sync_cache(fd, target, lun, initid, retries,
+ argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_SHUTDOWN:
+ case CTLADM_CMD_STARTUP:
+ retval = cctl_startup_shutdown(fd, target, lun, initid,
+ command);
+ break;
+ case CTLADM_CMD_HARDSTOP:
+ case CTLADM_CMD_HARDSTART:
+ retval = cctl_hardstopstart(fd, command);
+ break;
+ case CTLADM_CMD_BBRREAD:
+ retval = cctl_bbrread(fd, target, lun, initid, argc, argv,
+ combinedopt);
+ break;
+ case CTLADM_CMD_LUNLIST:
+ retval = cctl_lunlist(fd);
+ break;
+ case CTLADM_CMD_DELAY:
+ retval = cctl_delay(fd, target, lun, argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_REALSYNC:
+ retval = cctl_realsync(fd, argc, argv);
+ break;
+ case CTLADM_CMD_SETSYNC:
+ case CTLADM_CMD_GETSYNC:
+ retval = cctl_getsetsync(fd, target, lun, command,
+ argc, argv, combinedopt);
+ break;
+ case CTLADM_CMD_ERR_INJECT:
+ retval = cctl_error_inject(fd, target, lun, argc, argv,
+ combinedopt);
+ break;
+ case CTLADM_CMD_DUMPOOA:
+ retval = cctl_dump_ooa(fd, argc, argv);
+ break;
+ case CTLADM_CMD_DUMPSTRUCTS:
+ retval = cctl_dump_structs(fd, cmdargs);
+ break;
+ case CTLADM_CMD_PRES_IN:
+ retval = cctl_persistent_reserve_in(fd, target, lun, initid,
+ argc, argv, combinedopt,
+ retries);
+ break;
+ case CTLADM_CMD_PRES_OUT:
+ retval = cctl_persistent_reserve_out(fd, target, lun, initid,
+ argc, argv, combinedopt,
+ retries);
+ break;
+ case CTLADM_CMD_INQ_VPD_DEVID:
+ retval = cctl_inquiry_vpd_devid(fd, target, lun, initid);
+ break;
+ case CTLADM_CMD_RTPG:
+ retval = cctl_report_target_port_group(fd, target, lun, initid);
+ break;
+ case CTLADM_CMD_HELP:
+ default:
+ usage(retval);
+ break;
+ }
+bailout:
+
+ if (fd != -1)
+ close(fd);
+
+	exit(retval);
+}
+
+/*
+ * vim: ts=8
+ */
diff --git a/usr.sbin/ctladm/ctladm.h b/usr.sbin/ctladm/ctladm.h
new file mode 100644
index 0000000..b2e9f12
--- /dev/null
+++ b/usr.sbin/ctladm/ctladm.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ * $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.h#1 $
+ */
+
+#ifndef _CTLADM_H
+#define _CTLADM_H
+
+/*
+ * get_hook: Structure for evaluating args in a callback.
+ */
+struct get_hook
+{
+ int argc;
+ char **argv;
+ int got;
+};
+
+char *cget(void *hook, char *name);
+int iget(void *hook, char *name);
+void arg_put(void *hook, int letter, void *arg, int count, char *name);
+void usage(int error);
+
+#endif /* _CTLADM_H */
diff --git a/usr.sbin/ctladm/util.c b/usr.sbin/ctladm/util.c
new file mode 100644
index 0000000..df13357
--- /dev/null
+++ b/usr.sbin/ctladm/util.c
@@ -0,0 +1,156 @@
+/*
+ * Written By Julian ELischer
+ * Copyright julian Elischer 1993.
+ * Permission is granted to use or redistribute this file in any way as long
+ * as this notice remains. Julian Elischer does not guarantee that this file
+ * is totally correct for any given task and users of this file must
+ * accept responsibility for any damage that occurs from the application of this
+ * file.
+ *
+ * (julian@tfs.com julian@dialix.oz.au)
+ *
+ * User SCSI hooks added by Peter Dufault:
+ *
+ * Copyright (c) 1994 HD Associates
+ * (contact: dufault@hda.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of HD Associates
+ * may not be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY HD ASSOCIATES ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL HD ASSOCIATES BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Taken from the original scsi(8) program.
+ * from: scsi.c,v 1.17 1998/01/12 07:57:57 charnier Exp $";
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/stdint.h>
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <camlib.h>
+#include "ctladm.h"
+
+int verbose;
+
+/* iget: Integer argument callback
+ */
+int
+iget(void *hook, char *name)
+{
+ struct get_hook *h = (struct get_hook *)hook;
+ int arg;
+
+ if (h->got >= h->argc)
+ {
+ fprintf(stderr, "Expecting an integer argument.\n");
+ usage(0);
+ exit(1);
+ }
+ arg = strtol(h->argv[h->got], 0, 0);
+ h->got++;
+
+ if (verbose && name && *name)
+ printf("%s: %d\n", name, arg);
+
+ return arg;
+}
+
+/* cget: char * argument callback
+ */
+char *
+cget(void *hook, char *name)
+{
+ struct get_hook *h = (struct get_hook *)hook;
+ char *arg;
+
+ if (h->got >= h->argc)
+ {
+ fprintf(stderr, "Expecting a character pointer argument.\n");
+ usage(0);
+ exit(1);
+ }
+ arg = h->argv[h->got];
+ h->got++;
+
+ if (verbose && name)
+		printf("cget: %s: %s\n", name, arg);
+
+ return arg;
+}
+
+/* arg_put: "put argument" callback
+ */
+void
+arg_put(void *hook __unused, int letter, void *arg, int count, char *name)
+{
+ if (verbose && name && *name)
+ printf("%s: ", name);
+
+ switch(letter)
+ {
+ case 'i':
+ case 'b':
+ printf("%jd ", (intmax_t)(intptr_t)arg);
+ break;
+
+ case 'c':
+ case 'z':
+ {
+ char *p;
+
+ p = malloc(count + 1);
+ if (p == NULL) {
+ fprintf(stderr, "can't malloc memory for p\n");
+ exit(1);
+ }
+
+			bzero(p, count + 1);
+ strncpy(p, (char *)arg, count);
+ if (letter == 'z')
+ {
+ int i;
+ for (i = count - 1; i >= 0; i--)
+ if (p[i] == ' ')
+ p[i] = 0;
+ else
+ break;
+ }
+ printf("%s ", p);
+
+ free(p);
+ }
+
+ break;
+
+ default:
+ printf("Unknown format letter: '%c'\n", letter);
+ }
+ if (verbose)
+ putchar('\n');
+}
diff --git a/usr.sbin/mlxcontrol/interface.c b/usr.sbin/mlxcontrol/interface.c
index 2c7e12b..c0c241b 100644
--- a/usr.sbin/mlxcontrol/interface.c
+++ b/usr.sbin/mlxcontrol/interface.c
@@ -253,7 +253,7 @@ mlx_scsi_inquiry(int unit, int channel, int target, char **vendor, char **device
/* build the cdb */
inq_cmd->opcode = INQUIRY;
- inq_cmd->length = SHORT_INQUIRY_LENGTH;
+ scsi_ulto2b(SHORT_INQUIRY_LENGTH, inq_cmd->length);
/* hand it off for processing */
mlx_perform(unit, mlx_command, &cmd);
OpenPOWER on IntegriCloud