author     gibbs <gibbs@FreeBSD.org>    1998-09-15 06:36:34 +0000
committer  gibbs <gibbs@FreeBSD.org>    1998-09-15 06:36:34 +0000
commit     cb986cde46f00a045e2662a010c6eec29b3fbe8a (patch)
tree       036628a681da2a8894c183b293e14fc3c31dbc8b /sys
parent     855593c295ac3878b7f73a83edd07f899eccc38d (diff)
SCSI Peripheral drivers for CAM:
da   - Direct Access Devices (disks, optical devices, SS disks)
cd   - CDROM (or devices that can act like them, WORM, CD-RW, etc)
ch   - Medium Changer devices.
sa   - Sequential Access Devices (tape drives)
pass - Application pass-thru driver
targ - Target Mode "Processor Target" Emulator
pt   - Processor Target Devices (scanners, cpus, etc.)

Submitted by: The CAM Team
Diffstat (limited to 'sys')
-rw-r--r--  sys/cam/scsi/scsi_all.c       2678
-rw-r--r--  sys/cam/scsi/scsi_all.h        814
-rw-r--r--  sys/cam/scsi/scsi_cd.c        3016
-rw-r--r--  sys/cam/scsi/scsi_cd.h         217
-rw-r--r--  sys/cam/scsi/scsi_ch.c        1584
-rw-r--r--  sys/cam/scsi/scsi_ch.h         486
-rw-r--r--  sys/cam/scsi/scsi_da.c        1520
-rw-r--r--  sys/cam/scsi/scsi_da.h         391
-rw-r--r--  sys/cam/scsi/scsi_message.h     42
-rw-r--r--  sys/cam/scsi/scsi_pass.c       787
-rw-r--r--  sys/cam/scsi/scsi_pass.h        38
-rw-r--r--  sys/cam/scsi/scsi_pt.c         723
-rw-r--r--  sys/cam/scsi/scsi_pt.h          48
-rw-r--r--  sys/cam/scsi/scsi_sa.c        2337
-rw-r--r--  sys/cam/scsi/scsi_sa.h         254
-rw-r--r--  sys/cam/scsi/scsi_target.c    1459
-rw-r--r--  sys/cam/scsi/scsi_targetio.h   103
17 files changed, 16497 insertions, 0 deletions
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
new file mode 100644
index 0000000..129847f
--- /dev/null
+++ b/sys/cam/scsi/scsi_all.c
@@ -0,0 +1,2678 @@
+/*
+ * Implementation of Utility functions for all SCSI device types.
+ *
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include <sys/param.h>
+
+#ifdef KERNEL
+#include <opt_scsi.h>
+
+#include <sys/systm.h>
+#else
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_xpt.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/scsi/scsi_all.h>
+#ifndef KERNEL
+#include <camlib.h>
+
+#ifndef FALSE
+#define FALSE 0
+#endif /* FALSE */
+#ifndef TRUE
+#define TRUE 1
+#endif /* TRUE */
+#define ERESTART -1 /* restart syscall */
+#define EJUSTRETURN -2 /* don't modify regs, just return */
+#endif /* !KERNEL */
+
+const char *scsi_sense_key_text[] =
+{
+ "NO SENSE",
+ "RECOVERED ERROR",
+ "NOT READY",
+ "MEDIUM ERROR",
+ "HARDWARE FAILURE",
+ "ILLEGAL REQUEST",
+ "UNIT ATTENTION",
+ "DATA PROTECT",
+ "BLANK CHECK",
+ "Vendor Specific",
+ "COPY ABORTED",
+ "ABORTED COMMAND",
+ "EQUAL",
+ "VOLUME OVERFLOW",
+ "MISCOMPARE",
+ "RESERVED"
+};
+
+#if !defined(SCSI_NO_OP_STRINGS)
+
+#define D 0x001
+#define T 0x002
+#define L 0x004
+#define P 0x008
+#define W 0x010
+#define R 0x020
+#define S 0x040
+#define O 0x080
+#define M 0x100
+#define C 0x200
+#define A 0x400
+#define E 0x800
+
+#define ALL 0xFFF
+
+/*
+ * WARNING: You must update the num_ops field below for this quirk table
+ * entry if you add more entries.
+ */
+static struct op_table_entry plextor_cd_ops[] = {
+ {0xD8, R, "CD-DA READ"}
+};
+
+static struct scsi_op_quirk_entry scsi_op_quirk_table[] = {
+ {
+ /*
+ * I believe that 0xD8 is the Plextor proprietary command
+ * to read CD-DA data. I'm not sure which Plextor CDROM
+ * models support the command, though. I know for sure
+ * that the 4X, 8X, and 12X models do, and presumably the
+ * 12-20X does. I don't know about any earlier models,
+ * though. If anyone has any more complete information,
+ * feel free to change this quirk entry.
+ */
+ {T_CDROM, SIP_MEDIA_REMOVABLE, "PLEXTOR", "CD-ROM PX*", "*"},
+ 1, /* number of vendor-specific opcodes for this entry */
+ plextor_cd_ops
+ }
+};
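
The comment above the Plextor opcode list warns that num_ops is maintained by hand. Purely as an illustration (the vendor, model string, and second opcode below are invented and are not part of this commit), an extended quirk entry would keep that count in step with its opcode list:

    static struct op_table_entry example_vendor_ops[] = {
    	{0xD8, R, "CD-DA READ"},
    	{0xD9, R, "CD-DA READ MSF"}	/* hypothetical second opcode */
    };

    static struct scsi_op_quirk_entry example_quirk_table[] = {
    	{
    		{T_CDROM, SIP_MEDIA_REMOVABLE, "ACME", "CD-ROM*", "*"},
    		2,	/* must match the number of entries above */
    		example_vendor_ops
    	}
    };
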
+
+static struct op_table_entry scsi_op_codes[] = {
+/*
+ * From: ftp://ftp.symbios.com/pub/standards/io/t10/drafts/spc/op-num.txt
+ * Modifications by Kenneth Merry (ken@plutotech.com)
+ *
+ * Note: order is important in this table, scsi_op_desc() currently
+ * depends on the opcodes in the table being in order to save search time.
+ */
+/*
+ * File: OP-NUM.TXT
+ *
+ * SCSI Operation Codes
+ * Numeric Sorted Listing
+ * as of 11/13/96
+ *
+ * D - DIRECT ACCESS DEVICE (SBC) device column key
+ * .T - SEQUENTIAL ACCESS DEVICE (SSC) -------------------
+ * . L - PRINTER DEVICE (SSC) M = Mandatory
+ * . P - PROCESSOR DEVICE (SPC) O = Optional
+ * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC) V = Vendor specific
+ * . . R - CD DEVICE (MMC) R = Reserved
+ * . . S - SCANNER DEVICE (SGC) Z = Obsolete
+ * . . .O - OPTICAL MEMORY DEVICE (SBC)
+ * . . . M - MEDIA CHANGER DEVICE (SMC)
+ * . . . C - COMMUNICATION DEVICE (SSC)
+ * . . . .A - STORAGE ARRAY DEVICE (SCC)
+ * . . . . E - ENCLOSURE SERVICES DEVICE (SES)
+ * OP DTLPWRSOMCAE Description
+ * -- ------------ ---------------------------------------------------- */
+/* 00 MMMMMMMMMMMM TEST UNIT READY */
+{0x00, ALL, "TEST UNIT READY"},
+
+/* 01 M REWIND */
+{0x01, T, "REWIND"},
+/* 01 Z V ZO ZO REZERO UNIT */
+{0x01, D|L|W|O|M, "REZERO UNIT"},
+
+/* 02 VVVVVV V */
+
+/* 03 MMMMMMMMMMMM REQUEST SENSE */
+{0x03, ALL, "REQUEST SENSE"},
+
+/* 04 M O O FORMAT UNIT */
+{0x04, D|R|O, "FORMAT UNIT"},
+/* 04 O FORMAT MEDIUM */
+{0x04, T, "FORMAT MEDIUM"},
+/* 04 O FORMAT */
+{0x04, L, "FORMAT"},
+
+/* 05 VMVVVV V READ BLOCK LIMITS */
+{0x05, T, "READ BLOCK LIMITS"},
+
+/* 06 VVVVVV V */
+
+/* 07 OVV O OV REASSIGN BLOCKS */
+{0x07, D|W|O, "REASSIGN BLOCKS"},
+/* 07 O INITIALIZE ELEMENT STATUS */
+{0x07, M, "INITIALIZE ELEMENT STATUS"},
+
+/* 08 OMV OO OV READ(06) */
+{0x08, D|T|W|R|O, "READ(06)"},
+/* 08 O RECEIVE */
+{0x08, P, "RECEIVE"},
+/* 08 M GET MESSAGE(06) */
+{0x08, C, "GET MESSAGE(06)"},
+
+/* 09 VVVVVV V */
+
+/* 0A OM O OV WRITE(06) */
+{0x0A, D|T|W|O, "WRITE(06)"},
+/* 0A M SEND(06) */
+{0x0A, P, "SEND(06)"},
+/* 0A M SEND MESSAGE(06) */
+{0x0A, C, "SEND MESSAGE(06)"},
+/* 0A M PRINT */
+{0x0A, L, "PRINT"},
+
+/* 0B Z ZO ZV SEEK(06) */
+{0x0B, D|W|R|O, "SEEK(06)"},
+/* 0B O SLEW AND PRINT */
+{0x0B, L, "SLEW AND PRINT"},
+
+/* 0C VVVVVV V */
+/* 0D VVVVVV V */
+/* 0E VVVVVV V */
+/* 0F VOVVVV V READ REVERSE */
+{0x0F, T, "READ REVERSE"},
+
+/* 10 VM VVV WRITE FILEMARKS */
+{0x10, T, "WRITE FILEMARKS"},
+/* 10 O O SYNCHRONIZE BUFFER */
+{0x10, L|W, "SYNCHRONIZE BUFFER"},
+
+/* 11 VMVVVV SPACE */
+{0x11, T, "SPACE"},
+
+/* 12 MMMMMMMMMMMM INQUIRY */
+{0x12, ALL, "INQUIRY"},
+
+/* 13 VOVVVV VERIFY(06) */
+{0x13, T, "VERIFY(06)"},
+
+/* 14 VOOVVV RECOVER BUFFERED DATA */
+{0x14, T|L, "RECOVER BUFFERED DATA"},
+
+/* 15 OMO OOOOOOOO MODE SELECT(06) */
+{0x15, ALL & ~(P), "MODE SELECT(06)"},
+
+/* 16 MMMOMMMM O RESERVE(06) */
+{0x16, D|T|L|P|W|R|S|O|E, "RESERVE(06)"},
+/* 16 M RESERVE ELEMENT(06) */
+{0x16, M, "RESERVE ELEMENT(06)"},
+
+/* 17 MMMOMMMM O RELEASE(06) */
+{0x17, ALL & ~(M|C|A), "RELEASE(06)"},
+/* 17 M RELEASE ELEMENT(06) */
+{0x17, M, "RELEASE ELEMENT(06)"},
+
+/* 18 OOOOOOOO COPY */
+{0x18, ALL & ~(M|C|A|E), "COPY"},
+
+/* 19 VMVVVV ERASE */
+{0x19, T, "ERASE"},
+
+/* 1A OMO OOOOOOOO MODE SENSE(06) */
+{0x1A, ALL & ~(P), "MODE SENSE(06)"},
+
+/* 1B O OM O STOP START UNIT */
+{0x1B, D|W|R|O, "STOP START UNIT"},
+/* 1B O LOAD UNLOAD */
+{0x1B, T, "LOAD UNLOAD"},
+/* 1B O SCAN */
+{0x1B, S, "SCAN"},
+/* 1B O STOP PRINT */
+{0x1B, L, "STOP PRINT"},
+
+/* 1C OOOOOOOOOO M RECEIVE DIAGNOSTIC RESULTS */
+{0x1C, ALL & ~(A), "RECEIVE DIAGNOSTIC RESULTS"},
+
+/* 1D MMMMMMMMMMMM SEND DIAGNOSTIC */
+{0x1D, ALL, "SEND DIAGNOSTIC"},
+
+/* 1E OO OM OO PREVENT ALLOW MEDIUM REMOVAL */
+{0x1E, D|T|W|R|O|M, "PREVENT ALLOW MEDIUM REMOVAL"},
+
+/* 1F */
+/* 20 V VV V */
+/* 21 V VV V */
+/* 22 V VV V */
+/* 23 V VV V */
+
+/* 24 V VVM SET WINDOW */
+{0x24, S, "SET WINDOW"},
+
+/* 25 M M M READ CAPACITY */
+{0x25, D|W|O, "READ CAPACITY"},
+/* 25 M READ CD RECORDED CAPACITY */
+{0x25, R, "READ CD RECORDED CAPACITY"},
+/* 25 O GET WINDOW */
+{0x25, S, "GET WINDOW"},
+
+/* 26 V VV */
+/* 27 V VV */
+
+/* 28 M MMMM READ(10) */
+{0x28, D|W|R|S|O, "READ(10)"},
+/* 28 O GET MESSAGE(10) */
+{0x28, C, "GET MESSAGE(10)"},
+
+/* 29 V VV O READ GENERATION */
+{0x29, O, "READ GENERATION"},
+
+/* 2A M MM M WRITE(10) */
+{0x2A, D|W|R|O, "WRITE(10)"},
+/* 2A O SEND(10) */
+{0x2A, S, "SEND(10)"},
+/* 2A O SEND MESSAGE(10) */
+{0x2A, C, "SEND MESSAGE(10)"},
+
+/* 2B O OM O SEEK(10) */
+{0x2B, D|W|R|O, "SEEK(10)"},
+/* 2B O LOCATE */
+{0x2B, T, "LOCATE"},
+/* 2B O POSITION TO ELEMENT */
+{0x2B, M, "POSITION TO ELEMENT"},
+
+/* 2C V O ERASE(10) */
+{0x2C, O, "ERASE(10)"},
+
+/* 2D V O O READ UPDATED BLOCK */
+{0x2D, W|O, "READ UPDATED BLOCK"},
+
+/* 2E O O O WRITE AND VERIFY(10) */
+{0x2E, D|W|O, "WRITE AND VERIFY(10)"},
+
+/* 2F O OO O VERIFY(10) */
+{0x2F, D|W|R|O, "VERIFY(10)"},
+
+/* 30 Z ZO Z SEARCH DATA HIGH(10) */
+{0x30, D|W|R|O, "SEARCH DATA HIGH(10)"},
+
+/* 31 Z ZO Z SEARCH DATA EQUAL(10) */
+{0x31, D|W|R|O, "SEARCH DATA EQUAL(10)"},
+/* 31 O OBJECT POSITION */
+{0x31, S, "OBJECT POSITION"},
+
+/* 32 Z ZO Z SEARCH DATA LOW(10) */
+{0x32, D|W|R|O, "SEARCH DATA LOW(10)"},
+
+/* 33 O OO O SET LIMITS(10) */
+{0x33, D|W|R|O, "SET LIMITS(10)"},
+
+/* 34 O OO O PRE-FETCH */
+{0x34, D|W|R|O, "PRE-FETCH"},
+/* 34 O READ POSITION */
+{0x34, T, "READ POSITION"},
+/* 34 O GET DATA BUFFER STATUS */
+{0x34, S, "GET DATA BUFFER STATUS"},
+
+/* 35 O OM O SYNCHRONIZE CACHE */
+{0x35, D|W|R|O, "SYNCHRONIZE CACHE"},
+
+/* 36 O OO O LOCK UNLOCK CACHE */
+{0x36, D|W|R|O, "LOCK UNLOCK CACHE"},
+
+/* 37 O O READ DEFECT DATA(10) */
+{0x37, D|O, "READ DEFECT DATA(10)"},
+
+/* 38 O O MEDIUM SCAN */
+{0x38, W|O, "MEDIUM SCAN"},
+
+/* 39 OOOOOOOO COMPARE */
+{0x39, ALL & ~(M|C|A|E), "COMPARE"},
+
+/* 3A OOOOOOOO COPY AND VERIFY */
+{0x3A, ALL & ~(M|C|A|E), "COPY AND VERIFY"},
+
+/* 3B OOOOOOOOOO O WRITE BUFFER */
+{0x3B, ALL & ~(A), "WRITE BUFFER"},
+
+/* 3C OOOOOOOOOO READ BUFFER */
+{0x3C, ALL & ~(A|E),"READ BUFFER"},
+
+/* 3D O O UPDATE BLOCK */
+{0x3D, W|O, "UPDATE BLOCK"},
+
+/* 3E O OO O READ LONG */
+{0x3E, D|W|R|O, "READ LONG"},
+
+/* 3F O O O WRITE LONG */
+{0x3F, D|W|O, "WRITE LONG"},
+
+/* 40 OOOOOOOOOO CHANGE DEFINITION */
+{0x40, ALL & ~(A|E),"CHANGE DEFINITION"},
+
+/* 41 O WRITE SAME */
+{0x41, D, "WRITE SAME"},
+
+/* 42 M READ SUB-CHANNEL */
+{0x42, R, "READ SUB-CHANNEL"},
+
+/* 43 M READ TOC/PMA/ATIP {MMC Proposed} */
+{0x43, R, "READ TOC/PMA/ATIP {MMC Proposed}"},
+
+/* 44 M REPORT DENSITY SUPPORT */
+{0x44, T, "REPORT DENSITY SUPPORT"},
+/* 44 M READ HEADER */
+{0x44, R, "READ HEADER"},
+
+/* 45 O PLAY AUDIO(10) */
+{0x45, R, "PLAY AUDIO(10)"},
+
+/* 46 */
+
+/* 47 O PLAY AUDIO MSF */
+{0x47, R, "PLAY AUDIO MSF"},
+
+/* 48 O PLAY AUDIO TRACK INDEX */
+{0x48, R, "PLAY AUDIO TRACK INDEX"},
+
+/* 49 O PLAY TRACK RELATIVE(10) */
+{0x49, R, "PLAY TRACK RELATIVE(10)"},
+
+/* 4A */
+
+/* 4B O PAUSE/RESUME */
+{0x4B, R, "PAUSE/RESUME"},
+
+/* 4C OOOOOOOOOOO LOG SELECT */
+{0x4C, ALL & ~(E), "LOG SELECT"},
+
+/* 4D OOOOOOOOOOO LOG SENSE */
+{0x4D, ALL & ~(E), "LOG SENSE"},
+
+/* 4E O STOP PLAY/SCAN {MMC Proposed} */
+{0x4E, R, "STOP PLAY/SCAN {MMC Proposed}"},
+
+/* 4F */
+
+/* 50 O XDWRITE(10) */
+{0x50, D, "XDWRITE(10)"},
+
+/* 51 O XPWRITE(10) */
+{0x51, D, "XPWRITE(10)"},
+/* 51 M READ DISC INFORMATION {MMC Proposed} */
+{0x51, R, "READ DISC INFORMATION {MMC Proposed}"},
+
+/* 52 O XDREAD(10) */
+{0x52, D, "XDREAD(10)"},
+/* 52 M READ TRACK INFORMATION {MMC Proposed} */
+{0x52, R, "READ TRACK INFORMATION {MMC Proposed}"},
+
+/* 53 M RESERVE TRACK {MMC Proposed} */
+{0x53, R, "RESERVE TRACK {MMC Proposed}"},
+
+/* 54 O SEND OPC INFORMATION {MMC Proposed} */
+{0x54, R, "SEND OPC INFORMATION {MMC Proposed}"},
+
+/* 55 OOO OOOOOOOO MODE SELECT(10) */
+{0x55, ALL & ~(P), "MODE SELECT(10)"},
+
+/* 56 MMMOMMMM O RESERVE(10) */
+{0x56, ALL & ~(M|C|A), "RESERVE(10)"},
+/* 56 M RESERVE ELEMENT(10) */
+{0x56, M, "RESERVE ELEMENT(10)"},
+
+/* 57 MMMOMMMM O RELEASE(10) */
+{0x57, ALL & ~(M|C|A), "RELEASE(10)"},
+/* 57 M RELEASE ELEMENT(10) */
+{0x57, M, "RELEASE ELEMENT(10)"},
+
+/* 58 O REPAIR TRACK {MMC Proposed} */
+{0x58, R, "REPAIR TRACK {MMC Proposed}"},
+
+/* 59 O READ MASTER CUE {MMC Proposed} */
+{0x59, R, "READ MASTER CUE {MMC Proposed}"},
+
+/* 5A OOO OOOOOOOO MODE SENSE(10) */
+{0x5A, ALL & ~(P), "MODE SENSE(10)"},
+
+/* 5B M CLOSE TRACK/SESSION {MMC Proposed} */
+{0x5B, R, "CLOSE TRACK/SESSION {MMC Proposed}"},
+
+/* 5C O READ BUFFER CAPACITY {MMC Proposed} */
+{0x5C, R, "READ BUFFER CAPACITY {MMC Proposed}"},
+
+/* 5D O SEND CUE SHEET {MMC Proposed} */
+{0x5D, R, "SEND CUE SHEET {MMC Proposed}"},
+
+/* 5E OOOOOOOOO O PERSISTENT RESERVE IN */
+{0x5E, ALL & ~(C|A),"PERSISTENT RESERVE IN"},
+
+/* 5F OOOOOOOOO O PERSISTENT RESERVE OUT */
+{0x5F, ALL & ~(C|A),"PERSISTENT RESERVE OUT"},
+
+/* 80 O XDWRITE EXTENDED(16) */
+{0x80, D, "XDWRITE EXTENDED(16)"},
+
+/* 81 O REBUILD(16) */
+{0x81, D, "REBUILD(16)"},
+
+/* 82 O REGENERATE(16) */
+{0x82, D, "REGENERATE(16)"},
+
+/* 83 */
+/* 84 */
+/* 85 */
+/* 86 */
+/* 87 */
+/* 88 */
+/* 89 */
+/* 8A */
+/* 8B */
+/* 8C */
+/* 8D */
+/* 8E */
+/* 8F */
+/* 90 */
+/* 91 */
+/* 92 */
+/* 93 */
+/* 94 */
+/* 95 */
+/* 96 */
+/* 97 */
+/* 98 */
+/* 99 */
+/* 9A */
+/* 9B */
+/* 9C */
+/* 9D */
+/* 9E */
+/* 9F */
+
+/* A0 OOOOOOOOOOO REPORT LUNS */
+{0xA0, ALL & ~(E), "REPORT LUNS"},
+
+/* A1 O BLANK {MMC Proposed} */
+{0xA1, R, "BLANK {MMC Proposed}"},
+
+/* A2 O WRITE CD MSF {MMC Proposed} */
+{0xA2, R, "WRITE CD MSF {MMC Proposed}"},
+
+/* A3 M MAINTENANCE (IN) */
+{0xA3, A, "MAINTENANCE (IN)"},
+
+/* A4 O MAINTENANCE (OUT) */
+{0xA4, A, "MAINTENANCE (OUT)"},
+
+/* A5 O M MOVE MEDIUM */
+{0xA5, T|M, "MOVE MEDIUM"},
+/* A5 O PLAY AUDIO(12) */
+{0xA5, R, "PLAY AUDIO(12)"},
+
+/* A6 O EXCHANGE MEDIUM */
+{0xA6, M, "EXCHANGE MEDIUM"},
+/* A6 O LOAD/UNLOAD CD {MMC Proposed} */
+{0xA6, R, "LOAD/UNLOAD CD {MMC Proposed}"},
+
+/* A7 OO OO OO MOVE MEDIUM ATTACHED */
+{0xA7, D|T|W|R|O|M, "MOVE MEDIUM ATTACHED"},
+
+/* A8 OM O READ(12) */
+{0xA8, W|R|O, "READ(12)"},
+/* A8 O GET MESSAGE(12) */
+{0xA8, C, "GET MESSAGE(12)"},
+
+/* A9 O PLAY TRACK RELATIVE(12) */
+{0xA9, R, "PLAY TRACK RELATIVE(12)"},
+
+/* AA O O WRITE(12) */
+{0xAA, W|O, "WRITE(12)"},
+/* AA O WRITE CD(12) {MMC Proposed} */
+{0xAA, R, "WRITE CD(12) {MMC Proposed}"},
+/* AA O SEND MESSAGE(12) */
+{0xAA, C, "SEND MESSAGE(12)"},
+
+/* AB */
+
+/* AC O ERASE(12) */
+{0xAC, O, "ERASE(12)"},
+
+/* AD */
+
+/* AE O O WRITE AND VERIFY(12) */
+{0xAE, W|O, "WRITE AND VERIFY(12)"},
+
+/* AF OO O VERIFY(12) */
+{0xAF, W|R|O, "VERIFY(12)"},
+
+/* B0 ZO Z SEARCH DATA HIGH(12) */
+{0xB0, W|R|O, "SEARCH DATA HIGH(12)"},
+
+/* B1 ZO Z SEARCH DATA EQUAL(12) */
+{0xB1, W|R|O, "SEARCH DATA EQUAL(12)"},
+
+/* B2 ZO Z SEARCH DATA LOW(12) */
+{0xB2, W|R|O, "SEARCH DATA LOW(12)"},
+
+/* B3 OO O SET LIMITS(12) */
+{0xB3, W|R|O, "SET LIMITS(12)"},
+
+/* B4 OO OO OO READ ELEMENT STATUS ATTACHED */
+{0xB4, D|T|W|R|O|M, "READ ELEMENT STATUS ATTACHED"},
+
+/* B5 O REQUEST VOLUME ELEMENT ADDRESS */
+{0xB5, M, "REQUEST VOLUME ELEMENT ADDRESS"},
+
+/* B6 O SEND VOLUME TAG */
+{0xB6, M, "SEND VOLUME TAG"},
+
+/* B7 O READ DEFECT DATA(12) */
+{0xB7, O, "READ DEFECT DATA(12)"},
+
+/* B8 O M READ ELEMENT STATUS */
+{0xB8, T|M, "READ ELEMENT STATUS"},
+/* B8 O SET CD SPEED {MMC Proposed} */
+{0xB8, R, "SET CD SPEED {MMC Proposed}"},
+
+/* B9 M READ CD MSF {MMC Proposed} */
+{0xB9, R, "READ CD MSF {MMC Proposed}"},
+
+/* BA O SCAN {MMC Proposed} */
+{0xBA, R, "SCAN {MMC Proposed}"},
+/* BA M REDUNDANCY GROUP (IN) */
+{0xBA, A, "REDUNDANCY GROUP (IN)"},
+
+/* BB O SET CD-ROM SPEED {proposed} */
+{0xBB, R, "SET CD-ROM SPEED {proposed}"},
+/* BB O REDUNDANCY GROUP (OUT) */
+{0xBB, A, "REDUNDANCY GROUP (OUT)"},
+
+/* BC O PLAY CD {MMC Proposed} */
+{0xBC, R, "PLAY CD {MMC Proposed}"},
+/* BC M SPARE (IN) */
+{0xBC, A, "SPARE (IN)"},
+
+/* BD M MECHANISM STATUS {MMC Proposed} */
+{0xBD, R, "MECHANISM STATUS {MMC Proposed}"},
+/* BD O SPARE (OUT) */
+{0xBD, A, "SPARE (OUT)"},
+
+/* BE O READ CD {MMC Proposed} */
+{0xBE, R, "READ CD {MMC Proposed}"},
+/* BE M VOLUME SET (IN) */
+{0xBE, A, "VOLUME SET (IN)"},
+
+/* BF O VOLUME SET (OUT) */
+{0xBF, A, "VOLUME SET (OUT)"}
+};
+
+const char *
+scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data)
+{
+ caddr_t match;
+ int i, j;
+ u_int16_t opmask;
+ u_int16_t pd_type;
+ int num_ops[2];
+ struct op_table_entry *table[2];
+ int num_tables;
+
+ pd_type = SID_TYPE(inq_data);
+
+ match = cam_quirkmatch((caddr_t)inq_data,
+ (caddr_t)scsi_op_quirk_table,
+ sizeof(scsi_op_quirk_table)/
+ sizeof(*scsi_op_quirk_table),
+ sizeof(*scsi_op_quirk_table),
+ scsi_inquiry_match);
+
+ if (match != NULL) {
+ table[0] = ((struct scsi_op_quirk_entry *)match)->op_table;
+ num_ops[0] = ((struct scsi_op_quirk_entry *)match)->num_ops;
+ table[1] = scsi_op_codes;
+ num_ops[1] = sizeof(scsi_op_codes)/sizeof(scsi_op_codes[0]);
+ num_tables = 2;
+ } else {
+ /*
+ * If this is true, we have a vendor specific opcode that
+ * wasn't covered in the quirk table.
+ */
+ if ((opcode > 0xBF) || ((opcode > 0x5F) && (opcode < 0x80)))
+ return("Vendor Specific Command");
+
+ table[0] = scsi_op_codes;
+ num_ops[0] = sizeof(scsi_op_codes)/sizeof(scsi_op_codes[0]);
+ num_tables = 1;
+ }
+
+ opmask = 1 << pd_type;
+
+ for (j = 0; j < num_tables; j++) {
+ for (i = 0;i < num_ops[j] && table[j][i].opcode <= opcode; i++){
+ if ((table[j][i].opcode == opcode)
+ && ((table[j][i].opmask & opmask) != 0))
+ return(table[j][i].desc);
+ }
+ }
+
+ /*
+ * If we can't find a match for the command in the table, we just
+ * assume it's a vendor specific command.
+ */
+ return("Vendor Specific Command");
+
+}
+
+#else /* SCSI_NO_OP_STRINGS */
+
+const char *
+scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data)
+{
+ return("");
+}
+
+#endif
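
Outside of this file, scsi_op_desc() needs nothing more than the opcode byte and the INQUIRY data for the device in question. A minimal sketch follows; the helper name and the assumption of an already-populated struct scsi_inquiry_data are mine, not part of this commit:

    /* Print a human-readable name for a CDB opcode. */
    static void
    example_print_opcode(u_int8_t opcode, struct scsi_inquiry_data *inq_data)
    {
    	printf("opcode 0x%02x: %s\n", opcode,
    	       scsi_op_desc(opcode, inq_data));
    }
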
+
+
+#include <sys/param.h>
+
+
+#if !defined(SCSI_NO_SENSE_STRINGS)
+#define SST(asc, ascq, action, desc) \
+ asc, ascq, action, desc
+#else
+#define SST(asc, ascq, action, desc) \
+ asc, asc, action
+#endif
+
+/*
+ * If we're in the kernel, 'quantum' is already defined in cam_xpt.c.
+ * Otherwise, we need to define it.
+ */
+#ifdef KERNEL
+extern const char quantum[];
+#else
+static const char quantum[] = "QUANTUM";
+#endif
+
+/*
+ * WARNING: You must update the num_ascs field below for this quirk table
+ * entry if you add more entries.
+ */
+static struct asc_table_entry quantum_fireball_entries[] = {
+ {SST(0x04, 0x0b, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
+ "Logical unit not ready, initializing cmd. required")}
+};
+
+static struct scsi_sense_quirk_entry asc_quirk_table[] = {
+ {
+ /*
+ * The Quantum Fireball ST and SE like to return 0x04 0x0b when
+ * they really should return 0x04 0x02. 0x04,0x0b isn't
+ * defined in any SCSI spec, and it isn't mentioned in the
+ * hardware manual for these drives.
+ */
+ {T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "FIREBALL S*", "*"},
+ 1, /* number of vendor-specific sense codes for this entry */
+ quantum_fireball_entries
+ }
+};
+
+static struct asc_table_entry asc_text[] = {
+/*
+ * From File: ASC-NUM.TXT
+ * SCSI ASC/ASCQ Assignments
+ * Numeric Sorted Listing
+ * as of 5/12/97
+ *
+ * D - DIRECT ACCESS DEVICE (SBC) device column key
+ * .T - SEQUENTIAL ACCESS DEVICE (SSC) -------------------
+ * . L - PRINTER DEVICE (SSC) blank = reserved
+ * . P - PROCESSOR DEVICE (SPC) not blank = allowed
+ * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC)
+ * . . R - CD DEVICE (MMC)
+ * . . S - SCANNER DEVICE (SGC)
+ * . . .O - OPTICAL MEMORY DEVICE (SBC)
+ * . . . M - MEDIA CHANGER DEVICE (SMC)
+ * . . . C - COMMUNICATION DEVICE (SSC)
+ * . . . .A - STORAGE ARRAY DEVICE (SCC)
+ * . . . . E - ENCLOSURE SERVICES DEVICE (SES)
+ * DTLPWRSOMCAE ASC ASCQ Action Description
+ * ------------ ---- ---- ------ -----------------------------------*/
+/* DTLPWRSOMCAE */{SST(0x00, 0x00, SS_NEPDEF,
+ "No additional sense information") },
+/* T S */{SST(0x00, 0x01, SS_DEF,
+ "Filemark detected") },
+/* T S */{SST(0x00, 0x02, SS_DEF,
+ "End-of-partition/medium detected") },
+/* T */{SST(0x00, 0x03, SS_DEF,
+ "Setmark detected") },
+/* T S */{SST(0x00, 0x04, SS_DEF,
+ "Beginning-of-partition/medium detected") },
+/* T S */{SST(0x00, 0x05, SS_DEF,
+ "End-of-data detected") },
+/* DTLPWRSOMCAE */{SST(0x00, 0x06, SS_DEF,
+ "I/O process terminated") },
+/* R */{SST(0x00, 0x11, SS_NEDEF|EBUSY,
+ "Audio play operation in progress") },
+/* R */{SST(0x00, 0x12, SS_NEDEF,
+ "Audio play operation paused") },
+/* R */{SST(0x00, 0x13, SS_NEDEF,
+ "Audio play operation successfully completed") },
+/* R */{SST(0x00, 0x14, SS_DEF,
+ "Audio play operation stopped due to error") },
+/* R */{SST(0x00, 0x15, SS_DEF,
+ "No current audio status to return") },
+/* DTLPWRSOMCAE */{SST(0x00, 0x16, SS_NEDEF|EBUSY,
+ "Operation in progress") },
+/* DTL WRSOM AE */{SST(0x00, 0x17, SS_DEF,
+ "Cleaning requested") },
+/* D W O */{SST(0x01, 0x00, SS_DEF,
+ "No index/sector signal") },
+/* D WR OM */{SST(0x02, 0x00, SS_DEF,
+ "No seek complete") },
+/* DTL W SO */{SST(0x03, 0x00, SS_DEF,
+ "Peripheral device write fault") },
+/* T */{SST(0x03, 0x01, SS_DEF,
+ "No write current") },
+/* T */{SST(0x03, 0x02, SS_DEF,
+ "Excessive write errors") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x00, SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|EIO,
+ "Logical unit not ready, cause not reportable") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x01, SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|EBUSY,
+ "Logical unit is in process of becoming ready") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x02, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
+ "Logical unit not ready, initializing cmd. required") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x03, SS_NEDEF|ENXIO,
+ "Logical unit not ready, manual intervention required")},
+/* DTL O */{SST(0x04, 0x04, SS_NEDEF|EBUSY,
+ "Logical unit not ready, format in progress") },
+/* DT W OMCA */{SST(0x04, 0x05, SS_NEDEF|EBUSY,
+ "Logical unit not ready, rebuild in progress") },
+/* DT W OMCA */{SST(0x04, 0x06, SS_NEDEF|EBUSY,
+ "Logical unit not ready, recalculation in progress") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x07, SS_NEDEF|EBUSY,
+ "Logical unit not ready, operation in progress") },
+/* R */{SST(0x04, 0x08, SS_NEDEF|EBUSY,
+ "Logical unit not ready, long write in progress") },
+/* DTL WRSOMCAE */{SST(0x05, 0x00, SS_DEF,
+ "Logical unit does not respond to selection") },
+/* D WR OM */{SST(0x06, 0x00, SS_DEF,
+ "No reference position found") },
+/* DTL WRSOM */{SST(0x07, 0x00, SS_DEF,
+ "Multiple peripheral devices selected") },
+/* DTL WRSOMCAE */{SST(0x08, 0x00, SS_DEF,
+ "Logical unit communication failure") },
+/* DTL WRSOMCAE */{SST(0x08, 0x01, SS_DEF,
+ "Logical unit communication time-out") },
+/* DTL WRSOMCAE */{SST(0x08, 0x02, SS_DEF,
+ "Logical unit communication parity error") },
+/* DT R OM */{SST(0x08, 0x03, SS_DEF,
+ "Logical unit communication crc error (ultra-dma/32)")},
+/* DT WR O */{SST(0x09, 0x00, SS_DEF,
+ "Track following error") },
+/* WR O */{SST(0x09, 0x01, SS_DEF,
+ "Tracking servo failure") },
+/* WR O */{SST(0x09, 0x02, SS_DEF,
+ "Focus servo failure") },
+/* WR O */{SST(0x09, 0x03, SS_DEF,
+ "Spindle servo failure") },
+/* DT WR O */{SST(0x09, 0x04, SS_DEF,
+ "Head select fault") },
+/* DTLPWRSOMCAE */{SST(0x0A, 0x00, SS_NEDEF|ENOSPC,
+ "Error log overflow") },
+/* DTLPWRSOMCAE */{SST(0x0B, 0x00, SS_DEF,
+ "Warning") },
+/* DTLPWRSOMCAE */{SST(0x0B, 0x01, SS_DEF,
+ "Specified temperature exceeded") },
+/* DTLPWRSOMCAE */{SST(0x0B, 0x02, SS_DEF,
+ "Enclosure degraded") },
+/* T RS */{SST(0x0C, 0x00, SS_DEF,
+ "Write error") },
+/* D W O */{SST(0x0C, 0x01, SS_NEDEF,
+ "Write error - recovered with auto reallocation") },
+/* D W O */{SST(0x0C, 0x02, SS_DEF,
+ "Write error - auto reallocation failed") },
+/* D W O */{SST(0x0C, 0x03, SS_DEF,
+ "Write error - recommend reassignment") },
+/* DT W O */{SST(0x0C, 0x04, SS_NEPDEF,
+ "Compression check miscompare error") },
+/* DT W O */{SST(0x0C, 0x05, SS_DEF,
+ "Data expansion occurred during compression") },
+/* DT W O */{SST(0x0C, 0x06, SS_DEF,
+ "Block not compressible") },
+/* R */{SST(0x0C, 0x07, SS_DEF,
+ "Write error - recovery needed") },
+/* R */{SST(0x0C, 0x08, SS_DEF,
+ "Write error - recovery failed") },
+/* R */{SST(0x0C, 0x09, SS_DEF,
+ "Write error - loss of streaming") },
+/* R */{SST(0x0C, 0x0A, SS_DEF,
+ "Write error - padding blocks added") },
+/* D W O */{SST(0x10, 0x00, SS_DEF,
+ "ID CRC or ECC error") },
+/* DT WRSO */{SST(0x11, 0x00, SS_DEF,
+ "Unrecovered read error") },
+/* DT W SO */{SST(0x11, 0x01, SS_DEF,
+ "Read retries exhausted") },
+/* DT W SO */{SST(0x11, 0x02, SS_DEF,
+ "Error too long to correct") },
+/* DT W SO */{SST(0x11, 0x03, SS_DEF,
+ "Multiple read errors") },
+/* D W O */{SST(0x11, 0x04, SS_DEF,
+ "Unrecovered read error - auto reallocate failed") },
+/* WR O */{SST(0x11, 0x05, SS_DEF,
+ "L-EC uncorrectable error") },
+/* WR O */{SST(0x11, 0x06, SS_DEF,
+ "CIRC unrecovered error") },
+/* W O */{SST(0x11, 0x07, SS_DEF,
+ "Data re-synchronization error") },
+/* T */{SST(0x11, 0x08, SS_DEF,
+ "Incomplete block read") },
+/* T */{SST(0x11, 0x09, SS_DEF,
+ "No gap found") },
+/* DT O */{SST(0x11, 0x0A, SS_DEF,
+ "Miscorrected error") },
+/* D W O */{SST(0x11, 0x0B, SS_DEF,
+ "Unrecovered read error - recommend reassignment") },
+/* D W O */{SST(0x11, 0x0C, SS_DEF,
+ "Unrecovered read error - recommend rewrite the data")},
+/* DT WR O */{SST(0x11, 0x0D, SS_DEF,
+ "De-compression CRC error") },
+/* DT WR O */{SST(0x11, 0x0E, SS_DEF,
+ "Cannot decompress using declared algorithm") },
+/* R */{SST(0x11, 0x0F, SS_DEF,
+ "Error reading UPC/EAN number") },
+/* R */{SST(0x11, 0x10, SS_DEF,
+ "Error reading ISRC number") },
+/* R */{SST(0x11, 0x11, SS_DEF,
+ "Read error - loss of streaming") },
+/* D W O */{SST(0x12, 0x00, SS_DEF,
+ "Address mark not found for id field") },
+/* D W O */{SST(0x13, 0x00, SS_DEF,
+ "Address mark not found for data field") },
+/* DTL WRSO */{SST(0x14, 0x00, SS_DEF,
+ "Recorded entity not found") },
+/* DT WR O */{SST(0x14, 0x01, SS_DEF,
+ "Record not found") },
+/* T */{SST(0x14, 0x02, SS_DEF,
+ "Filemark or setmark not found") },
+/* T */{SST(0x14, 0x03, SS_DEF,
+ "End-of-data not found") },
+/* T */{SST(0x14, 0x04, SS_DEF,
+ "Block sequence error") },
+/* DT W O */{SST(0x14, 0x05, SS_DEF,
+ "Record not found - recommend reassignment") },
+/* DT W O */{SST(0x14, 0x06, SS_DEF,
+ "Record not found - data auto-reallocated") },
+/* DTL WRSOM */{SST(0x15, 0x00, SS_DEF,
+ "Random positioning error") },
+/* DTL WRSOM */{SST(0x15, 0x01, SS_DEF,
+ "Mechanical positioning error") },
+/* DT WR O */{SST(0x15, 0x02, SS_DEF,
+ "Positioning error detected by read of medium") },
+/* D W O */{SST(0x16, 0x00, SS_DEF,
+ "Data synchronization mark error") },
+/* D W O */{SST(0x16, 0x01, SS_DEF,
+ "Data sync error - data rewritten") },
+/* D W O */{SST(0x16, 0x02, SS_DEF,
+ "Data sync error - recommend rewrite") },
+/* D W O */{SST(0x16, 0x03, SS_NEDEF,
+ "Data sync error - data auto-reallocated") },
+/* D W O */{SST(0x16, 0x04, SS_DEF,
+ "Data sync error - recommend reassignment") },
+/* DT WRSO */{SST(0x17, 0x00, SS_NEDEF,
+ "Recovered data with no error correction applied") },
+/* DT WRSO */{SST(0x17, 0x01, SS_NEDEF,
+ "Recovered data with retries") },
+/* DT WR O */{SST(0x17, 0x02, SS_NEDEF,
+ "Recovered data with positive head offset") },
+/* DT WR O */{SST(0x17, 0x03, SS_NEDEF,
+ "Recovered data with negative head offset") },
+/* WR O */{SST(0x17, 0x04, SS_NEDEF,
+ "Recovered data with retries and/or CIRC applied") },
+/* D WR O */{SST(0x17, 0x05, SS_NEDEF,
+ "Recovered data using previous sector id") },
+/* D W O */{SST(0x17, 0x06, SS_NEDEF,
+ "Recovered data without ECC - data auto-reallocated") },
+/* D W O */{SST(0x17, 0x07, SS_NEDEF,
+ "Recovered data without ECC - recommend reassignment")},
+/* D W O */{SST(0x17, 0x08, SS_NEDEF,
+ "Recovered data without ECC - recommend rewrite") },
+/* D W O */{SST(0x17, 0x09, SS_NEDEF,
+ "Recovered data without ECC - data rewritten") },
+/* D W O */{SST(0x18, 0x00, SS_NEDEF,
+ "Recovered data with error correction applied") },
+/* D WR O */{SST(0x18, 0x01, SS_NEDEF,
+ "Recovered data with error corr. & retries applied") },
+/* D WR O */{SST(0x18, 0x02, SS_NEDEF,
+ "Recovered data - data auto-reallocated") },
+/* R */{SST(0x18, 0x03, SS_NEDEF,
+ "Recovered data with CIRC") },
+/* R */{SST(0x18, 0x04, SS_NEDEF,
+ "Recovered data with L-EC") },
+/* D WR O */{SST(0x18, 0x05, SS_NEDEF,
+ "Recovered data - recommend reassignment") },
+/* D WR O */{SST(0x18, 0x06, SS_NEDEF,
+ "Recovered data - recommend rewrite") },
+/* D W O */{SST(0x18, 0x07, SS_NEDEF,
+ "Recovered data with ECC - data rewritten") },
+/* D O */{SST(0x19, 0x00, SS_DEF,
+ "Defect list error") },
+/* D O */{SST(0x19, 0x01, SS_DEF,
+ "Defect list not available") },
+/* D O */{SST(0x19, 0x02, SS_DEF,
+ "Defect list error in primary list") },
+/* D O */{SST(0x19, 0x03, SS_DEF,
+ "Defect list error in grown list") },
+/* DTLPWRSOMCAE */{SST(0x1A, 0x00, SS_DEF,
+ "Parameter list length error") },
+/* DTLPWRSOMCAE */{SST(0x1B, 0x00, SS_DEF,
+ "Synchronous data transfer error") },
+/* D O */{SST(0x1C, 0x00, SS_DEF,
+ "Defect list not found") },
+/* D O */{SST(0x1C, 0x01, SS_DEF,
+ "Primary defect list not found") },
+/* D O */{SST(0x1C, 0x02, SS_DEF,
+ "Grown defect list not found") },
+/* D W O */{SST(0x1D, 0x00, SS_NEPDEF,
+ "Miscompare during verify operation" )},
+/* D W O */{SST(0x1E, 0x00, SS_NEDEF,
+ "Recovered id with ecc correction") },
+/* D O */{SST(0x1F, 0x00, SS_DEF,
+ "Partial defect list transfer") },
+/* DTLPWRSOMCAE */{SST(0x20, 0x00, SS_DEF,
+ "Invalid command operation code") },
+/* DT WR OM */{SST(0x21, 0x00, SS_DEF,
+ "Logical block address out of range" )},
+/* DT WR OM */{SST(0x21, 0x01, SS_DEF,
+ "Invalid element address") },
+/* D */{SST(0x22, 0x00, SS_DEF,
+ "Illegal function") }, /* Deprecated. Use 20 00, 24 00, or 26 00 instead */
+/* DTLPWRSOMCAE */{SST(0x24, 0x00, SS_NEDEF|EINVAL,
+ "Invalid field in CDB") },
+/* DTLPWRSOMCAE */{SST(0x25, 0x00, SS_NEDEF|ENXIO,
+ "Logical unit not supported") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x00, SS_NEDEF|EINVAL,
+ "Invalid field in parameter list") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x01, SS_NEDEF|EINVAL,
+ "Parameter not supported") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x02, SS_NEDEF|EINVAL,
+ "Parameter value invalid") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x03, SS_DEF,
+ "Threshold parameters not supported") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x04, SS_DEF,
+ "Invalid release of active persistent reservation") },
+/* DT W O */{SST(0x27, 0x00, SS_NEDEF|EACCES,
+ "Write protected") },
+/* DT W O */{SST(0x27, 0x01, SS_NEDEF|EACCES,
+ "Hardware write protected") },
+/* DT W O */{SST(0x27, 0x02, SS_NEDEF|EACCES,
+ "Logical unit software write protected") },
+/* T */{SST(0x27, 0x03, SS_NEDEF|EACCES,
+ "Associated write protect") },
+/* T */{SST(0x27, 0x04, SS_NEDEF|EACCES,
+ "Persistent write protect") },
+/* T */{SST(0x27, 0x05, SS_NEDEF|EACCES,
+ "Permanent write protect") },
+/* DTLPWRSOMCAE */{SST(0x28, 0x00, SS_NEDEF|ENXIO,
+ "Not ready to ready change, medium may have changed") },
+/* DT WR OM */{SST(0x28, 0x01, SS_DEF,
+ "Import or export element accessed") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x00, SS_NEDEF|ENXIO,
+ "Power on, reset, or bus device reset occurred") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x01, SS_DEF,
+ "Power on occurred") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x02, SS_DEF,
+ "Scsi bus reset occurred") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x03, SS_DEF,
+ "Bus device reset function occurred") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x04, SS_DEF,
+ "Device internal reset") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x05, SS_DEF,
+ "Transceiver mode changed to single-ended") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x06, SS_DEF,
+ "Transceiver mode changed to LVD") },
+/* DTL WRSOMCAE */{SST(0x2A, 0x00, SS_DEF,
+ "Parameters changed") },
+/* DTL WRSOMCAE */{SST(0x2A, 0x01, SS_DEF,
+ "Mode parameters changed") },
+/* DTL WRSOMCAE */{SST(0x2A, 0x02, SS_DEF,
+ "Log parameters changed") },
+/* DTLPWRSOMCAE */{SST(0x2A, 0x03, SS_DEF,
+ "Reservations preempted") },
+/* DTLPWRSO C */{SST(0x2B, 0x00, SS_DEF,
+ "Copy cannot execute since host cannot disconnect") },
+/* DTLPWRSOMCAE */{SST(0x2C, 0x00, SS_DEF,
+ "Command sequence error") },
+/* S */{SST(0x2C, 0x01, SS_DEF,
+ "Too many windows specified") },
+/* S */{SST(0x2C, 0x02, SS_DEF,
+ "Invalid combination of windows specified") },
+/* R */{SST(0x2C, 0x03, SS_DEF,
+ "Current program area is not empty") },
+/* R */{SST(0x2C, 0x04, SS_DEF,
+ "Current program area is empty") },
+/* T */{SST(0x2D, 0x00, SS_DEF,
+ "Overwrite error on update in place") },
+/* DTLPWRSOMCAE */{SST(0x2F, 0x00, SS_DEF,
+ "Commands cleared by another initiator") },
+/* DT WR OM */{SST(0x30, 0x00, SS_DEF,
+ "Incompatible medium installed") },
+/* DT WR O */{SST(0x30, 0x01, SS_DEF,
+ "Cannot read medium - unknown format") },
+/* DT WR O */{SST(0x30, 0x02, SS_DEF,
+ "Cannot read medium - incompatible format") },
+/* DT */{SST(0x30, 0x03, SS_DEF,
+ "Cleaning cartridge installed") },
+/* DT WR O */{SST(0x30, 0x04, SS_DEF,
+ "Cannot write medium - unknown format") },
+/* DT WR O */{SST(0x30, 0x05, SS_DEF,
+ "Cannot write medium - incompatible format") },
+/* DT W O */{SST(0x30, 0x06, SS_DEF,
+ "Cannot format medium - incompatible medium") },
+/* DTL WRSOM AE */{SST(0x30, 0x07, SS_DEF,
+ "Cleaning failure") },
+/* R */{SST(0x30, 0x08, SS_DEF,
+ "Cannot write - application code mismatch") },
+/* R */{SST(0x30, 0x09, SS_DEF,
+ "Current session not fixated for append") },
+/* DT WR O */{SST(0x31, 0x00, SS_DEF,
+ "Medium format corrupted") },
+/* D L R O */{SST(0x31, 0x01, SS_DEF,
+ "Format command failed") },
+/* D W O */{SST(0x32, 0x00, SS_DEF,
+ "No defect spare location available") },
+/* D W O */{SST(0x32, 0x01, SS_DEF,
+ "Defect list update failure") },
+/* T */{SST(0x33, 0x00, SS_DEF,
+ "Tape length error") },
+/* DTLPWRSOMCAE */{SST(0x34, 0x00, SS_DEF,
+ "Enclosure failure") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x00, SS_DEF,
+ "Enclosure services failure") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x01, SS_DEF,
+ "Unsupported enclosure function") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x02, SS_DEF,
+ "Enclosure services unavailable") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x03, SS_DEF,
+ "Enclosure services transfer failure") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x04, SS_DEF,
+ "Enclosure services transfer refused") },
+/* L */{SST(0x36, 0x00, SS_DEF,
+ "Ribbon, ink, or toner failure") },
+/* DTL WRSOMCAE */{SST(0x37, 0x00, SS_DEF,
+ "Rounded parameter") },
+/* DTL WRSOMCAE */{SST(0x39, 0x00, SS_DEF,
+ "Saving parameters not supported") },
+/* DTL WRSOM */{SST(0x3A, 0x00, SS_NEDEF|ENXIO,
+ "Medium not present") },
+/* DT WR OM */{SST(0x3A, 0x01, SS_NEDEF|ENXIO,
+ "Medium not present - tray closed") },
+/* DT WR OM */{SST(0x3A, 0x02, SS_NEDEF|ENXIO,
+ "Medium not present - tray open") },
+/* TL */{SST(0x3B, 0x00, SS_DEF,
+ "Sequential positioning error") },
+/* T */{SST(0x3B, 0x01, SS_DEF,
+ "Tape position error at beginning-of-medium") },
+/* T */{SST(0x3B, 0x02, SS_DEF,
+ "Tape position error at end-of-medium") },
+/* L */{SST(0x3B, 0x03, SS_DEF,
+ "Tape or electronic vertical forms unit not ready") },
+/* L */{SST(0x3B, 0x04, SS_DEF,
+ "Slew failure") },
+/* L */{SST(0x3B, 0x05, SS_DEF,
+ "Paper jam") },
+/* L */{SST(0x3B, 0x06, SS_DEF,
+ "Failed to sense top-of-form") },
+/* L */{SST(0x3B, 0x07, SS_DEF,
+ "Failed to sense bottom-of-form") },
+/* T */{SST(0x3B, 0x08, SS_DEF,
+ "Reposition error") },
+/* S */{SST(0x3B, 0x09, SS_DEF,
+ "Read past end of medium") },
+/* S */{SST(0x3B, 0x0A, SS_DEF,
+ "Read past beginning of medium") },
+/* S */{SST(0x3B, 0x0B, SS_DEF,
+ "Position past end of medium") },
+/* T S */{SST(0x3B, 0x0C, SS_DEF,
+ "Position past beginning of medium") },
+/* DT WR OM */{SST(0x3B, 0x0D, SS_NEDEF|ENOSPC,
+ "Medium destination element full") },
+/* DT WR OM */{SST(0x3B, 0x0E, SS_DEF,
+ "Medium source element empty") },
+/* R */{SST(0x3B, 0x0F, SS_DEF,
+ "End of medium reached") },
+/* DT WR OM */{SST(0x3B, 0x11, SS_DEF,
+ "Medium magazine not accessible") },
+/* DT WR OM */{SST(0x3B, 0x12, SS_DEF,
+ "Medium magazine removed") },
+/* DT WR OM */{SST(0x3B, 0x13, SS_DEF,
+ "Medium magazine inserted") },
+/* DT WR OM */{SST(0x3B, 0x14, SS_DEF,
+ "Medium magazine locked") },
+/* DT WR OM */{SST(0x3B, 0x15, SS_DEF,
+ "Medium magazine unlocked") },
+/* DTLPWRSOMCAE */{SST(0x3D, 0x00, SS_DEF,
+ "Invalid bits in identify message") },
+/* DTLPWRSOMCAE */{SST(0x3E, 0x00, SS_DEF,
+ "Logical unit has not self-configured yet") },
+/* DTLPWRSOMCAE */{SST(0x3E, 0x01, SS_DEF,
+ "Logical unit failure") },
+/* DTLPWRSOMCAE */{SST(0x3E, 0x02, SS_DEF,
+ "Timeout on logical unit") },
+/* DTLPWRSOMCAE */{SST(0x3F, 0x00, SS_DEF,
+ "Target operating conditions have changed") },
+/* DTLPWRSOMCAE */{SST(0x3F, 0x01, SS_DEF,
+ "Microcode has been changed") },
+/* DTLPWRSOMC */{SST(0x3F, 0x02, SS_DEF,
+ "Changed operating definition") },
+/* DTLPWRSOMCAE */{SST(0x3F, 0x03, SS_DEF,
+ "Inquiry data has changed") },
+/* DT WR OMCAE */{SST(0x3F, 0x04, SS_DEF,
+ "Component device attached") },
+/* DT WR OMCAE */{SST(0x3F, 0x05, SS_DEF,
+ "Device identifier changed") },
+/* DT WR OMCAE */{SST(0x3F, 0x06, SS_DEF,
+ "Redundancy group created or modified") },
+/* DT WR OMCAE */{SST(0x3F, 0x07, SS_DEF,
+ "Redundancy group deleted") },
+/* DT WR OMCAE */{SST(0x3F, 0x08, SS_DEF,
+ "Spare created or modified") },
+/* DT WR OMCAE */{SST(0x3F, 0x09, SS_DEF,
+ "Spare deleted") },
+/* DT WR OMCAE */{SST(0x3F, 0x0A, SS_DEF,
+ "Volume set created or modified") },
+/* DT WR OMCAE */{SST(0x3F, 0x0B, SS_DEF,
+ "Volume set deleted") },
+/* DT WR OMCAE */{SST(0x3F, 0x0C, SS_DEF,
+ "Volume set deassigned") },
+/* DT WR OMCAE */{SST(0x3F, 0x0D, SS_DEF,
+ "Volume set reassigned") },
+/* D */{SST(0x40, 0x00, SS_DEF,
+ "Ram failure") }, /* deprecated - use 40 NN instead */
+/* DTLPWRSOMCAE */{SST(0x40, 0x80, SS_DEF,
+ "Diagnostic failure: ASCQ = Component ID") },
+/* DTLPWRSOMCAE */{SST(0x40, 0xFF, SS_DEF|SSQ_RANGE,
+ NULL) },/* Range 0x80->0xFF */
+/* D */{SST(0x41, 0x00, SS_DEF,
+ "Data path failure") }, /* deprecated - use 40 NN instead */
+/* D */{SST(0x42, 0x00, SS_DEF,
+ "Power-on or self-test failure") }, /* deprecated - use 40 NN instead */
+/* DTLPWRSOMCAE */{SST(0x43, 0x00, SS_DEF,
+ "Message error") },
+/* DTLPWRSOMCAE */{SST(0x44, 0x00, SS_DEF,
+ "Internal target failure") },
+/* DTLPWRSOMCAE */{SST(0x45, 0x00, SS_DEF,
+ "Select or reselect failure") },
+/* DTLPWRSOMC */{SST(0x46, 0x00, SS_DEF,
+ "Unsuccessful soft reset") },
+/* DTLPWRSOMCAE */{SST(0x47, 0x00, SS_DEF,
+ "SCSI parity error") },
+/* DTLPWRSOMCAE */{SST(0x48, 0x00, SS_DEF,
+ "Initiator detected error message received") },
+/* DTLPWRSOMCAE */{SST(0x49, 0x00, SS_DEF,
+ "Invalid message error") },
+/* DTLPWRSOMCAE */{SST(0x4A, 0x00, SS_DEF,
+ "Command phase error") },
+/* DTLPWRSOMCAE */{SST(0x4B, 0x00, SS_DEF,
+ "Data phase error") },
+/* DTLPWRSOMCAE */{SST(0x4C, 0x00, SS_DEF,
+ "Logical unit failed self-configuration") },
+/* DTLPWRSOMCAE */{SST(0x4D, 0x00, SS_DEF,
+ "Tagged overlapped commands: ASCQ = Queue tag ID") },
+/* DTLPWRSOMCAE */{SST(0x4D, 0xFF, SS_DEF|SSQ_RANGE,
+ NULL)}, /* Range 0x00->0xFF */
+/* DTLPWRSOMCAE */{SST(0x4E, 0x00, SS_DEF,
+ "Overlapped commands attempted") },
+/* T */{SST(0x50, 0x00, SS_DEF,
+ "Write append error") },
+/* T */{SST(0x50, 0x01, SS_DEF,
+ "Write append position error") },
+/* T */{SST(0x50, 0x02, SS_DEF,
+ "Position error related to timing") },
+/* T O */{SST(0x51, 0x00, SS_DEF,
+ "Erase failure") },
+/* T */{SST(0x52, 0x00, SS_DEF,
+ "Cartridge fault") },
+/* DTL WRSOM */{SST(0x53, 0x00, SS_DEF,
+ "Media load or eject failed") },
+/* T */{SST(0x53, 0x01, SS_DEF,
+ "Unload tape failure") },
+/* DT WR OM */{SST(0x53, 0x02, SS_DEF,
+ "Medium removal prevented") },
+/* P */{SST(0x54, 0x00, SS_DEF,
+ "Scsi to host system interface failure") },
+/* P */{SST(0x55, 0x00, SS_DEF,
+ "System resource failure") },
+/* D O */{SST(0x55, 0x01, SS_NEDEF|ENOSPC,
+ "System buffer full") },
+/* R */{SST(0x57, 0x00, SS_DEF,
+ "Unable to recover table-of-contents") },
+/* O */{SST(0x58, 0x00, SS_DEF,
+ "Generation does not exist") },
+/* O */{SST(0x59, 0x00, SS_DEF,
+ "Updated block read") },
+/* DTLPWRSOM */{SST(0x5A, 0x00, SS_DEF,
+ "Operator request or state change input") },
+/* DT WR OM */{SST(0x5A, 0x01, SS_DEF,
+ "Operator medium removal request") },
+/* DT W O */{SST(0x5A, 0x02, SS_DEF,
+ "Operator selected write protect") },
+/* DT W O */{SST(0x5A, 0x03, SS_DEF,
+ "Operator selected write permit") },
+/* DTLPWRSOM */{SST(0x5B, 0x00, SS_DEF,
+ "Log exception") },
+/* DTLPWRSOM */{SST(0x5B, 0x01, SS_DEF,
+ "Threshold condition met") },
+/* DTLPWRSOM */{SST(0x5B, 0x02, SS_DEF,
+ "Log counter at maximum") },
+/* DTLPWRSOM */{SST(0x5B, 0x03, SS_DEF,
+ "Log list codes exhausted") },
+/* D O */{SST(0x5C, 0x00, SS_DEF,
+ "RPL status change") },
+/* D O */{SST(0x5C, 0x01, SS_NEDEF,
+ "Spindles synchronized") },
+/* D O */{SST(0x5C, 0x02, SS_DEF,
+ "Spindles not synchronized") },
+/* DTLPWRSOMCAE */{SST(0x5D, 0x00, SS_DEF,
+ "Failure prediction threshold exceeded") },
+/* DTLPWRSOMCAE */{SST(0x5D, 0xFF, SS_DEF,
+ "Failure prediction threshold exceeded (false)") },
+/* DTLPWRSO CA */{SST(0x5E, 0x00, SS_DEF,
+ "Low power condition on") },
+/* DTLPWRSO CA */{SST(0x5E, 0x01, SS_DEF,
+ "Idle condition activated by timer") },
+/* DTLPWRSO CA */{SST(0x5E, 0x02, SS_DEF,
+ "Standby condition activated by timer") },
+/* DTLPWRSO CA */{SST(0x5E, 0x03, SS_DEF,
+ "Idle condition activated by command") },
+/* DTLPWRSO CA */{SST(0x5E, 0x04, SS_DEF,
+ "Standby condition activated by command") },
+/* S */{SST(0x60, 0x00, SS_DEF,
+ "Lamp failure") },
+/* S */{SST(0x61, 0x00, SS_DEF,
+ "Video acquisition error") },
+/* S */{SST(0x61, 0x01, SS_DEF,
+ "Unable to acquire video") },
+/* S */{SST(0x61, 0x02, SS_DEF,
+ "Out of focus") },
+/* S */{SST(0x62, 0x00, SS_DEF,
+ "Scan head positioning error") },
+/* R */{SST(0x63, 0x00, SS_DEF,
+ "End of user area encountered on this track") },
+/* R */{SST(0x63, 0x01, SS_NEDEF|ENOSPC,
+ "Packet does not fit in available space") },
+/* R */{SST(0x64, 0x00, SS_DEF,
+ "Illegal mode for this track") },
+/* R */{SST(0x64, 0x01, SS_DEF,
+ "Invalid packet size") },
+/* DTLPWRSOMCAE */{SST(0x65, 0x00, SS_DEF,
+ "Voltage fault") },
+/* S */{SST(0x66, 0x00, SS_DEF,
+ "Automatic document feeder cover up") },
+/* S */{SST(0x66, 0x01, SS_DEF,
+ "Automatic document feeder lift up") },
+/* S */{SST(0x66, 0x02, SS_DEF,
+ "Document jam in automatic document feeder") },
+/* S */{SST(0x66, 0x03, SS_DEF,
+ "Document miss feed automatic in document feeder") },
+/* A */{SST(0x67, 0x00, SS_DEF,
+ "Configuration failure") },
+/* A */{SST(0x67, 0x01, SS_DEF,
+ "Configuration of incapable logical units failed") },
+/* A */{SST(0x67, 0x02, SS_DEF,
+ "Add logical unit failed") },
+/* A */{SST(0x67, 0x03, SS_DEF,
+ "Modification of logical unit failed") },
+/* A */{SST(0x67, 0x04, SS_DEF,
+ "Exchange of logical unit failed") },
+/* A */{SST(0x67, 0x05, SS_DEF,
+ "Remove of logical unit failed") },
+/* A */{SST(0x67, 0x06, SS_DEF,
+ "Attachment of logical unit failed") },
+/* A */{SST(0x67, 0x07, SS_DEF,
+ "Creation of logical unit failed") },
+/* A */{SST(0x68, 0x00, SS_DEF,
+ "Logical unit not configured") },
+/* A */{SST(0x69, 0x00, SS_DEF,
+ "Data loss on logical unit") },
+/* A */{SST(0x69, 0x01, SS_DEF,
+ "Multiple logical unit failures") },
+/* A */{SST(0x69, 0x02, SS_DEF,
+ "Parity/data mismatch") },
+/* A */{SST(0x6A, 0x00, SS_DEF,
+ "Informational, refer to log") },
+/* A */{SST(0x6B, 0x00, SS_DEF,
+ "State change has occurred") },
+/* A */{SST(0x6B, 0x01, SS_DEF,
+ "Redundancy level got better") },
+/* A */{SST(0x6B, 0x02, SS_DEF,
+ "Redundancy level got worse") },
+/* A */{SST(0x6C, 0x00, SS_DEF,
+ "Rebuild failure occurred") },
+/* A */{SST(0x6D, 0x00, SS_DEF,
+ "Recalculate failure occurred") },
+/* A */{SST(0x6E, 0x00, SS_DEF,
+ "Command to logical unit failed") },
+/* T */{SST(0x70, 0x00, SS_DEF,
+ "Decompression exception short: ASCQ = Algorithm ID") },
+/* T */{SST(0x70, 0xFF, SS_DEF|SSQ_RANGE,
+ NULL) }, /* Range 0x00 -> 0xFF */
+/* T */{SST(0x71, 0x00, SS_DEF,
+ "Decompression exception long: ASCQ = Algorithm ID") },
+/* T */{SST(0x71, 0xFF, SS_DEF|SSQ_RANGE,
+ NULL) }, /* Range 0x00 -> 0xFF */
+/* R */{SST(0x72, 0x00, SS_DEF,
+ "Session fixation error") },
+/* R */{SST(0x72, 0x01, SS_DEF,
+ "Session fixation error writing lead-in") },
+/* R */{SST(0x72, 0x02, SS_DEF,
+ "Session fixation error writing lead-out") },
+/* R */{SST(0x72, 0x03, SS_DEF,
+ "Session fixation error - incomplete track in session") },
+/* R */{SST(0x72, 0x04, SS_DEF,
+ "Empty or partially written reserved track") },
+/* R */{SST(0x73, 0x00, SS_DEF,
+ "CD control error") },
+/* R */{SST(0x73, 0x01, SS_DEF,
+ "Power calibration area almost full") },
+/* R */{SST(0x73, 0x02, SS_NEDEF|ENOSPC,
+ "Power calibration area is full") },
+/* R */{SST(0x73, 0x03, SS_DEF,
+ "Power calibration area error") },
+/* R */{SST(0x73, 0x04, SS_DEF,
+ "Program memory area update failure") },
+/* R */{SST(0x73, 0x05, SS_DEF,
+ "program memory area is full") }
+};
+
+#if !defined(SCSI_NO_SENSE_STRINGS)
+const char *
+scsi_sense_desc(int asc, int ascq, struct scsi_inquiry_data *inq_data)
+{
+ int i, j;
+ caddr_t match;
+ struct asc_table_entry *table[2];
+ int table_size[2];
+ int num_tables;
+
+ if (inq_data == NULL)
+ return(NULL);
+
+ match = cam_quirkmatch((caddr_t)inq_data,
+ (caddr_t)asc_quirk_table,
+ sizeof(asc_quirk_table)/sizeof(*asc_quirk_table),
+ sizeof(*asc_quirk_table), scsi_inquiry_match);
+
+ if (match != NULL) {
+ table[0] = ((struct scsi_sense_quirk_entry *)match)->asc_info;
+ table_size[0] =
+ ((struct scsi_sense_quirk_entry *)match)->num_ascs;
+ table[1] = asc_text;
+ table_size[1] = sizeof(asc_text)/sizeof(asc_text[0]);
+ num_tables = 2;
+ } else {
+ table[0] = asc_text;
+ table_size[0] = sizeof(asc_text)/sizeof(asc_text[0]);
+ num_tables = 1;
+ }
+
+ for (j = 0; j < num_tables; j++) {
+ for (i = 0; i < table_size[j]; i++) {
+ if (table[j][i].asc == asc) {
+
+ /* Check for ranges */
+ if ((table[j][i].action & SSQ_RANGE) != 0) {
+
+ if (table[j][i].ascq >= ascq
+ && table[j][i-1].ascq <= ascq)
+ return table[j][i-1].desc;
+
+ continue;
+ }
+
+ if (table[j][i].ascq == ascq)
+ return table[j][i].desc;
+ }
+ }
+ }
+
+ if (asc >= 0x80 && asc <= 0xff)
+ return "Vendor Specific ASC";
+
+ if (ascq >= 0x80 && ascq <= 0xff)
+ return "Vendor Specific ASCQ";
+
+ return "Reserved ASC/ASCQ pair";
+}
+
+#else /* SCSI_NO_SENSE_STRINGS */
+const char *
+scsi_sense_desc(int asc, int ascq, struct scsi_inquiry_data *inq_data)
+{
+ return ("");
+}
+#endif
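
Given a raw sense buffer, the sense key, ASC, and ASCQ can be pulled out exactly as the printing routines later in this file do, and handed to scsi_sense_desc() for text. The helper below is only a sketch with an invented name, and it assumes inq_data is valid (scsi_sense_desc() returns NULL otherwise):

    /* Describe the sense key and ASC/ASCQ carried in a sense buffer. */
    static void
    example_describe_sense(struct scsi_sense_data *sense,
    		       struct scsi_inquiry_data *inq_data)
    {
    	int sense_key, asc, ascq;

    	sense_key = sense->flags & SSD_KEY;
    	asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
    	ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;

    	printf("%s asc:%x,%x (%s)\n", scsi_sense_key_text[sense_key],
    	       asc, ascq, scsi_sense_desc(asc, ascq, inq_data));
    }
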
+
+/*
+ * Given a particular failed CCB and its device type information, return
+ * the appropriate action from either the sense code quirk table or the
+ * sense code table.
+ */
+scsi_sense_action
+scsi_error_action(int asc, int ascq, struct scsi_inquiry_data *inq_data)
+{
+ caddr_t match;
+ struct asc_table_entry *table[2];
+ int table_size[2];
+ int num_tables;
+ int i, j;
+
+ /*
+ * If we don't have inquiry data, we can't match against any quirk
+ * entries.
+ */
+ if (inq_data != NULL) {
+ match = cam_quirkmatch((caddr_t)inq_data,
+ (caddr_t)asc_quirk_table,
+ sizeof(asc_quirk_table) /
+ sizeof(*asc_quirk_table),
+ sizeof(*asc_quirk_table),
+ scsi_inquiry_match);
+ } else
+ match = NULL;
+
+ if (match != NULL) {
+ table[0] = ((struct scsi_sense_quirk_entry *)match)->asc_info;
+ table_size[0] =
+ ((struct scsi_sense_quirk_entry *)match)->num_ascs;
+ table[1] = asc_text;
+ table_size[1] = sizeof(asc_text)/sizeof(asc_text[0]);
+ num_tables = 2;
+ } else {
+ table[0] = asc_text;
+ table_size[0] = sizeof(asc_text)/sizeof(asc_text[0]);
+ num_tables = 1;
+ }
+
+ for (j = 0; j < num_tables; j++) {
+ for (i = 0; i < table_size[j]; i++) {
+ if (table[j][i].asc == asc) {
+
+ /* Check for ranges */
+ if ((table[j][i].action & SSQ_RANGE) != 0){
+
+ if (table[j][i].ascq >= ascq
+ && table[j][i-1].ascq <= ascq)
+ return table[j][i].action;
+
+ continue;
+ }
+
+ /*
+ * Check to see if we have a match. If the
+ * current ascq in the table is greater
+ * than our ascq, and there aren't any more
+ * tables to search, just return the
+ * default action.
+ */
+ if (table[j][i].ascq == ascq)
+ return(table[j][i].action);
+ else if ((j == (num_tables - 1)) &&
+ (table[j][i].ascq > ascq))
+ return(SS_DEF);
+ }
+ }
+ }
+ /*
+ * If we get to this point, it's most likely a vendor specific
+ * ASC and we don't have a quirk entry for it. Oh well, we just
+ * tell the error handling code to take the default action.
+ */
+ return(SS_DEF);
+}
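
The scsi_sense_action value returned above is a bit field: a base recovery strategy such as SS_DEF or SS_TUR, optional SSQ_* modifiers, and possibly an errno, all defined in scsi_all.h. A caller might use it roughly as follows; this is a sketch only, testing just the modifier bits that appear in the tables above:

    /* Ask the sense tables how a given ASC/ASCQ should be handled. */
    static void
    example_handle_sense(int asc, int ascq, struct scsi_inquiry_data *inq_data)
    {
    	scsi_sense_action action;

    	action = scsi_error_action(asc, ascq, inq_data);
    	if ((action & SSQ_DECREMENT_COUNT) != 0) {
    		/* This failure should use up one retry. */
    	}
    	if ((action & SSQ_MANY) != 0) {
    		/* Expect to wait and retry through many of these. */
    	}
    }
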
+
+char *
+scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string)
+{
+ u_int8_t cdb_len;
+ char holdstr[8];
+ int i;
+
+ if (cdb_ptr == NULL)
+ return("");
+
+ /* Silence warnings */
+ cdb_len = 0;
+
+ /*
+ * This is taken from the SCSI-3 draft spec.
+ * (T10/1157D revision 0.3)
+ * The top 3 bits of an opcode are the group code. The next 5 bits
+ * are the command code.
+ * Group 0: six byte commands
+ * Group 1: ten byte commands
+ * Group 2: ten byte commands
+ * Group 3: reserved
+ * Group 4: sixteen byte commands
+ * Group 5: twelve byte commands
+ * Group 6: vendor specific
+ * Group 7: vendor specific
+ */
+ switch((*cdb_ptr >> 5) & 0x7) {
+ case 0:
+ cdb_len = 6;
+ break;
+ case 1:
+ case 2:
+ cdb_len = 10;
+ break;
+ case 3:
+ case 6:
+ case 7:
+ /* in this case, just print out the opcode */
+ cdb_len = 1;
+ break;
+ case 4:
+ cdb_len = 16;
+ break;
+ case 5:
+ cdb_len = 12;
+ break;
+ }
+ *cdb_string = '\0';
+ for (i = 0; i < cdb_len; i++) {
+ sprintf(holdstr, "%x ", cdb_ptr[i]);
+ strcat(cdb_string, holdstr);
+ }
+
+ return(cdb_string);
+}
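
As a worked instance of the group-code rule in the comment above: READ(10) is opcode 0x28, (0x28 >> 5) & 0x7 is 1, so ten bytes are formatted. The caller owns the output buffer; the sizing below mirrors the callers later in this file (three characters per CDB byte plus a terminator). Sketch only:

    /* Format a CDB as hex text, e.g. a ten-byte READ(10) starting with 0x28. */
    static void
    example_print_cdb(u_int8_t *cdb)
    {
    	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];

    	printf("CDB: %s\n", scsi_cdb_string(cdb, cdb_str));
    }
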
+/*
+ * scsi_sense_print will decode the sense data into human
+ * readable form. Sense handlers can use this to generate
+ * a report.
+ */
+/*
+ * Because scsi_sense_print() utilizes transport layer functions, it will
+ * only work in the kernel.
+ */
+#ifdef KERNEL
+
+void
+scsi_sense_print(struct ccb_scsiio *csio)
+{
+ struct scsi_sense_data *sense;
+ u_int32_t info;
+ int error_code;
+ int sense_key;
+ int asc, ascq;
+ struct ccb_getdev cgd;
+ u_int8_t command_print;
+
+ sense = &csio->sense_data;
+
+ /*
+ * If the CDB is a physical address, we can't deal with it..
+ */
+ if ((csio->ccb_h.flags & CAM_CDB_PHYS) != 0)
+ command_print = 0;
+ else
+ command_print = 1;
+
+ /*
+ * Get the device information.
+ */
+ xpt_setup_ccb(&cgd.ccb_h,
+ csio->ccb_h.path,
+ /*priority*/ 1);
+ cgd.ccb_h.func_code = XPT_GDEV_TYPE;
+ xpt_action((union ccb *)&cgd);
+
+ /*
+ * If the device is unconfigured, just pretend that it is a hard
+ * drive. scsi_op_desc() needs this.
+ */
+ if (cgd.ccb_h.status == CAM_DEV_NOT_THERE)
+ cgd.inq_data.device = T_DIRECT;
+
+ if (command_print != 0) {
+ char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
+
+ xpt_print_path(csio->ccb_h.path);
+
+ if ((csio->ccb_h.flags & CAM_CDB_POINTER) != 0) {
+ printf("%s. CDB: %s\n",
+ scsi_op_desc(csio->cdb_io.cdb_ptr[0],
+ &cgd.inq_data),
+ scsi_cdb_string(csio->cdb_io.cdb_ptr, cdb_str));
+ } else {
+ printf("%s. CDB: %s\n",
+ scsi_op_desc(csio->cdb_io.cdb_bytes[0],
+ &cgd.inq_data), scsi_cdb_string(
+ (u_int8_t *)&csio->cdb_io.cdb_bytes, cdb_str));
+ }
+ }
+
+ /*
+ * If the sense data is a physical pointer, forget it.
+ */
+ if (csio->ccb_h.flags & CAM_SENSE_PTR) {
+ if (csio->ccb_h.flags & CAM_SENSE_PHYS)
+ return;
+ else {
+ /*
+ * XXX KDM this is stupid, but casting the
+ * structure doesn't work...
+ */
+ bcopy(&csio->sense_data, sense,
+ sizeof(struct scsi_sense_data *));
+ }
+ } else {
+ /*
+ * If the physical sense flag is set, but the sense pointer
+ * is not also set, we assume that the user is an idiot and
+ * return. (Well, okay, it could be that somehow, the
+ * entire csio is physical, but we would have probably core
+ * dumped on one of the bogus pointer dereferences above
+ * already.)
+ */
+ if (csio->ccb_h.flags & CAM_SENSE_PHYS)
+ return;
+ else
+ sense = &csio->sense_data;
+ }
+
+ xpt_print_path(csio->ccb_h.path);
+ error_code = sense->error_code & SSD_ERRCODE;
+ sense_key = sense->flags & SSD_KEY;
+
+ switch (error_code) {
+ case SSD_DEFERRED_ERROR:
+ printf("Deferred Error: ");
+ /* FALLTHROUGH */
+ case SSD_CURRENT_ERROR:
+
+ printf("%s", scsi_sense_key_text[sense_key]);
+ info = scsi_4btoul(sense->info);
+
+ if (sense->error_code & SSD_ERRCODE_VALID) {
+
+ switch (sense_key) {
+ case SSD_KEY_NOT_READY:
+ case SSD_KEY_ILLEGAL_REQUEST:
+ case SSD_KEY_UNIT_ATTENTION:
+ case SSD_KEY_DATA_PROTECT:
+ break;
+ case SSD_KEY_BLANK_CHECK:
+ printf(" req sz: %d (decimal)",
+ info);
+ break;
+ default:
+ if (info) {
+ if (sense->flags & SSD_ILI) {
+ printf(" ILI (length mismatch):"
+ " %d", info);
+ } else {
+ printf(" info:%x", info);
+ }
+ }
+ }
+ } else if (info)
+ printf(" info?:%x", info);
+
+ if (sense->extra_len >= 4) {
+ if (bcmp(sense->cmd_spec_info, "\0\0\0\0", 4)) {
+ printf(" csi:%x,%x,%x,%x",
+ sense->cmd_spec_info[0],
+ sense->cmd_spec_info[1],
+ sense->cmd_spec_info[2],
+ sense->cmd_spec_info[3]);
+ }
+ }
+
+ asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
+ ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
+
+ if (asc || ascq) {
+ const char *desc = scsi_sense_desc(asc, ascq,
+ &cgd.inq_data);
+ printf(" asc:%x,%x\n", asc, ascq);
+
+ xpt_print_path(csio->ccb_h.path);
+ printf("%s", desc);
+ }
+
+ if (sense->extra_len >= 7 && sense->fru) {
+ printf(" field replaceable unit: %x", sense->fru);
+ }
+
+ if ((sense->extra_len >= 10)
+ && (sense->sense_key_spec[0] & SSD_SCS_VALID) != 0) {
+ printf(" sks:%x,%x", sense->sense_key_spec[0],
+ scsi_2btoul(&sense->sense_key_spec[1]));
+ }
+ break;
+
+ default:
+ printf("error code %d",
+ sense->error_code & SSD_ERRCODE);
+ if (sense->error_code & SSD_ERRCODE_VALID) {
+ printf(" at block no. %d (decimal)",
+ info = scsi_4btoul(sense->info));
+ }
+ }
+
+ printf("\n");
+}
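
A peripheral driver's completion path would normally call this only when the failed CCB actually carries autosense data; a minimal sketch, assuming the standard CAM_AUTOSNS_VALID status bit from cam.h and an invented helper name:

    /* Report sense data for a failed SCSI I/O CCB. */
    static void
    example_report_error(union ccb *done_ccb)
    {
    	struct ccb_scsiio *csio = &done_ccb->csio;

    	if ((done_ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
    		scsi_sense_print(csio);
    }
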
+
+#else /* !KERNEL */
+
+
+char *
+scsi_sense_string(struct cam_device *device, struct ccb_scsiio *csio,
+ char *str, int str_len)
+{
+ struct scsi_sense_data *sense;
+ u_int32_t info;
+ int error_code;
+ int sense_key;
+ int asc, ascq;
+ u_int8_t command_print;
+ char path_str[64];
+ char tmpstr[2048];
+ int tmpstrlen = 2048;
+ int cur_len = 0, retlen;
+
+ if ((device == NULL) || (csio == NULL) || (str == NULL))
+ return(NULL);
+
+ if (str_len <= 0)
+ return(NULL);
+
+ /*
+ * If the CDB is a physical address, we can't deal with it..
+ */
+ if ((csio->ccb_h.flags & CAM_CDB_PHYS) != 0)
+ command_print = 0;
+ else
+ command_print = 1;
+
+ cam_path_string(device, path_str, 64);
+
+ str[0] = '\0';
+
+ sense = NULL;
+
+ if (command_print != 0) {
+ char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
+
+ retlen = snprintf(tmpstr, tmpstrlen, "%s", path_str);
+
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+
+ if ((csio->ccb_h.flags & CAM_CDB_POINTER) != 0) {
+ retlen = snprintf(tmpstr, tmpstrlen, "%s. CDB: %s\n",
+ scsi_op_desc(csio->cdb_io.cdb_ptr[0],
+ &device->inq_data),
+ scsi_cdb_string(csio->cdb_io.cdb_ptr,
+ cdb_str));
+ } else {
+ retlen = snprintf(tmpstr, tmpstrlen, "%s. CDB: %s\n",
+ scsi_op_desc(csio->cdb_io.cdb_bytes[0],
+ &device->inq_data), scsi_cdb_string(
+ (u_int8_t *)&csio->cdb_io.cdb_bytes, cdb_str));
+ }
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+ }
+
+ /*
+ * If the sense data is a physical pointer, forget it.
+ */
+ if (csio->ccb_h.flags & CAM_SENSE_PTR) {
+ if (csio->ccb_h.flags & CAM_SENSE_PHYS)
+ return(NULL);
+ else {
+ /*
+ * XXX KDM this is stupid, but casting the
+ * structure doesn't work...
+ */
+			bcopy(&csio->sense_data, &sense,
+			      sizeof(struct scsi_sense_data *));
+ }
+ } else {
+ /*
+ * If the physical sense flag is set, but the sense pointer
+ * is not also set, we assume that the user is an idiot and
+ * return. (Well, okay, it could be that somehow, the
+ * entire csio is physical, but we would have probably core
+		 * dumped on one of the bogus pointer dereferences above
+ * already.)
+ */
+ if (csio->ccb_h.flags & CAM_SENSE_PHYS)
+ return(NULL);
+ else
+ sense = &csio->sense_data;
+ }
+
+
+ retlen = snprintf(tmpstr, tmpstrlen, "%s", path_str);
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+
+ error_code = sense->error_code & SSD_ERRCODE;
+ sense_key = sense->flags & SSD_KEY;
+
+ switch (error_code) {
+ case SSD_DEFERRED_ERROR:
+ retlen = snprintf(tmpstr, tmpstrlen, "Deferred Error: ");
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+ /* FALLTHROUGH */
+ case SSD_CURRENT_ERROR:
+
+ retlen = snprintf(tmpstr, tmpstrlen, "%s",
+ scsi_sense_key_text[sense_key]);
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+
+ info = scsi_4btoul(sense->info);
+
+ if (sense->error_code & SSD_ERRCODE_VALID) {
+
+ switch (sense_key) {
+ case SSD_KEY_NOT_READY:
+ case SSD_KEY_ILLEGAL_REQUEST:
+ case SSD_KEY_UNIT_ATTENTION:
+ case SSD_KEY_DATA_PROTECT:
+ break;
+ case SSD_KEY_BLANK_CHECK:
+ retlen = snprintf(tmpstr, tmpstrlen,
+ " req sz: %d (decimal)",
+ info);
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+ break;
+ default:
+ if (info) {
+ if (sense->flags & SSD_ILI) {
+ retlen = snprintf (tmpstr,
+ tmpstrlen,
+ " ILI (length "
+ "mismatch): %d", info);
+
+ } else {
+ retlen = snprintf(tmpstr,
+ tmpstrlen,
+ " info:%x",
+ info);
+ }
+ strncat(str, tmpstr,
+ str_len - cur_len - 1);
+ cur_len += retlen;
+ }
+ }
+ } else if (info) {
+ retlen = snprintf(tmpstr, tmpstrlen," info?:%x", info);
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+ }
+
+ if (sense->extra_len >= 4) {
+ if (bcmp(sense->cmd_spec_info, "\0\0\0\0", 4)) {
+ retlen = snprintf(tmpstr, tmpstrlen,
+ " csi:%x,%x,%x,%x",
+ sense->cmd_spec_info[0],
+ sense->cmd_spec_info[1],
+ sense->cmd_spec_info[2],
+ sense->cmd_spec_info[3]);
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+ }
+ }
+
+ asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
+ ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
+
+ if (asc || ascq) {
+ const char *desc = scsi_sense_desc(asc, ascq,
+ &device->inq_data);
+ retlen = snprintf(tmpstr, tmpstrlen,
+ " asc:%x,%x\n%s%s", asc, ascq,
+ path_str, desc);
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+ }
+
+ if (sense->extra_len >= 7 && sense->fru) {
+ retlen = snprintf(tmpstr, tmpstrlen,
+ " field replaceable unit: %x",
+ sense->fru);
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+ }
+
+ if ((sense->extra_len >= 10)
+ && (sense->sense_key_spec[0] & SSD_SCS_VALID) != 0) {
+ retlen = snprintf(tmpstr, tmpstrlen, " sks:%x,%x",
+ sense->sense_key_spec[0],
+ scsi_2btoul(&sense->sense_key_spec[1]));
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+ }
+ break;
+
+ default:
+ retlen = snprintf(tmpstr, tmpstrlen, "error code %d",
+ sense->error_code & SSD_ERRCODE);
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+
+ if (sense->error_code & SSD_ERRCODE_VALID) {
+ retlen = snprintf(tmpstr, tmpstrlen,
+ " at block no. %d (decimal)",
+ info = scsi_4btoul(sense->info));
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+ }
+ }
+
+ retlen = snprintf(tmpstr, tmpstrlen, "\n");
+ strncat(str, tmpstr, str_len - cur_len - 1);
+ cur_len += retlen;
+
+ return(str);
+}
+
+void
+scsi_sense_print(struct cam_device *device, struct ccb_scsiio *csio,
+ FILE *ofile)
+{
+ char str[2048];
+
+ if ((device == NULL) || (csio == NULL) || (ofile == NULL))
+ return;
+
+ fprintf(ofile, "%s", scsi_sense_string(device, csio, str, 2048));
+}
+
+#endif /* KERNEL/!KERNEL */
+
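+/*
+ * Weigh the sense data from a completed command against the action
+ * suggested by the sense code tables and decide whether to retry
+ * (ERESTART), return the errno encoded in the low bits of the action,
+ * or fall back to EIO.  Printing of the sense data is suppressed by
+ * SF_NO_PRINT/SF_QUIET_IR or when the action lacks SSQ_PRINT_SENSE.
+ */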
+#ifdef KERNEL
+int
+scsi_interpret_sense(union ccb *ccb, u_int32_t sense_flags,
+ u_int32_t *relsim_flags, u_int32_t *openings,
+ u_int32_t *timeout, scsi_sense_action error_action)
+#else
+int
+scsi_interpret_sense(struct cam_device *device, union ccb *ccb,
+ u_int32_t sense_flags, u_int32_t *relsim_flags,
+ u_int32_t *openings, u_int32_t *timeout,
+ scsi_sense_action error_action)
+#endif
+{
+ struct scsi_sense_data *sense;
+ int error_code, sense_key, asc, ascq;
+ int error;
+ int print_sense;
+ struct ccb_scsiio *csio;
+ int retry;
+
+ csio = &ccb->csio;
+ sense = &csio->sense_data;
+ scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq);
+
+#ifdef KERNEL
+ if ((sense_flags & SF_NO_PRINT) == 0 || bootverbose)
+#else
+ if ((sense_flags & SF_NO_PRINT) == 0)
+#endif
+ print_sense = TRUE;
+ else
+ print_sense = FALSE;
+
+ switch (error_code) {
+ case SSD_DEFERRED_ERROR:
+ {
+ /*
+ * XXX dufault@FreeBSD.org
+ * This error doesn't relate to the command associated
+ * with this request sense. A deferred error is an error
+ * for a command that has already returned GOOD status
+ * (see 8.2.14.2).
+ *
+ * By my reading of that section, it looks like the current
+ * command has been cancelled, we should now clean things up
+ * (hopefully recovering any lost data) and then retry the
+ * current command. There are two easy choices, both wrong:
+ *
+ * 1. Drop through (like we had been doing), thus treating
+ * this as if the error were for the current command and
+ * return and stop the current command.
+ *
+ * 2. Issue a retry (like I made it do) thus hopefully
+ * recovering the current transfer, and ignoring the
+ * fact that we've dropped a command.
+ *
+ * These should probably be handled in a device specific
+ * sense handler or punted back up to a user mode daemon
+ */
+
+ /* decrement the number of retries */
+ retry = ccb->ccb_h.retry_count > 0;
+ if (retry)
+ ccb->ccb_h.retry_count--;
+
+ error = ERESTART;
+ break;
+ }
+ case SSD_CURRENT_ERROR:
+ {
+
+ switch (sense_key) {
+ case SSD_KEY_NO_SENSE:
+			/* Why were we called then? Well, don't bail now. */
+ /* FALLTHROUGH */
+ case SSD_KEY_EQUAL:
+ /* These should be filtered by the peripheral drivers */
+ /* FALLTHROUGH */
+ case SSD_KEY_MISCOMPARE:
+ print_sense = FALSE;
+ /* FALLTHROUGH */
+ case SSD_KEY_RECOVERED_ERROR:
+
+ /* decrement the number of retries */
+ retry = ccb->ccb_h.retry_count > 0;
+ if (retry)
+ ccb->ccb_h.retry_count--;
+
+ error = 0;
+ break;
+ case SSD_KEY_ILLEGAL_REQUEST:
+ if ((sense_flags & SF_QUIET_IR) != 0)
+ print_sense = FALSE;
+
+ /* FALLTHROUGH */
+ case SSD_KEY_NOT_READY:
+ case SSD_KEY_DATA_PROTECT:
+ case SSD_KEY_VOLUME_OVERFLOW:
+ case SSD_KEY_BLANK_CHECK: /* should be filtered out by
+ peripheral drivers */
+ retry = ccb->ccb_h.retry_count > 0;
+ if (retry)
+ ccb->ccb_h.retry_count--;
+
+ if ((error_action & SSQ_PRINT_SENSE) == 0)
+ print_sense = FALSE;
+
+ error = error_action & SS_ERRMASK;
+
+ break;
+ case SSD_KEY_UNIT_ATTENTION:
+ /*
+ * This should also be filtered out by
+ * peripheral drivers since each has a different
+ * concept of what it means to invalidate the media.
+ */
+ if ((sense_flags & SF_RETRY_UA) != 0) {
+ /* don't decrement retry count */
+ error = ERESTART;
+ print_sense = FALSE;
+ } else {
+ /* decrement the number of retries */
+ retry = ccb->ccb_h.retry_count > 0;
+ if (retry)
+ ccb->ccb_h.retry_count--;
+
+ if ((error_action & SSQ_PRINT_SENSE) == 0)
+ print_sense = FALSE;
+
+ error = error_action & SS_ERRMASK;
+ }
+ break;
+ default:
+ /* decrement the number of retries */
+ retry = ccb->ccb_h.retry_count > 0;
+ if (retry)
+ ccb->ccb_h.retry_count--;
+
+ if ((error_action & SSQ_PRINT_SENSE) == 0)
+ print_sense = FALSE;
+
+ error = error_action & SS_ERRMASK;
+ }
+ break;
+ }
+ default:
+ /* decrement the number of retries */
+ retry = ccb->ccb_h.retry_count > 0;
+ if (retry)
+ ccb->ccb_h.retry_count--;
+		error = EIO;
+ break;
+ }
+
+ if (print_sense) {
+#ifdef KERNEL
+ scsi_sense_print(csio);
+#else
+ scsi_sense_print(device, csio, stdout);
+#endif
+ }
+
+ return (error);
+}
+
+void
+scsi_print_inquiry(struct scsi_inquiry_data *inq_data)
+{
+ u_int8_t type;
+ char *dtype, *qtype;
+ char vendor[16], product[48], revision[16];
+
+ type = SID_TYPE(inq_data);
+
+ /*
+ * Figure out basic device type and qualifier.
+ */
+ if (SID_QUAL_IS_VENDOR_UNIQUE(inq_data)) {
+ qtype = "(vendor-unique qualifier)";
+ } else {
+ switch (SID_QUAL(inq_data)) {
+ case SID_QUAL_LU_CONNECTED:
+ qtype = "";
+ break;
+
+ case SID_QUAL_LU_OFFLINE:
+ qtype = "(offline)";
+ break;
+
+ case SID_QUAL_RSVD:
+ qtype = "(reserved qualifier)";
+ break;
+ default:
+ case SID_QUAL_BAD_LU:
+ qtype = "(lun not supported)";
+ break;
+ }
+ }
+
+ switch (type) {
+ case T_DIRECT:
+ dtype = "Direct Access";
+ break;
+ case T_SEQUENTIAL:
+ dtype = "Sequential Access";
+ break;
+ case T_PRINTER:
+ dtype = "Printer";
+ break;
+ case T_PROCESSOR:
+ dtype = "Processor";
+ break;
+ case T_CDROM:
+ dtype = "CD-ROM";
+ break;
+ case T_WORM:
+ dtype = "Worm";
+ break;
+ case T_SCANNER:
+ dtype = "Scanner";
+ break;
+ case T_OPTICAL:
+ dtype = "Optical";
+ break;
+ case T_CHANGER:
+ dtype = "Changer";
+ break;
+ case T_COMM:
+ dtype = "Communication";
+ break;
+ case T_STORARRAY:
+		dtype = "Storage Array";
+ break;
+ case T_ENCLOSURE:
+ dtype = "Enclosure Services";
+ break;
+	case T_NODEVICE:
+		dtype = "Uninstalled";
+		break;
+	default:
+ dtype = "unknown";
+ break;
+ }
+
+ cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
+ sizeof(vendor));
+ cam_strvis(product, inq_data->product, sizeof(inq_data->product),
+ sizeof(product));
+ cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
+ sizeof(revision));
+
+ printf("<%s %s %s> %s %s SCSI%d device %s\n",
+ vendor, product, revision,
+ SID_IS_REMOVABLE(inq_data) ? "Removable" : "Fixed",
+ dtype, SID_ANSI_REV(inq_data), qtype);
+}
+
+/*
+ * Table of syncrates that don't follow the "divisible by 4"
+ * rule. This table will be expanded in future SCSI specs.
+ * I believe that FAST-40 has already been defined...
+ */
+static struct {
+ u_int period_factor;
+ u_int period; /* in 10ths of ns */
+} scsi_syncrates[] = {
+ { 0x0a, 250 },
+ { 0x0b, 303 },
+ { 0x0c, 500 }
+};
+
+/*
+ * Return the frequency in kHz corresponding to the given
+ * sync period factor.
+ */
+u_int
+scsi_calc_syncsrate(u_int period_factor)
+{
+ int i;
+ int num_syncrates;
+
+ num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
+ /* See if the period is in the "exception" table */
+ for (i = 0; i < num_syncrates; i++) {
+
+ if (period_factor == scsi_syncrates[i].period_factor) {
+			/* Return the frequency in kHz */
+ return (10000000 / scsi_syncrates[i].period);
+ }
+ }
+
+ /*
+ * Wasn't in the table, so use the standard
+ * 4 times conversion.
+ */
+ return (10000000 / (period_factor * 4 * 10));
+}
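+
+/*
+ * Worked example (illustrative only): a period factor of 0x0c is in
+ * the exception table with a period of 500 tenths of a ns (50ns), so
+ * the result is 10000000 / 500 = 20000 kHz (Fast-20).  A factor of
+ * 0x19 (25 decimal) is not in the table, so the standard conversion
+ * yields 10000000 / (25 * 4 * 10) = 10000 kHz (Fast SCSI).
+ */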
+
+/*
+ * Return the SCSI sync parameter that corresponds to
+ * the passed in period in 10ths of ns.
+ */
+u_int
+scsi_calc_syncparam(u_int period)
+{
+ int i;
+ int num_syncrates;
+
+ if (period == 0)
+ return (~0); /* Async */
+
+ num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
+ /* See if the period is in the "exception" table */
+ for (i = 0; i < num_syncrates; i++) {
+
+ if (period <= scsi_syncrates[i].period) {
+			/* Return the period factor */
+ return (scsi_syncrates[i].period_factor);
+ }
+ }
+
+ /*
+ * Wasn't in the table, so use the standard
+ * 1/4 period in ns conversion.
+ */
+ return (period/40);
+}
+
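+/*
+ * The CDB fill routines below only initialize the CCB's SCSI I/O
+ * fields and build the CDB; the caller is expected to allocate the
+ * CCB and submit it to the transport itself.
+ */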
+void
+scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_test_unit_ready *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_test_unit_ready *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = TEST_UNIT_READY;
+}
+
+void
+scsi_request_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ void *data_ptr, u_int8_t dxfer_len, u_int8_t tag_action,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_request_sense *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ CAM_DIR_IN,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_request_sense *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = REQUEST_SENSE;
+}
+
+void
+scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t *inq_buf, u_int32_t inq_len,
+ int evpd, u_int8_t page_code, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_inquiry *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*data_ptr*/inq_buf,
+ /*dxfer_len*/inq_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_inquiry *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = INQUIRY;
+ if (evpd) {
+ scsi_cmd->byte2 |= SI_EVPD;
+ scsi_cmd->page_code = page_code;
+ }
+ scsi_cmd->length = inq_len;
+}
+
+void
+scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int dbd, u_int8_t page_code,
+ u_int8_t page, u_int8_t *param_buf, u_int32_t param_len,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ u_int8_t cdb_len;
+
+ /*
+ * Use the smallest possible command to perform the operation.
+ */
+ if (param_len < 256) {
+ /*
+ * We can fit in a 6 byte cdb.
+ */
+ struct scsi_mode_sense_6 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_mode_sense_6 *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MODE_SENSE_6;
+ if (dbd != 0)
+ scsi_cmd->byte2 |= SMS_DBD;
+ scsi_cmd->page = page_code | page;
+ scsi_cmd->length = param_len;
+ cdb_len = sizeof(*scsi_cmd);
+ } else {
+ /*
+ * Need a 10 byte cdb.
+ */
+ struct scsi_mode_sense_10 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_mode_sense_10 *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MODE_SENSE_10;
+ if (dbd != 0)
+ scsi_cmd->byte2 |= SMS_DBD;
+ scsi_cmd->page = page_code | page;
+ scsi_ulto2b(param_len, scsi_cmd->length);
+ cdb_len = sizeof(*scsi_cmd);
+ }
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ CAM_DIR_IN,
+ tag_action,
+ param_buf,
+ param_len,
+ sense_len,
+ cdb_len,
+ timeout);
+}
+
+void
+scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int scsi_page_fmt, int save_pages,
+ u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ u_int8_t cdb_len;
+
+ /*
+ * Use the smallest possible command to perform the operation.
+ */
+ if (param_len < 256) {
+ /*
+ * We can fit in a 6 byte cdb.
+ */
+ struct scsi_mode_select_6 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_mode_select_6 *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MODE_SELECT_6;
+ if (scsi_page_fmt != 0)
+ scsi_cmd->byte2 |= SMS_PF;
+ if (save_pages != 0)
+ scsi_cmd->byte2 |= SMS_SP;
+ scsi_cmd->length = param_len;
+ cdb_len = sizeof(*scsi_cmd);
+ } else {
+ /*
+ * Need a 10 byte cdb.
+ */
+ struct scsi_mode_select_10 *scsi_cmd;
+
+ scsi_cmd =
+ (struct scsi_mode_select_10 *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MODE_SELECT_10;
+ if (scsi_page_fmt != 0)
+ scsi_cmd->byte2 |= SMS_PF;
+ if (save_pages != 0)
+ scsi_cmd->byte2 |= SMS_SP;
+ scsi_ulto2b(param_len, scsi_cmd->length);
+ cdb_len = sizeof(*scsi_cmd);
+ }
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ CAM_DIR_OUT,
+ tag_action,
+ param_buf,
+ param_len,
+ sense_len,
+ cdb_len,
+ timeout);
+}
+
+
+/* XXX allow specification of address and PMI bit and LBA */
+void
+scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action,
+ struct scsi_read_capacity_data *rcap_buf,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_read_capacity *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*data_ptr*/(u_int8_t *)rcap_buf,
+ /*dxfer_len*/sizeof(*rcap_buf),
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_read_capacity *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = READ_CAPACITY;
+}
+
+/*
+ * Prevent or allow the user to remove the media
+ */
+void
+scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t action,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_prevent *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_prevent *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = PREVENT_ALLOW;
+ scsi_cmd->how = action;
+}
+
+/*
+ * Synchronize the media to the contents of the cache for
+ * the given lba/count pair. Specifying 0/0 means sync
+ * the whole cache.
+ */
+void
+scsi_synchronize_cache(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int32_t begin_lba,
+ u_int16_t lb_count, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_sync_cache *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_sync_cache *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = SYNCHRONIZE_CACHE;
+ scsi_ulto4b(begin_lba, scsi_cmd->begin_lba);
+ scsi_ulto2b(lb_count, scsi_cmd->lb_count);
+}
+
+/*
+ * Try to make as good a match as possible with
+ * the available sub-drivers.
+ */
+int
+scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
+{
+ struct scsi_inquiry_pattern *entry;
+ struct scsi_inquiry_data *inq;
+
+ entry = (struct scsi_inquiry_pattern *)table_entry;
+ inq = (struct scsi_inquiry_data *)inqbuffer;
+
+ if (((SID_TYPE(inq) == entry->type)
+ || (entry->type == T_ANY))
+ && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE
+ : entry->media_type & SIP_MEDIA_FIXED)
+ && (cam_strmatch(inq->vendor, entry->vendor, sizeof(inq->vendor)) == 0)
+ && (cam_strmatch(inq->product, entry->product, sizeof(inq->product)) == 0)
+ && (cam_strmatch(inq->revision, entry->revision, sizeof(inq->revision)) == 0)) {
+ return (0);
+ }
+ return (-1);
+}
+
+/*
+ * Try to make as good a match as possible with
+ * the available sub-drivers.
+ */
+int
+scsi_static_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
+{
+ struct scsi_static_inquiry_pattern *entry;
+ struct scsi_inquiry_data *inq;
+
+ entry = (struct scsi_static_inquiry_pattern *)table_entry;
+ inq = (struct scsi_inquiry_data *)inqbuffer;
+
+ if (((SID_TYPE(inq) == entry->type)
+ || (entry->type == T_ANY))
+ && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE
+ : entry->media_type & SIP_MEDIA_FIXED)
+ && (cam_strmatch(inq->vendor, entry->vendor, sizeof(inq->vendor)) == 0)
+ && (cam_strmatch(inq->product, entry->product, sizeof(inq->product)) == 0)
+ && (cam_strmatch(inq->revision, entry->revision, sizeof(inq->revision)) == 0)) {
+ return (0);
+ }
+ return (-1);
+}
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
new file mode 100644
index 0000000..b8920f5
--- /dev/null
+++ b/sys/cam/scsi/scsi_all.h
@@ -0,0 +1,814 @@
+/*
+ * Largely written by Julian Elischer (julian@tfs.com)
+ * for TRW Financial Systems.
+ *
+ * TRW Financial Systems, in accordance with their agreement with Carnegie
+ * Mellon University, makes this software available to CMU to distribute
+ * or use in any manner that they see fit as long as this message is kept with
+ * the software. For this reason TFS also grants any other persons or
+ * organisations permission to use or modify this software.
+ *
+ * TFS supplies this software to be publicly redistributed
+ * on the understanding that TFS is not responsible for the correct
+ * functioning of this software in any circumstances.
+ *
+ * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
+ *
+ * $Id: scsi_all.h,v 1.13 1995/05/30 08:13:25 rgrimes Exp $
+ */
+
+/*
+ * SCSI general interface description
+ */
+
+#ifndef _SCSI_SCSI_ALL_H
+#define _SCSI_SCSI_ALL_H 1
+
+#include <sys/cdefs.h>
+
+/*
+ * SCSI command format
+ */
+
+/*
+ * Define some bits that are in ALL (or a lot of) SCSI commands
+ */
+#define SCSI_CTL_LINK 0x01
+#define SCSI_CTL_FLAG 0x02
+#define SCSI_CTL_VENDOR 0xC0
+#define SCSI_CMD_LUN 0xA0 /* these two should not be needed */
+#define SCSI_CMD_LUN_SHIFT 5 /* LUN in the cmd is no longer SCSI */
+
+#define SCSI_MAX_CDBLEN 16 /*
+ * 16 byte commands are in the
+ * SCSI-3 spec
+ */
+#if defined(CAM_MAX_CDBLEN) && (CAM_MAX_CDBLEN < SCSI_MAX_CDBLEN)
+#error "CAM_MAX_CDBLEN cannot be less than SCSI_MAX_CDBLEN"
+#endif
+
+/*
+ * This type defines actions to be taken when a particular sense code is
+ * received. Right now, these flags are only defined to take up 16 bits,
+ * but can be expanded in the future if necessary.
+ */
+typedef enum {
+ SS_NOP = 0x000000, /* Do nothing */
+ SS_RETRY = 0x010000, /* Retry the command */
+ SS_FAIL = 0x020000, /* Bail out */
+ SS_START = 0x030000, /* Send a Start Unit command to the device,
+ * then retry the original command.
+ */
+ SS_TUR = 0x040000, /* Send a Test Unit Ready command to the
+ * device, then retry the original command.
+ */
+ SS_MANUAL = 0x050000, /*
+ * This error must be handled manually,
+ * i.e. the code must look at the asc and
+ * ascq values and determine the proper
+ * course of action.
+ */
+ SS_TURSTART = 0x060000, /*
+ * Send a Test Unit Ready command to the
+ * device, and if that fails, send a start
+ * unit.
+ */
+ SS_MASK = 0xff0000
+} scsi_sense_action;
+
+typedef enum {
+ SSQ_NONE = 0x0000,
+ SSQ_DECREMENT_COUNT = 0x0100, /* Decrement the retry count */
+ SSQ_MANY = 0x0200, /* send lots of recovery commands */
+ SSQ_RANGE = 0x0400, /*
+ * Yes, this is a hack. Basically,
+ * if this flag is set then it
+ * represents an ascq range. The
+ * "correct" way to implement the
+ * ranges might be to add a special
+ * field to the sense code table,
+ * but that would take up a lot of
+ * additional space. This solution
+ * isn't as elegant, but is more
+ * space efficient.
+ */
+ SSQ_PRINT_SENSE = 0x0800,
+ SSQ_MASK = 0xff00
+} scsi_sense_action_qualifier;
+
+/* Mask for error status values */
+#define SS_ERRMASK 0xff
+
+/* The default error action */
+#define SS_DEF SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE|EIO
+
+/* Default error action, without an error return value */
+#define SS_NEDEF SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
+
+/* Default error action, without sense printing or an error return value */
+#define SS_NEPDEF SS_RETRY|SSQ_DECREMENT_COUNT
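+
+/*
+ * For illustration, the default error action SS_DEF above,
+ * SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE|EIO, decomposes as
+ * (action & SS_MASK) == SS_RETRY, (action & SSQ_MASK) containing
+ * SSQ_DECREMENT_COUNT and SSQ_PRINT_SENSE, and (action & SS_ERRMASK)
+ * == EIO, the errno associated with the failure.
+ */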
+
+struct scsi_generic
+{
+ u_int8_t opcode;
+ u_int8_t bytes[11];
+};
+
+struct scsi_request_sense
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_test_unit_ready
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[3];
+ u_int8_t control;
+};
+
+struct scsi_send_diag
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SSD_UOL 0x01
+#define SSD_DOL 0x02
+#define SSD_SELFTEST 0x04
+#define SSD_PF 0x10
+ u_int8_t unused[1];
+ u_int8_t paramlen[2];
+ u_int8_t control;
+};
+
+struct scsi_sense
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_inquiry
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SI_EVPD 0x01
+ u_int8_t page_code;
+ u_int8_t reserved;
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_mode_sense_6
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SMS_DBD 0x08
+ u_int8_t page;
+#define SMS_PAGE_CODE 0x3F
+#define SMS_VENDOR_SPECIFIC_PAGE 0x00
+#define SMS_DISCONNECT_RECONNECT_PAGE 0x02
+#define SMS_PERIPHERAL_DEVICE_PAGE 0x09
+#define SMS_CONTROL_MODE_PAGE 0x0A
+#define SMS_ALL_PAGES_PAGE 0x3F
+#define SMS_PAGE_CTRL_MASK 0xC0
+#define SMS_PAGE_CTRL_CURRENT 0x00
+#define SMS_PAGE_CTRL_CHANGEABLE 0x40
+#define SMS_PAGE_CTRL_DEFAULT 0x80
+#define SMS_PAGE_CTRL_SAVED 0xC0
+ u_int8_t unused;
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_mode_sense_10
+{
+ u_int8_t opcode;
+ u_int8_t byte2; /* same bits as small version */
+ u_int8_t page; /* same bits as small version */
+ u_int8_t unused[4];
+ u_int8_t length[2];
+ u_int8_t control;
+};
+
+struct scsi_mode_select_6
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SMS_SP 0x01
+#define SMS_PF 0x10
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_mode_select_10
+{
+ u_int8_t opcode;
+ u_int8_t byte2; /* same bits as small version */
+ u_int8_t unused[5];
+ u_int8_t length[2];
+ u_int8_t control;
+};
+
+/*
+ * When sending a mode select to a tape drive, the medium type must be 0.
+ */
+struct scsi_mode_hdr_6
+{
+ u_int8_t datalen;
+ u_int8_t medium_type;
+ u_int8_t dev_specific;
+ u_int8_t block_descr_len;
+};
+
+struct scsi_mode_hdr_10
+{
+ u_int8_t datalen[2];
+ u_int8_t medium_type;
+ u_int8_t dev_specific;
+ u_int8_t reserved[2];
+ u_int8_t block_descr_len[2];
+};
+
+struct scsi_mode_block_descr
+{
+ u_int8_t density_code;
+ u_int8_t num_blocks[3];
+ u_int8_t reserved;
+ u_int8_t block_len[3];
+};
+
+struct scsi_control_page {
+ u_int8_t page_code;
+ u_int8_t page_length;
+ u_int8_t rlec;
+#define SCB_RLEC 0x01 /*Report Log Exception Cond*/
+ u_int8_t queue_flags;
+#define SCP_QUEUE_ALG_MASK 0xF0
+#define SCP_QUEUE_ALG_RESTRICTED 0x00
+#define SCP_QUEUE_ALG_UNRESTRICTED 0x10
+#define SCP_QUEUE_ERR 0x02 /*Queued I/O aborted for CACs*/
+#define SCP_QUEUE_DQUE 0x01 /*Queued I/O disabled*/
+ u_int8_t eca_and_aen;
+#define SCP_EECA 0x80 /*Enable Extended CA*/
+#define SCP_RAENP 0x04 /*Ready AEN Permission*/
+#define SCP_UAAENP 0x02 /*UA AEN Permission*/
+#define SCP_EAENP 0x01 /*Error AEN Permission*/
+ u_int8_t reserved;
+ u_int8_t aen_holdoff_period[2];
+};
+
+struct scsi_reserve
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_release
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_prevent
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t how;
+ u_int8_t control;
+};
+#define PR_PREVENT 0x01
+#define PR_ALLOW 0x00
+
+struct scsi_sync_cache
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t begin_lba[4];
+ u_int8_t reserved;
+ u_int8_t lb_count[2];
+ u_int8_t control;
+};
+
+
+struct scsi_changedef
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused1;
+ u_int8_t how;
+ u_int8_t unused[4];
+ u_int8_t datalen;
+ u_int8_t control;
+};
+
+struct scsi_read_buffer
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define RWB_MODE 0x07
+#define RWB_MODE_HDR_DATA 0x00
+#define RWB_MODE_DATA 0x02
+#define RWB_MODE_DOWNLOAD 0x04
+#define RWB_MODE_DOWNLOAD_SAVE 0x05
+ u_int8_t buffer_id;
+ u_int8_t offset[3];
+ u_int8_t length[3];
+ u_int8_t control;
+};
+
+struct scsi_write_buffer
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t buffer_id;
+ u_int8_t offset[3];
+ u_int8_t length[3];
+ u_int8_t control;
+};
+
+#define SC_SCSI_1 0x01
+#define SC_SCSI_2 0x03
+
+/*
+ * Opcodes
+ */
+
+#define TEST_UNIT_READY 0x00
+#define REQUEST_SENSE 0x03
+#define INQUIRY 0x12
+#define MODE_SELECT_6 0x15
+#define MODE_SENSE_6 0x1a
+#define START_STOP 0x1b
+#define RESERVE 0x16
+#define RELEASE 0x17
+#define PREVENT_ALLOW 0x1e
+#define READ_CAPACITY 0x25
+#define POSITION_TO_ELEMENT 0x2b
+#define SYNCHRONIZE_CACHE 0x35
+#define WRITE_BUFFER 0x3b
+#define READ_BUFFER 0x3c
+#define CHANGE_DEFINITION 0x40
+#define MODE_SELECT_10 0x55
+#define MODE_SENSE_10 0x5A
+#define MOVE_MEDIUM 0xa5
+#define READ_ELEMENT_STATUS 0xb8
+
+
+/*
+ * Device Types
+ */
+#define T_DIRECT 0x00
+#define T_SEQUENTIAL 0x01
+#define T_PRINTER 0x02
+#define T_PROCESSOR 0x03
+#define T_WORM 0x04
+#define T_CDROM 0x05
+#define T_SCANNER 0x06
+#define T_OPTICAL 0x07
+#define T_CHANGER 0x08
+#define T_COMM 0x09
+#define T_ASC0 0x0a
+#define T_ASC1 0x0b
+#define T_STORARRAY 0x0c
+#define T_ENCLOSURE 0x0d
+#define T_NODEVICE 0x1F
+#define T_ANY 0xFF /* Used in Quirk table matches */
+
+#define T_REMOV 1
+#define T_FIXED 0
+
+struct scsi_inquiry_data
+{
+ u_int8_t device;
+#define SID_TYPE(inq_data) ((inq_data)->device & 0x1f)
+#define SID_QUAL(inq_data) (((inq_data)->device & 0xE0) >> 5)
+#define SID_QUAL_LU_CONNECTED 0x00 /* The specified peripheral device
+ * type is currently connected to
+ * logical unit. If the target cannot
+ * determine whether or not a physical
+ * device is currently connected, it
+ * shall also use this peripheral
+ * qualifier when returning the INQUIRY
+ * data. This peripheral qualifier
+ * does not mean that the device is
+ * ready for access by the initiator.
+ */
+#define SID_QUAL_LU_OFFLINE 0x01 /* The target is capable of supporting
+ * the specified peripheral device type
+ * on this logical unit; however, the
+ * physical device is not currently
+ * connected to this logical unit.
+ */
+#define SID_QUAL_RSVD 0x02
+#define SID_QUAL_BAD_LU 0x03 /* The target is not capable of
+ * supporting a physical device on
+ * this logical unit. For this
+ * peripheral qualifier the peripheral
+ * device type shall be set to 1Fh to
+ * provide compatibility with previous
+ * versions of SCSI. All other
+ * peripheral device type values are
+ * reserved for this peripheral
+ * qualifier.
+ */
+#define SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x08) != 0)
+ u_int8_t dev_qual2;
+#define SID_QUAL2 0x7F
+#define SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & 0x80) != 0)
+ u_int8_t version;
+#define SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07)
+#define SID_ECMA 0x38
+#define SID_ISO 0xC0
+ u_int8_t response_format;
+#define SID_AENC 0x80
+#define SID_TrmIOP 0x40
+ u_int8_t additional_length;
+ u_int8_t reserved[2];
+ u_int8_t flags;
+#define SID_SftRe 0x01
+#define SID_CmdQue 0x02
+#define SID_Linked 0x08
+#define SID_Sync 0x10
+#define SID_WBus16 0x20
+#define SID_WBus32 0x40
+#define SID_RelAdr 0x80
+#define SID_VENDOR_SIZE 8
+ char vendor[SID_VENDOR_SIZE];
+#define SID_PRODUCT_SIZE 16
+ char product[SID_PRODUCT_SIZE];
+#define SID_REVISION_SIZE 4
+ char revision[SID_REVISION_SIZE];
+};
+
+struct scsi_vpd_unit_serial_number
+{
+ u_int8_t device;
+ u_int8_t page_code;
+#define SVPD_UNIT_SERIAL_NUMBER 0x80
+ u_int8_t reserved;
+ u_int8_t length; /* serial number length */
+#define SVPD_SERIAL_NUM_SIZE 251
+ char serial_num[SVPD_SERIAL_NUM_SIZE];
+};
+
+struct scsi_read_capacity
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t addr[4];
+ u_int8_t unused[3];
+ u_int8_t control;
+};
+
+struct scsi_read_capacity_data
+{
+ u_int8_t addr[4];
+ u_int8_t length[4];
+};
+
+struct scsi_sense_data
+{
+ u_int8_t error_code;
+#define SSD_ERRCODE 0x7F
+#define SSD_CURRENT_ERROR 0x70
+#define SSD_DEFERRED_ERROR 0x71
+#define SSD_ERRCODE_VALID 0x80
+ u_int8_t segment;
+ u_int8_t flags;
+#define SSD_KEY 0x0F
+#define SSD_KEY_NO_SENSE 0x00
+#define SSD_KEY_RECOVERED_ERROR 0x01
+#define SSD_KEY_NOT_READY 0x02
+#define SSD_KEY_MEDIUM_ERROR 0x03
+#define SSD_KEY_HARDWARE_ERROR 0x04
+#define SSD_KEY_ILLEGAL_REQUEST 0x05
+#define SSD_KEY_UNIT_ATTENTION 0x06
+#define SSD_KEY_DATA_PROTECT 0x07
+#define SSD_KEY_BLANK_CHECK 0x08
+#define SSD_KEY_Vendor_Specific 0x09
+#define SSD_KEY_COPY_ABORTED 0x0a
+#define SSD_KEY_ABORTED_COMMAND 0x0b
+#define SSD_KEY_EQUAL 0x0c
+#define SSD_KEY_VOLUME_OVERFLOW 0x0d
+#define SSD_KEY_MISCOMPARE 0x0e
+#define SSD_KEY_RESERVED 0x0f
+#define SSD_ILI 0x20
+#define SSD_EOM 0x40
+#define SSD_FILEMARK 0x80
+ u_int8_t info[4];
+ u_int8_t extra_len;
+ u_int8_t cmd_spec_info[4];
+ u_int8_t add_sense_code;
+ u_int8_t add_sense_code_qual;
+ u_int8_t fru;
+ u_int8_t sense_key_spec[3];
+#define SSD_SCS_VALID 0x80
+#define SSD_FIELDPTR_CMD 0x40
+#define SSD_BITPTR_VALID 0x08
+#define SSD_BITPTR_VALUE 0x07
+#define SSD_MIN_SIZE 18
+ u_int8_t extra_bytes[14];
+#define SSD_FULL_SIZE sizeof(struct scsi_sense_data)
+};
+
+struct scsi_mode_header_6
+{
+ u_int8_t data_length; /* Sense data length */
+ u_int8_t medium_type;
+ u_int8_t dev_spec;
+ u_int8_t blk_desc_len;
+};
+
+struct scsi_mode_header_10
+{
+ u_int8_t data_length[2];/* Sense data length */
+ u_int8_t medium_type;
+ u_int8_t dev_spec;
+ u_int8_t unused[2];
+ u_int8_t blk_desc_len[2];
+};
+
+struct scsi_mode_blk_desc
+{
+ u_int8_t density;
+ u_int8_t nblocks[3];
+ u_int8_t reserved;
+ u_int8_t blklen[3];
+};
+
+/*
+ * Status Byte
+ */
+#define SCSI_STATUS_OK 0x00
+#define SCSI_STATUS_CHECK_COND 0x02
+#define SCSI_STATUS_COND_MET 0x04
+#define SCSI_STATUS_BUSY 0x08
+#define SCSI_STATUS_INTERMED 0x10
+#define SCSI_STATUS_INTERMED_COND_MET 0x14
+#define SCSI_STATUS_RESERV_CONFLICT 0x18
+#define SCSI_STATUS_CMD_TERMINATED 0x22
+#define SCSI_STATUS_QUEUE_FULL 0x28
+
+struct scsi_inquiry_pattern {
+ u_int8_t type;
+ u_int8_t media_type;
+#define SIP_MEDIA_REMOVABLE 0x01
+#define SIP_MEDIA_FIXED 0x02
+ const char *vendor;
+ const char *product;
+ const char *revision;
+};
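+
+/*
+ * Example (illustrative) pattern for a fictitious removable CD-ROM:
+ *
+ *	{ T_CDROM, SIP_MEDIA_REMOVABLE, "ACME", "CD-3000", "*" }
+ *
+ * The quirk tables in the peripheral drivers use "*" as a match-any
+ * wildcard for the vendor, product and revision strings.
+ */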
+
+struct scsi_static_inquiry_pattern {
+ u_int8_t type;
+ u_int8_t media_type;
+ char vendor[SID_VENDOR_SIZE+1];
+ char product[SID_PRODUCT_SIZE+1];
+ char revision[SID_REVISION_SIZE+1];
+};
+
+struct scsi_sense_quirk_entry {
+ struct scsi_inquiry_pattern inq_pat;
+ int num_ascs;
+ struct asc_table_entry *asc_info;
+};
+
+struct asc_table_entry {
+ u_int8_t asc;
+ u_int8_t ascq;
+ u_int32_t action;
+#if !defined(SCSI_NO_SENSE_STRINGS)
+ const char *desc;
+#endif
+};
+
+struct op_table_entry {
+ u_int8_t opcode;
+ u_int16_t opmask;
+ const char *desc;
+};
+
+struct scsi_op_quirk_entry {
+ struct scsi_inquiry_pattern inq_pat;
+ int num_ops;
+ struct op_table_entry *op_table;
+};
+
+
+struct ccb_scsiio;
+struct cam_periph;
+union ccb;
+#ifndef KERNEL
+struct cam_device;
+#endif
+
+extern const char *scsi_sense_key_text[];
+
+__BEGIN_DECLS
+const char * scsi_sense_desc(int asc, int ascq,
+ struct scsi_inquiry_data *inq_data);
+scsi_sense_action scsi_error_action(int asc, int ascq,
+ struct scsi_inquiry_data *inq_data);
+#ifdef KERNEL
+void scsi_sense_print(struct ccb_scsiio *csio);
+int scsi_interpret_sense(union ccb *ccb,
+ u_int32_t sense_flags,
+ u_int32_t *relsim_flags,
+ u_int32_t *reduction,
+ u_int32_t *timeout,
+ scsi_sense_action error_action);
+#else
+char * scsi_sense_string(struct cam_device *device,
+ struct ccb_scsiio *csio,
+ char *str, int str_len);
+void scsi_sense_print(struct cam_device *device,
+ struct ccb_scsiio *csio, FILE *ofile);
+int scsi_interpret_sense(struct cam_device *device,
+ union ccb *ccb,
+ u_int32_t sense_flags,
+ u_int32_t *relsim_flags,
+ u_int32_t *reduction,
+ u_int32_t *timeout,
+ scsi_sense_action error_action);
+#endif /* KERNEL */
+
+#define SF_RETRY_UA 0x01
+#define SF_NO_PRINT 0x02
+#define SF_QUIET_IR	0x04	/* Be quiet about Illegal Request responses */
+
+
+const char * scsi_op_desc(u_int16_t opcode,
+ struct scsi_inquiry_data *inq_data);
+char * scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string);
+
+void scsi_print_inquiry(struct scsi_inquiry_data *inq_data);
+
+u_int scsi_calc_syncsrate(u_int period_factor);
+u_int scsi_calc_syncparam(u_int period);
+
+void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ u_int8_t tag_action,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_request_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ void *data_ptr, u_int8_t dxfer_len,
+ u_int8_t tag_action, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t *inq_buf,
+ u_int32_t inq_len, int evpd, u_int8_t page_code,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ u_int8_t tag_action, int dbd,
+ u_int8_t page_code, u_int8_t page,
+ u_int8_t *param_buf, u_int32_t param_len,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ u_int8_t tag_action, int scsi_page_fmt,
+ int save_pages, u_int8_t *param_buf,
+ u_int32_t param_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), u_int8_t tag_action,
+ struct scsi_read_capacity_data *rcap_buf,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t action,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_synchronize_cache(struct ccb_scsiio *csio,
+ u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), u_int8_t tag_action,
+ u_int32_t begin_lba, u_int16_t lb_count,
+ u_int8_t sense_len, u_int32_t timeout);
+
+int scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry);
+int scsi_static_inquiry_match(caddr_t inqbuffer,
+ caddr_t table_entry);
+
+static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
+ int *error_code, int *sense_key,
+ int *asc, int *ascq);
+static __inline void scsi_ulto2b(u_int32_t val, u_int8_t *bytes);
+static __inline void scsi_ulto3b(u_int32_t val, u_int8_t *bytes);
+static __inline void scsi_ulto4b(u_int32_t val, u_int8_t *bytes);
+static __inline u_int32_t scsi_2btoul(u_int8_t *bytes);
+static __inline u_int32_t scsi_3btoul(u_int8_t *bytes);
+static __inline int32_t scsi_3btol(u_int8_t *bytes);
+static __inline u_int32_t scsi_4btoul(u_int8_t *bytes);
+
+
+static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
+ int *error_code, int *sense_key,
+ int *asc, int *ascq)
+{
+ *error_code = sense->error_code & SSD_ERRCODE;
+ *sense_key = sense->flags & SSD_KEY;
+ *asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
+ *ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
+}
+
+static __inline void
+scsi_ulto2b(u_int32_t val, u_int8_t *bytes)
+{
+
+ bytes[0] = (val >> 8) & 0xff;
+ bytes[1] = val & 0xff;
+}
+
+static __inline void
+scsi_ulto3b(u_int32_t val, u_int8_t *bytes)
+{
+
+ bytes[0] = (val >> 16) & 0xff;
+ bytes[1] = (val >> 8) & 0xff;
+ bytes[2] = val & 0xff;
+}
+
+static __inline void
+scsi_ulto4b(u_int32_t val, u_int8_t *bytes)
+{
+
+ bytes[0] = (val >> 24) & 0xff;
+ bytes[1] = (val >> 16) & 0xff;
+ bytes[2] = (val >> 8) & 0xff;
+ bytes[3] = val & 0xff;
+}
+
+static __inline u_int32_t
+scsi_2btoul(u_int8_t *bytes)
+{
+ u_int32_t rv;
+
+ rv = (bytes[0] << 8) |
+ bytes[1];
+ return (rv);
+}
+
+static __inline u_int32_t
+scsi_3btoul(u_int8_t *bytes)
+{
+ u_int32_t rv;
+
+ rv = (bytes[0] << 16) |
+ (bytes[1] << 8) |
+ bytes[2];
+ return (rv);
+}
+
+static __inline int32_t
+scsi_3btol(u_int8_t *bytes)
+{
+ u_int32_t rc = scsi_3btoul(bytes);
+
+ if (rc & 0x00800000)
+ rc |= 0xff000000;
+
+ return (int32_t) rc;
+}
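+/* For example (illustrative), the 3 bytes ff,ff,fe decode to -2. */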
+
+static __inline u_int32_t
+scsi_4btoul(u_int8_t *bytes)
+{
+ u_int32_t rv;
+
+ rv = (bytes[0] << 24) |
+ (bytes[1] << 16) |
+ (bytes[2] << 8) |
+ bytes[3];
+ return (rv);
+}
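+
+/*
+ * Example (illustrative): scsi_ulto4b(0x12345678, buf) stores the
+ * big-endian sequence 0x12, 0x34, 0x56, 0x78, and scsi_4btoul(buf)
+ * recovers 0x12345678 regardless of host byte order.
+ */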
+__END_DECLS
+
+#endif /*_SCSI_SCSI_ALL_H*/
diff --git a/sys/cam/scsi/scsi_cd.c b/sys/cam/scsi/scsi_cd.c
new file mode 100644
index 0000000..19a0e34
--- /dev/null
+++ b/sys/cam/scsi/scsi_cd.c
@@ -0,0 +1,3016 @@
+/*
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+/*
+ * Portions of this driver taken from the original FreeBSD cd driver.
+ * Written by Julian Elischer (julian@tfs.com)
+ * for TRW Financial Systems for use under the MACH(2.5) operating system.
+ *
+ * TRW Financial Systems, in accordance with their agreement with Carnegie
+ * Mellon University, makes this software available to CMU to distribute
+ * or use in any manner that they see fit as long as this message is kept with
+ * the software. For this reason TFS also grants any other persons or
+ * organisations permission to use or modify this software.
+ *
+ * TFS supplies this software to be publicly redistributed
+ * on the understanding that TFS is not responsible for the correct
+ * functioning of this software in any circumstances.
+ *
+ * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
+ *
+ * from: cd.c,v 1.83 1997/05/04 15:24:22 joerg Exp $
+ */
+
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/buf.h>
+#include <sys/dkbad.h>
+#include <sys/disklabel.h>
+#include <sys/diskslice.h>
+#include <sys/malloc.h>
+#include <sys/fcntl.h>
+#include <sys/stat.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/cdio.h>
+#include <sys/errno.h>
+#include <sys/devicestat.h>
+#include <sys/sysctl.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_extend.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_queue.h>
+#include <cam/cam_debug.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/scsi/scsi_cd.h>
+#include "opt_cd.h"
+
+#define ESUCCESS 0
+
+#define CDUNIT(DEV) ((minor(DEV)&0xF8) >> 3) /* 5 bit unit */
+#define CDSETUNIT(DEV, U) makedev(major(DEV), ((U) << 3))
+#define PARTITION(z) (minor(z) & 0x07)
+#define RAW_PART 2
+#define LEADOUT 0xaa /* leadout toc entry */
+
+struct cd_params {
+ u_int32_t blksize;
+ u_long disksize;
+};
+
+typedef enum {
+ CD_Q_NONE = 0x00,
+ CD_Q_NO_TOUCH = 0x01,
+ CD_Q_BCD_TRACKS = 0x02,
+ CD_Q_NO_CHANGER = 0x04,
+ CD_Q_CHANGER = 0x08
+} cd_quirks;
+
+typedef enum {
+ CD_FLAG_INVALID = 0x001,
+ CD_FLAG_NEW_DISC = 0x002,
+ CD_FLAG_DISC_LOCKED = 0x004,
+ CD_FLAG_DISC_REMOVABLE = 0x008,
+ CD_FLAG_TAGGED_QUEUING = 0x010,
+ CD_FLAG_OPEN = 0x020,
+ CD_FLAG_CHANGER = 0x040,
+ CD_FLAG_ACTIVE = 0x080,
+ CD_FLAG_SCHED_ON_COMP = 0x100,
+ CD_FLAG_RETRY_UA = 0x200
+} cd_flags;
+
+typedef enum {
+ CD_CCB_PROBE = 0x01,
+ CD_CCB_BUFFER_IO = 0x02,
+ CD_CCB_WAITING = 0x03,
+ CD_CCB_TYPE_MASK = 0x0F,
+ CD_CCB_RETRY_UA = 0x10
+} cd_ccb_state;
+
+typedef enum {
+ CHANGER_TIMEOUT_SCHED = 0x01,
+ CHANGER_SHORT_TMOUT_SCHED = 0x02,
+ CHANGER_MANUAL_CALL = 0x04,
+ CHANGER_NEED_TIMEOUT = 0x08
+} cd_changer_flags;
+
+#define ccb_state ppriv_field0
+#define ccb_bp ppriv_ptr1
+
+typedef enum {
+ CD_STATE_PROBE,
+ CD_STATE_NORMAL
+} cd_state;
+
+struct cd_softc {
+ cam_pinfo pinfo;
+ cd_state state;
+ cd_flags flags;
+ struct buf_queue_head buf_queue;
+ LIST_HEAD(, ccb_hdr) pending_ccbs;
+ struct cd_params params;
+ struct disklabel disklabel;
+ struct diskslices *cd_slices;
+ union ccb saved_ccb;
+ cd_quirks quirks;
+ struct devstat device_stats;
+ STAILQ_ENTRY(cd_softc) changer_links;
+ struct cdchanger *changer;
+ int bufs_left;
+ struct cam_periph *periph;
+#ifdef DEVFS
+ void *ra_devfs_token;
+ void *rc_devfs_token;
+ void *a_devfs_token;
+ void *c_devfs_token;
+ void *ctl_devfs_token;
+#endif
+};
+
+struct cd_quirk_entry {
+ struct scsi_inquiry_pattern inq_pat;
+ cd_quirks quirks;
+};
+
+/*
+ * These quirk entries aren't strictly necessary. Basically, what they do
+ * is tell cdregister() up front that a device is a changer. Otherwise, it
+ * will figure that fact out once it sees a LUN on the device that is
+ * greater than 0. If it is known up front that a device is a changer, all
+ * I/O to the device will go through the changer scheduling routines, as
+ * opposed to the "normal" CD code.
+ */
+static struct cd_quirk_entry cd_quirk_table[] =
+{
+ {
+ { T_CDROM, SIP_MEDIA_REMOVABLE, "NRC", "MBR-7", "*"},
+ /*quirks*/ CD_Q_CHANGER
+ },
+ {
+ { T_CDROM, SIP_MEDIA_REMOVABLE, "PIONEER", "CD-ROM DRM-604X",
+ "*"}, /* quirks */ CD_Q_CHANGER
+ }
+};
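+
+/*
+ * A device that is known to be a changer can be marked as such up
+ * front by appending an entry here, e.g. (vendor/product strings are
+ * illustrative):
+ *
+ *	{
+ *		{ T_CDROM, SIP_MEDIA_REMOVABLE, "ACME", "CD-TOWER", "*"},
+ *		/*quirks*/ CD_Q_CHANGER
+ *	},
+ */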
+
+#ifndef MIN
+#define MIN(x,y) ((x<y) ? x : y)
+#endif
+
+#define CD_CDEV_MAJOR 15
+#define CD_BDEV_MAJOR 6
+
+static d_open_t cdopen;
+static d_read_t cdread;
+static d_close_t cdclose;
+static d_ioctl_t cdioctl;
+static d_strategy_t cdstrategy;
+
+static periph_init_t cdinit;
+static periph_ctor_t cdregister;
+static periph_dtor_t cdcleanup;
+static periph_start_t cdstart;
+static void cdasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg);
+static void cdshorttimeout(void *arg);
+static void cdschedule(struct cam_periph *periph, int priority);
+static void cdrunchangerqueue(void *arg);
+static void cdchangerschedule(struct cd_softc *softc);
+static int cdrunccb(union ccb *ccb,
+ int (*error_routine)(union ccb *ccb,
+ u_int32_t cam_flags,
+ u_int32_t sense_flags),
+ u_int32_t cam_flags, u_int32_t sense_flags);
+union ccb *cdgetccb(struct cam_periph *periph,
+ u_int32_t priority);
+static void cddone(struct cam_periph *periph,
+ union ccb *start_ccb);
+static int cderror(union ccb *ccb, u_int32_t cam_flags,
+ u_int32_t sense_flags);
+static void cdprevent(struct cam_periph *periph, int action);
+static int cdsize(dev_t dev, u_int32_t *size);
+static int cdgetdisklabel (dev_t dev);
+static int cdreadtoc(struct cam_periph *periph, u_int32_t mode,
+ u_int32_t start, struct cd_toc_entry *data,
+ u_int32_t len);
+static int cdgetmode(struct cam_periph *periph,
+ struct cd_mode_data *data, u_int32_t page);
+static int cdsetmode(struct cam_periph *periph,
+ struct cd_mode_data *data);
+static int cdplay(struct cam_periph *periph, u_int32_t blk,
+ u_int32_t len);
+static int cdreadsubchannel(struct cam_periph *periph,
+ u_int32_t mode, u_int32_t format,
+ int track,
+ struct cd_sub_channel_info *data,
+ u_int32_t len);
+static int cdplaymsf(struct cam_periph *periph, u_int32_t startm,
+ u_int32_t starts, u_int32_t startf,
+ u_int32_t endm, u_int32_t ends,
+ u_int32_t endf);
+static int cdplaytracks(struct cam_periph *periph,
+ u_int32_t strack, u_int32_t sindex,
+ u_int32_t etrack, u_int32_t eindex);
+static int cdpause(struct cam_periph *periph, u_int32_t go);
+static int cdstopunit(struct cam_periph *periph, u_int32_t eject);
+static int cdstartunit(struct cam_periph *periph);
+
+static struct periph_driver cddriver =
+{
+ cdinit, "cd",
+ TAILQ_HEAD_INITIALIZER(cddriver.units), /* generation */ 0
+};
+
+DATA_SET(periphdriver_set, cddriver);
+
+/* For 2.2-stable support */
+#ifndef D_DISK
+#define D_DISK 0
+#endif
+static struct cdevsw cd_cdevsw =
+{
+ /*d_open*/ cdopen,
+ /*d_close*/ cdclose,
+ /*d_read*/ cdread,
+ /*d_write*/ nowrite,
+ /*d_ioctl*/ cdioctl,
+ /*d_stop*/ nostop,
+ /*d_reset*/ noreset,
+ /*d_devtotty*/ nodevtotty,
+ /*d_poll*/ seltrue,
+ /*d_mmap*/ nommap,
+ /*d_strategy*/ cdstrategy,
+ /*d_name*/ "cd",
+ /*d_spare*/ NULL,
+ /*d_maj*/ -1,
+ /*d_dump*/ nodump,
+ /*d_psize*/ nopsize,
+ /*d_flags*/ D_DISK,
+ /*d_maxio*/ 0,
+ /*b_maj*/ -1
+};
+
+static struct extend_array *cdperiphs;
+static int num_changers;
+
+#ifndef CHANGER_MIN_BUSY_SECONDS
+#define CHANGER_MIN_BUSY_SECONDS 2
+#endif
+#ifndef CHANGER_MAX_BUSY_SECONDS
+#define CHANGER_MAX_BUSY_SECONDS 10
+#endif
+
+static int changer_min_busy_seconds = CHANGER_MIN_BUSY_SECONDS;
+static int changer_max_busy_seconds = CHANGER_MAX_BUSY_SECONDS;
+
+/*
+ * XXX KDM this CAM node should be moved if we ever get more CAM sysctl
+ * variables.
+ */
+SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem");
+SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver");
+SYSCTL_NODE(_kern_cam_cd, OID_AUTO, changer, CTLFLAG_RD, 0, "CD Changer");
+SYSCTL_INT(_kern_cam_cd_changer, OID_AUTO, min_busy_seconds, CTLFLAG_RW,
+ &changer_min_busy_seconds, 0, "Minimum changer scheduling quantum");
+SYSCTL_INT(_kern_cam_cd_changer, OID_AUTO, max_busy_seconds, CTLFLAG_RW,
+ &changer_max_busy_seconds, 0, "Maximum changer scheduling quantum");
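+
+/*
+ * The knobs above show up as kern.cam.cd.changer.min_busy_seconds and
+ * kern.cam.cd.changer.max_busy_seconds, so the scheduling quanta can
+ * be tuned at runtime, e.g. (illustrative):
+ *
+ *	sysctl -w kern.cam.cd.changer.min_busy_seconds=5
+ */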
+
+struct cdchanger {
+ path_id_t path_id;
+ target_id_t target_id;
+ int num_devices;
+ struct camq devq;
+ struct timeval start_time;
+ struct cd_softc *cur_device;
+ struct callout_handle short_handle;
+ struct callout_handle long_handle;
+ cd_changer_flags flags;
+ STAILQ_ENTRY(cdchanger) changer_links;
+ STAILQ_HEAD(chdevlist, cd_softc) chluns;
+};
+
+STAILQ_HEAD(changerlist, cdchanger) changerq;
+
+void
+cdinit(void)
+{
+ cam_status status;
+ struct cam_path *path;
+
+ /*
+ * Create our extend array for storing the devices we attach to.
+ */
+ cdperiphs = cam_extend_new();
+ if (cdperiphs == NULL) {
+ printf("cd: Failed to alloc extend array!\n");
+ return;
+ }
+
+ /*
+ * Install a global async callback. This callback will
+ * receive async callbacks like "new device found".
+ */
+ status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+
+ if (status == CAM_REQ_CMP) {
+ struct ccb_setasync csa;
+
+ xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_FOUND_DEVICE;
+ csa.callback = cdasync;
+ csa.callback_arg = NULL;
+ xpt_action((union ccb *)&csa);
+ status = csa.ccb_h.status;
+ xpt_free_path(path);
+ }
+
+ if (status != CAM_REQ_CMP) {
+ printf("cd: Failed to attach master async callback "
+ "due to status 0x%x!\n", status);
+ } else {
+		/* If we were successful, register our devsw */
+ cdevsw_add_generic(CD_BDEV_MAJOR, CD_CDEV_MAJOR, &cd_cdevsw);
+ }
+}
+
+static void
+cdcleanup(struct cam_periph *periph)
+{
+ struct cd_softc *softc;
+
+ softc = (struct cd_softc *)periph->softc;
+
+ xpt_print_path(periph->path);
+ printf("removing device entry\n");
+ /*
+ * In the queued, non-active case, the device in question
+ * has already been removed from the changer run queue. Since this
+ * device is active, we need to de-activate it, and schedule
+ * another device to run. (if there is another one to run)
+ */
+ if ((softc->flags & CD_FLAG_CHANGER)
+ && (softc->flags & CD_FLAG_ACTIVE)) {
+
+ /*
+		 * The purpose of the short timeout is solely to determine
+ * whether the current device has finished or not. Well,
+ * since we're removing the active device, we know that it
+ * is finished. So, get rid of the short timeout.
+ * Otherwise, if we're in the time period before the short
+ * timeout fires, and there are no other devices in the
+ * queue to run, there won't be any other device put in the
+ * active slot. i.e., when we call cdrunchangerqueue()
+ * below, it won't do anything. Then, when the short
+ * timeout fires, it'll look at the "current device", which
+		 * we free below, and possibly panic the kernel on a
+ * bogus pointer reference.
+ *
+ * The long timeout doesn't really matter, since we
+ * decrement the qfrozen_cnt to indicate that there is
+ * nothing in the active slot now. Therefore, there won't
+ * be any bogus pointer references there.
+ */
+ if (softc->changer->flags & CHANGER_SHORT_TMOUT_SCHED) {
+ untimeout(cdshorttimeout, softc->changer,
+ softc->changer->short_handle);
+ softc->changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED;
+ }
+ softc->changer->devq.qfrozen_cnt--;
+ softc->changer->flags |= CHANGER_MANUAL_CALL;
+ cdrunchangerqueue(softc->changer);
+ }
+
+ /*
+ * If we're removing the last device on the changer, go ahead and
+ * remove the changer device structure.
+ */
+ if ((softc->flags & CD_FLAG_CHANGER)
+ && (--softc->changer->num_devices == 0)) {
+
+ /*
+ * Theoretically, there shouldn't be any timeouts left, but
+ * I'm not completely sure that that will be the case. So,
+ * it won't hurt to check and see if there are any left.
+ */
+ if (softc->changer->flags & CHANGER_TIMEOUT_SCHED) {
+ untimeout(cdrunchangerqueue, softc->changer,
+ softc->changer->long_handle);
+ softc->changer->flags &= ~CHANGER_TIMEOUT_SCHED;
+ }
+
+ if (softc->changer->flags & CHANGER_SHORT_TMOUT_SCHED) {
+ untimeout(cdshorttimeout, softc->changer,
+ softc->changer->short_handle);
+ softc->changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED;
+ }
+
+ STAILQ_REMOVE(&changerq, softc->changer, cdchanger,
+ changer_links);
+ xpt_print_path(periph->path);
+ printf("removing changer entry\n");
+ free(softc->changer, M_DEVBUF);
+ num_changers--;
+ }
+ cam_extend_release(cdperiphs, periph->unit_number);
+ free(periph->softc, M_DEVBUF);
+}
+
+static void
+cdasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg)
+{
+ struct cam_periph *periph;
+
+ periph = (struct cam_periph *)callback_arg;
+ switch (code) {
+ case AC_FOUND_DEVICE:
+ {
+ struct ccb_getdev *cgd;
+ cam_status status;
+
+ cgd = (struct ccb_getdev *)arg;
+
+ if ((cgd->pd_type != T_CDROM) && (cgd->pd_type != T_WORM))
+ break;
+
+ /*
+ * Allocate a peripheral instance for
+ * this device and start the probe
+ * process.
+ */
+ status = cam_periph_alloc(cdregister, cdcleanup, cdstart,
+ "cd", CAM_PERIPH_BIO, cgd->ccb_h.path,
+ cdasync, AC_FOUND_DEVICE, cgd);
+
+ if (status != CAM_REQ_CMP
+ && status != CAM_REQ_INPROG)
+ printf("cdasync: Unable to attach new device "
+ "due to status 0x%x\n", status);
+
+ break;
+ }
+ case AC_LOST_DEVICE:
+ {
+ int s;
+ struct cd_softc *softc;
+ struct buf *q_bp;
+ struct ccb_setasync csa;
+
+ softc = (struct cd_softc *)periph->softc;
+
+ /*
+		 * Ensure that no other async callbacks that
+ * might affect this peripheral can come through.
+ */
+ s = splcam();
+
+ /*
+ * De-register any async callbacks.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path,
+ /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = 0;
+ csa.callback = cdasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ softc->flags |= CD_FLAG_INVALID;
+
+ /*
+ * Return all queued I/O with ENXIO.
+ * XXX Handle any transactions queued to the card
+ * with XPT_ABORT_CCB.
+ */
+ while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
+ bufq_remove(&softc->buf_queue, q_bp);
+ q_bp->b_resid = q_bp->b_bcount;
+ q_bp->b_error = ENXIO;
+ q_bp->b_flags |= B_ERROR;
+ biodone(q_bp);
+ }
+
+ /*
+ * If this device is part of a changer, and it was scheduled
+ * to run, remove it from the run queue since we just nuked
+ * all of its scheduled I/O.
+ */
+ if ((softc->flags & CD_FLAG_CHANGER)
+ && (softc->pinfo.index != CAM_UNQUEUED_INDEX))
+ camq_remove(&softc->changer->devq, softc->pinfo.index);
+
+ devstat_remove_entry(&softc->device_stats);
+
+ xpt_print_path(periph->path);
+ printf("lost device\n");
+
+ splx(s);
+
+ cam_periph_invalidate(periph);
+ break;
+ }
+ case AC_SENT_BDR:
+ case AC_BUS_RESET:
+ {
+ struct cd_softc *softc;
+ struct ccb_hdr *ccbh;
+ int s;
+
+ softc = (struct cd_softc *)periph->softc;
+ s = splsoftcam();
+ /*
+ * Don't fail on the expected unit attention
+ * that will occur.
+ */
+ softc->flags |= CD_FLAG_RETRY_UA;
+ for (ccbh = LIST_FIRST(&softc->pending_ccbs);
+ ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
+ ccbh->ccb_state |= CD_CCB_RETRY_UA;
+ splx(s);
+ break;
+ }
+ case AC_TRANSFER_NEG:
+ case AC_SCSI_AEN:
+ case AC_UNSOL_RESEL:
+ default:
+ break;
+ }
+}
+
+static cam_status
+cdregister(struct cam_periph *periph, void *arg)
+{
+ int s;
+ struct cd_softc *softc;
+ struct ccb_setasync csa;
+ struct ccb_getdev *cgd;
+ caddr_t match;
+
+ cgd = (struct ccb_getdev *)arg;
+ if (periph == NULL) {
+ printf("cdregister: periph was NULL!!\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+ if (cgd == NULL) {
+ printf("cdregister: no getdev CCB, can't register device\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ softc = (struct cd_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
+
+ if (softc == NULL) {
+ printf("cdregister: Unable to probe new device. "
+ "Unable to allocate softc\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ bzero(softc, sizeof(*softc));
+ LIST_INIT(&softc->pending_ccbs);
+ softc->state = CD_STATE_PROBE;
+ bufq_init(&softc->buf_queue);
+ if (SID_IS_REMOVABLE(&cgd->inq_data))
+ softc->flags |= CD_FLAG_DISC_REMOVABLE;
+ if ((cgd->inq_data.flags & SID_CmdQue) != 0)
+ softc->flags |= CD_FLAG_TAGGED_QUEUING;
+
+ periph->softc = softc;
+ softc->periph = periph;
+
+ cam_extend_set(cdperiphs, periph->unit_number, periph);
+
+ /*
+ * See if this device has any quirks.
+ */
+ match = cam_quirkmatch((caddr_t)&cgd->inq_data,
+ (caddr_t)cd_quirk_table,
+ sizeof(cd_quirk_table)/sizeof(*cd_quirk_table),
+ sizeof(*cd_quirk_table), scsi_inquiry_match);
+
+ if (match != NULL)
+ softc->quirks = ((struct cd_quirk_entry *)match)->quirks;
+ else
+ softc->quirks = CD_Q_NONE;
+
+ /*
+ * We need to register the statistics structure for this device,
+ * but we don't have the blocksize yet for it. So, we register
+ * the structure and indicate that we don't have the blocksize
+ * yet. Unlike other SCSI peripheral drivers, we explicitly set
+ * the device type here to be CDROM, rather than just ORing in
+ * cgd->pd_type. This is because this driver can attach to either
+ * CDROM or WORM devices, and we want this peripheral driver to
+ * show up in the devstat list as a CD peripheral driver, not a
+ * WORM peripheral driver. WORM drives will also have the WORM
+ * driver attached to them.
+ */
+ devstat_add_entry(&softc->device_stats, "cd",
+ periph->unit_number, 0,
+ DEVSTAT_BS_UNAVAILABLE,
+ DEVSTAT_TYPE_CDROM | DEVSTAT_TYPE_IF_SCSI);
+
+ /*
+ * Add an async callback so that we get
+ * notified if this device goes away.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path,
+ /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
+ csa.callback = cdasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ /*
+ * If the target lun is greater than 0, we most likely have a CD
+ * changer device. Check the quirk entries as well, though, just
+ * in case someone has a CD tower with one lun per drive or
+ * something like that. Also, if we know up front that a
+ * particular device is a changer, we can mark it as such starting
+ * with lun 0, instead of lun 1. It shouldn't be necessary to have
+ * a quirk entry to define something as a changer, however.
+ */
+ if (((cgd->ccb_h.target_lun > 0)
+ && ((softc->quirks & CD_Q_NO_CHANGER) == 0))
+ || ((softc->quirks & CD_Q_CHANGER) != 0)) {
+ struct cdchanger *nchanger;
+ struct cam_periph *nperiph;
+ struct cam_path *path;
+ cam_status status;
+ int found;
+
+ /* Set the changer flag in the current device's softc */
+ softc->flags |= CD_FLAG_CHANGER;
+
+ if (num_changers == 0)
+ STAILQ_INIT(&changerq);
+
+ /*
+ * Now, look around for an existing changer device with the
+ * same path and target ID as the current device.
+ */
+ for (found = 0,
+ nchanger = (struct cdchanger *)STAILQ_FIRST(&changerq);
+ nchanger != NULL;
+ nchanger = STAILQ_NEXT(nchanger, changer_links)){
+ if ((nchanger->path_id == cgd->ccb_h.path_id)
+ && (nchanger->target_id == cgd->ccb_h.target_id)) {
+ found = 1;
+ break;
+ }
+ }
+
+ /*
+ * If we found a matching entry, just add this device to
+ * the list of devices on this changer.
+ */
+ if (found == 1) {
+ struct chdevlist *chlunhead;
+
+ chlunhead = &nchanger->chluns;
+
+ /*
+ * XXX KDM look at consolidating this code with the
+ * code below in a separate function.
+ */
+
+ /*
+ * Create a path with lun id 0, and see if we can
+ * find a matching device
+ */
+ status = xpt_create_path(&path, /*periph*/ periph,
+ cgd->ccb_h.path_id,
+ cgd->ccb_h.target_id, 0);
+
+ if ((status == CAM_REQ_CMP)
+ && ((nperiph = cam_periph_find(path, "cd")) != NULL)){
+ struct cd_softc *nsoftc;
+
+ nsoftc = (struct cd_softc *)nperiph->softc;
+
+ if ((nsoftc->flags & CD_FLAG_CHANGER) == 0){
+ nsoftc->flags |= CD_FLAG_CHANGER;
+ nchanger->num_devices++;
+ if (camq_resize(&nchanger->devq,
+ nchanger->num_devices)!=CAM_REQ_CMP){
+ printf("cdregister: "
+ "camq_resize "
+ "failed, changer "
+ "support may "
+ "be messed up\n");
+ }
+ nsoftc->changer = nchanger;
+ nsoftc->pinfo.index =CAM_UNQUEUED_INDEX;
+
+ STAILQ_INSERT_TAIL(&nchanger->chluns,
+ nsoftc,changer_links);
+ }
+ } else if (status == CAM_REQ_CMP)
+ xpt_free_path(path);
+ else {
+ printf("cdregister: unable to allocate path\n"
+ "cdregister: changer support may be "
+ "broken\n");
+ }
+
+ nchanger->num_devices++;
+
+ softc->changer = nchanger;
+ softc->pinfo.index = CAM_UNQUEUED_INDEX;
+
+ if (camq_resize(&nchanger->devq,
+ nchanger->num_devices) != CAM_REQ_CMP) {
+ printf("cdregister: camq_resize "
+ "failed, changer support may "
+ "be messed up\n");
+ }
+
+ STAILQ_INSERT_TAIL(chlunhead, softc, changer_links);
+ }
+ /*
+ * In this case, we don't already have an entry for this
+ * particular changer, so we need to create one, add it to
+ * the queue, and queue this device on the list for this
+ * changer. Before we queue this device, however, we need
+ * to search for lun id 0 on this target, and add it to the
+ * queue first, if it exists. (and if it hasn't already
+ * been marked as part of the changer.)
+ */
+ else {
+ nchanger = malloc(sizeof(struct cdchanger),
+ M_DEVBUF, M_NOWAIT);
+
+ if (nchanger == NULL) {
+ softc->flags &= ~CD_FLAG_CHANGER;
+ printf("cdregister: unable to malloc "
+ "changer structure\ncdregister: "
+ "changer support disabled\n");
+
+ /*
+ * Yes, gotos can be gross but in this case
+ * I think it's justified..
+ */
+ goto cdregisterexit;
+ }
+
+ /* zero the structure */
+ bzero(nchanger, sizeof(struct cdchanger));
+
+ if (camq_init(&nchanger->devq, 1) != 0) {
+ softc->flags &= ~CD_FLAG_CHANGER;
+ printf("cdregister: changer support "
+ "disabled\n");
+ goto cdregisterexit;
+ }
+
+ num_changers++;
+
+ nchanger->path_id = cgd->ccb_h.path_id;
+ nchanger->target_id = cgd->ccb_h.target_id;
+
+ /* this is superfluous, but it makes things clearer */
+ nchanger->num_devices = 0;
+
+ STAILQ_INIT(&nchanger->chluns);
+
+ STAILQ_INSERT_TAIL(&changerq, nchanger,
+ changer_links);
+
+ /*
+ * Create a path with lun id 0, and see if we can
+ * find a matching device
+ */
+ status = xpt_create_path(&path, /*periph*/ periph,
+ cgd->ccb_h.path_id,
+ cgd->ccb_h.target_id, 0);
+
+ /*
+ * If we were able to allocate the path, and if we
+ * find a matching device and it isn't already
+ * marked as part of a changer, then we add it to
+ * the current changer.
+ */
+ if ((status == CAM_REQ_CMP)
+ && ((nperiph = cam_periph_find(path, "cd")) != NULL)
+		 && ((((struct cd_softc *)nperiph->softc)->flags &
+ CD_FLAG_CHANGER) == 0)) {
+ struct cd_softc *nsoftc;
+
+ nsoftc = (struct cd_softc *)nperiph->softc;
+
+ nsoftc->flags |= CD_FLAG_CHANGER;
+ nchanger->num_devices++;
+ if (camq_resize(&nchanger->devq,
+ nchanger->num_devices) != CAM_REQ_CMP) {
+ printf("cdregister: camq_resize "
+ "failed, changer support may "
+ "be messed up\n");
+ }
+ nsoftc->changer = nchanger;
+ nsoftc->pinfo.index = CAM_UNQUEUED_INDEX;
+
+ STAILQ_INSERT_TAIL(&nchanger->chluns,
+ nsoftc, changer_links);
+ } else if (status == CAM_REQ_CMP)
+ xpt_free_path(path);
+ else {
+ printf("cdregister: unable to allocate path\n"
+ "cdregister: changer support may be "
+ "broken\n");
+ }
+
+ softc->changer = nchanger;
+ softc->pinfo.index = CAM_UNQUEUED_INDEX;
+ nchanger->num_devices++;
+ if (camq_resize(&nchanger->devq,
+ nchanger->num_devices) != CAM_REQ_CMP) {
+ printf("cdregister: camq_resize "
+ "failed, changer support may "
+ "be messed up\n");
+ }
+ STAILQ_INSERT_TAIL(&nchanger->chluns, softc,
+ changer_links);
+ }
+ }
+
+cdregisterexit:
+
+ /* Lock this peripheral until we are setup */
+ /* Can't block */
+ cam_periph_lock(periph, PRIBIO);
+
+ if ((softc->flags & CD_FLAG_CHANGER) == 0)
+ xpt_schedule(periph, /*priority*/5);
+ else
+ cdschedule(periph, /*priority*/ 5);
+
+ return(CAM_REQ_CMP);
+}
+
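+/*
+ * Open the device: lock the media in the drive, read the media size,
+ * build a disklabel, and set up the slice tables.
+ */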
+static int
+cdopen(dev_t dev, int flags, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct cd_softc *softc;
+ u_int32_t size;
+ int unit, error;
+
+ error = 0; /* default to no error */
+
+ unit = dkunit(dev);
+ periph = cam_extend_get(cdperiphs, unit);
+
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct cd_softc *)periph->softc;
+
+ if (softc->flags & CD_FLAG_INVALID)
+ return(ENXIO);
+
+ if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0)
+ return (error);
+
+	if ((softc->flags & CD_FLAG_OPEN) == 0) {
+		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
+			cam_periph_unlock(periph);
+			return(ENXIO);
+		}
+		softc->flags |= CD_FLAG_OPEN;
+	}
+
+ /* lock the cd */
+ cdprevent(periph, PR_PREVENT);
+
+ /* find out the size */
+ if ((error = cdsize(dev, &size)) != 0) {
+ cdprevent(periph, PR_ALLOW);
+ softc->flags &= ~CD_FLAG_OPEN;
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+ return(error);
+ }
+
+ /* get a disklabel */
+ if ((error = cdgetdisklabel(dev)) != 0) {
+ printf("error getting disklabel\n");
+ cdprevent(periph, PR_ALLOW);
+ softc->flags &= ~CD_FLAG_OPEN;
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+ return(error);
+ }
+
+ if (error == 0) {
+ /* Initialize slice tables. */
+ error = dsopen("cd", dev, fmt, DSO_NOLABELS | DSO_ONESLICE,
+ &softc->cd_slices, &softc->disklabel,
+ cdstrategy, (ds_setgeom_t *)NULL, &cd_cdevsw);
+
+ /*
+ * We unconditionally (re)set the blocksize each time the
+ * CD device is opened. This is because the CD can change,
+ * and therefore the blocksize might change.
+ */
+ if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0)
+ softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
+ softc->device_stats.block_size = softc->params.blksize;
+ } else {
+ if ((dsisopen(softc->cd_slices) == 0)
+ && ((softc->flags & CD_FLAG_DISC_REMOVABLE) != 0))
+ cdprevent(periph, PR_ALLOW);
+ }
+
+ cam_periph_unlock(periph);
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdopen\n"));
+
+ return (error);
+}
+
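+/*
+ * Close the device: tear down the slices, allow media removal, and mark
+ * the blocksize unavailable until the next open.
+ */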
+static int
+cdclose(dev_t dev, int flag, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct cd_softc *softc;
+ int unit, error;
+
+ unit = dkunit(dev);
+ periph = cam_extend_get(cdperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct cd_softc *)periph->softc;
+
+ if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
+ return (error);
+
+ dsclose(dev, fmt, softc->cd_slices);
+
+ if ((softc->flags & CD_FLAG_DISC_REMOVABLE) != 0)
+ cdprevent(periph, PR_ALLOW);
+
+ /*
+ * Since we're closing this CD, mark the blocksize as unavailable.
+	 * It will be marked as available when the CD is opened again.
+ */
+ softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
+
+ softc->flags &= ~CD_FLAG_OPEN;
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+
+ return (0);
+}
+
+static int
+cdread(dev_t dev, struct uio *uio, int ioflag)
+{
+ return(physio(cdstrategy, NULL, dev, 1, minphys, uio));
+}
+
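+/*
+ * Minimum time quantum timeout for changers.  If the active device has
+ * no pending or outstanding I/O when this fires, hand the active slot
+ * to the next queued device.
+ */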
+static void
+cdshorttimeout(void *arg)
+{
+ struct cdchanger *changer;
+ int s;
+
+ s = splsoftcam();
+
+ changer = (struct cdchanger *)arg;
+
+ /* Always clear the short timeout flag, since that's what we're in */
+ changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED;
+
+ /*
+ * Check to see if there is any more pending or outstanding I/O for
+ * this device. If not, move it out of the active slot.
+ */
+ if ((bufq_first(&changer->cur_device->buf_queue) == NULL)
+ && (changer->cur_device->device_stats.busy_count == 0)) {
+ changer->flags |= CHANGER_MANUAL_CALL;
+ cdrunchangerqueue(changer);
+ }
+
+ splx(s);
+}
+
+/*
+ * This is a wrapper for xpt_schedule. It only applies to changers.
+ */
+static void
+cdschedule(struct cam_periph *periph, int priority)
+{
+ struct cd_softc *softc;
+ int s;
+
+ s = splsoftcam();
+
+ softc = (struct cd_softc *)periph->softc;
+
+ /*
+ * If this device isn't currently queued, and if it isn't
+ * the active device, then we queue this device and run the
+ * changer queue if there is no timeout scheduled to do it.
+ * If this device is the active device, just schedule it
+ * to run again. If this device is queued, there should be
+ * a timeout in place already that will make sure it runs.
+ */
+ if ((softc->pinfo.index == CAM_UNQUEUED_INDEX)
+ && ((softc->flags & CD_FLAG_ACTIVE) == 0)) {
+ /*
+ * We don't do anything with the priority here.
+ * This is strictly a fifo queue.
+ */
+ softc->pinfo.priority = 1;
+ if (softc->changer->devq.generation++ == 0)
+ camq_regen(&softc->changer->devq);
+ softc->pinfo.generation =
+ softc->changer->devq.generation;
+ camq_insert(&softc->changer->devq, (cam_pinfo *)softc);
+
+ /*
+ * Since we just put a device in the changer queue,
+ * check and see if there is a timeout scheduled for
+ * this changer. If so, let the timeout handle
+ * switching this device into the active slot. If
+ * not, manually call the timeout routine to
+ * bootstrap things.
+ */
+ if (((softc->changer->flags & CHANGER_TIMEOUT_SCHED)==0)
+ &&((softc->changer->flags & CHANGER_NEED_TIMEOUT)==0)){
+ softc->changer->flags |= CHANGER_MANUAL_CALL;
+ cdrunchangerqueue(softc->changer);
+ }
+ } else if ((softc->flags & CD_FLAG_ACTIVE)
+ && ((softc->flags & CD_FLAG_SCHED_ON_COMP) == 0))
+ xpt_schedule(periph, priority);
+
+ splx(s);
+
+}
+
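+/*
+ * Switch the changer's active slot to the next queued device.  If the
+ * current device still has outstanding commands, the switch is deferred;
+ * if it still has buffered I/O, it is requeued at the tail of the
+ * changer queue before the next device is made active.
+ */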
+static void
+cdrunchangerqueue(void *arg)
+{
+ struct timeval cur_time, busy_time;
+ struct cd_softc *softc;
+ struct cdchanger *changer;
+ int called_from_timeout;
+ int s;
+
+ s = splsoftcam();
+
+ changer = (struct cdchanger *)arg;
+
+ /*
+ * If we have NOT been called from cdstrategy() or cddone(), and
+ * instead from a timeout routine, go ahead and clear the
+ * timeout flag.
+ */
+ if ((changer->flags & CHANGER_MANUAL_CALL) == 0) {
+ changer->flags &= ~CHANGER_TIMEOUT_SCHED;
+ called_from_timeout = 1;
+ } else
+ called_from_timeout = 0;
+
+ /* Always clear the manual call flag */
+ changer->flags &= ~CHANGER_MANUAL_CALL;
+
+ /* nothing to do if the queue is empty */
+ if (changer->devq.entries <= 0) {
+ splx(s);
+ return;
+ }
+
+ /*
+ * If the changer queue is frozen, that means we have an active
+ * device.
+ */
+ if (changer->devq.qfrozen_cnt > 0) {
+
+ if (changer->cur_device->device_stats.busy_count > 0) {
+ changer->cur_device->flags |= CD_FLAG_SCHED_ON_COMP;
+ changer->cur_device->bufs_left =
+ changer->cur_device->device_stats.busy_count;
+ if (called_from_timeout) {
+ changer->long_handle =
+ timeout(cdrunchangerqueue, changer,
+ changer_max_busy_seconds * hz);
+ changer->flags |= CHANGER_TIMEOUT_SCHED;
+ }
+ splx(s);
+ return;
+ }
+
+ /*
+ * We always need to reset the frozen count and clear the
+ * active flag.
+ */
+ changer->devq.qfrozen_cnt--;
+ changer->cur_device->flags &= ~CD_FLAG_ACTIVE;
+ changer->cur_device->flags &= ~CD_FLAG_SCHED_ON_COMP;
+
+ /*
+ * Check to see whether the current device has any I/O left
+ * to do. If so, requeue it at the end of the queue. If
+ * not, there is no need to requeue it.
+ */
+ if (bufq_first(&changer->cur_device->buf_queue) != NULL) {
+
+ if (changer->devq.generation++ == 0)
+ camq_regen(&changer->devq);
+
+ changer->cur_device->pinfo.generation =
+ changer->devq.generation;
+ camq_insert(&changer->devq,
+ (cam_pinfo *)changer->cur_device);
+ }
+ }
+
+ softc = (struct cd_softc *)camq_remove(&changer->devq, 0);
+
+ changer->cur_device = softc;
+
+ changer->devq.qfrozen_cnt++;
+ softc->flags |= CD_FLAG_ACTIVE;
+
+ /* Just in case this device is waiting */
+ wakeup(&softc->changer);
+ xpt_schedule(softc->periph, /*priority*/ 1);
+
+ /*
+ * Get rid of any pending timeouts, and set a flag to schedule new
+ * ones so this device gets its full time quantum.
+ */
+ if (changer->flags & CHANGER_TIMEOUT_SCHED) {
+ untimeout(cdrunchangerqueue, changer, changer->long_handle);
+ changer->flags &= ~CHANGER_TIMEOUT_SCHED;
+ }
+
+ if (changer->flags & CHANGER_SHORT_TMOUT_SCHED) {
+ untimeout(cdshorttimeout, changer, changer->short_handle);
+ changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED;
+ }
+
+ /*
+ * We need to schedule timeouts, but we only do this after the
+ * first transaction has completed. This eliminates the changer
+ * switch time.
+ */
+ changer->flags |= CHANGER_NEED_TIMEOUT;
+
+ splx(s);
+}
+
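+/*
+ * Called after each completed transaction on a changer device.  Either
+ * switch the changer to the next device once this one has drained, or,
+ * after the first transaction completes, start the long and short time
+ * quantum timeouts.
+ */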
+static void
+cdchangerschedule(struct cd_softc *softc)
+{
+ struct cdchanger *changer;
+ int s;
+
+ s = splsoftcam();
+
+ changer = softc->changer;
+
+ /*
+ * If this is a changer, and this is the current device,
+ * and this device has at least the minimum time quantum to
+ * run, see if we can switch it out.
+ */
+ if ((softc->flags & CD_FLAG_ACTIVE)
+ && ((changer->flags & CHANGER_SHORT_TMOUT_SCHED) == 0)
+ && ((changer->flags & CHANGER_NEED_TIMEOUT) == 0)) {
+		/*
+		 * We check two things here.  First, if the schedule on
+		 * completion flag is set, we decrement the number of
+		 * buffers left and reschedule the changer once it hits
+		 * zero.  Otherwise, if the pending buffer queue is empty
+		 * and there are no outstanding transactions, we
+		 * reschedule the changer right away.
+		 */
+ if (softc->flags & CD_FLAG_SCHED_ON_COMP) {
+ if (--softc->bufs_left == 0) {
+ softc->changer->flags |=
+ CHANGER_MANUAL_CALL;
+ softc->flags &= ~CD_FLAG_SCHED_ON_COMP;
+ cdrunchangerqueue(softc->changer);
+ }
+ } else if ((bufq_first(&softc->buf_queue) == NULL)
+ && (softc->device_stats.busy_count == 0)) {
+ softc->changer->flags |= CHANGER_MANUAL_CALL;
+ cdrunchangerqueue(softc->changer);
+ }
+ } else if ((softc->changer->flags & CHANGER_NEED_TIMEOUT)
+ && (softc->flags & CD_FLAG_ACTIVE)) {
+
+ /*
+ * Now that the first transaction to this
+ * particular device has completed, we can go ahead
+ * and schedule our timeouts.
+ */
+ if ((changer->flags & CHANGER_TIMEOUT_SCHED) == 0) {
+ changer->long_handle =
+ timeout(cdrunchangerqueue, changer,
+ changer_max_busy_seconds * hz);
+ changer->flags |= CHANGER_TIMEOUT_SCHED;
+ } else
+ printf("cdchangerschedule: already have a long"
+ " timeout!\n");
+
+ if ((changer->flags & CHANGER_SHORT_TMOUT_SCHED) == 0) {
+ changer->short_handle =
+ timeout(cdshorttimeout, changer,
+ changer_min_busy_seconds * hz);
+ changer->flags |= CHANGER_SHORT_TMOUT_SCHED;
+ } else
+ printf("cdchangerschedule: already have a short "
+ "timeout!\n");
+
+ /*
+ * We just scheduled timeouts, no need to schedule
+ * more.
+ */
+ changer->flags &= ~CHANGER_NEED_TIMEOUT;
+
+ }
+ splx(s);
+}
+
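+/*
+ * Run a CCB synchronously via cam_periph_runccb() and then give the
+ * changer scheduler a chance to switch devices.
+ */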
+static int
+cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb,
+ u_int32_t cam_flags,
+ u_int32_t sense_flags),
+ u_int32_t cam_flags, u_int32_t sense_flags)
+{
+ struct cd_softc *softc;
+ struct cam_periph *periph;
+ int error;
+
+ periph = xpt_path_periph(ccb->ccb_h.path);
+ softc = (struct cd_softc *)periph->softc;
+
+ error = cam_periph_runccb(ccb, error_routine, cam_flags, sense_flags,
+ &softc->device_stats);
+
+ if (softc->flags & CD_FLAG_CHANGER)
+ cdchangerschedule(softc);
+
+ return(error);
+}
+
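+/*
+ * Get a CCB for this peripheral.  For changer devices, wait until the
+ * device owns the changer's active slot before returning.
+ */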
+union ccb *
+cdgetccb(struct cam_periph *periph, u_int32_t priority)
+{
+ struct cd_softc *softc;
+ int s;
+
+ softc = (struct cd_softc *)periph->softc;
+
+ if (softc->flags & CD_FLAG_CHANGER) {
+
+ s = splsoftcam();
+
+ /*
+ * This should work the first time this device is woken up,
+ * but just in case it doesn't, we use a while loop.
+ */
+ while ((((volatile cd_flags)softc->flags) & CD_FLAG_ACTIVE)==0){
+ /*
+ * If this changer isn't already queued, queue it up.
+ */
+ if (softc->pinfo.index == CAM_UNQUEUED_INDEX) {
+ softc->pinfo.priority = 1;
+ if (softc->changer->devq.generation++ == 0)
+ camq_regen(&softc->changer->devq);
+ softc->pinfo.generation =
+ softc->changer->devq.generation;
+ camq_insert(&softc->changer->devq,
+ (cam_pinfo *)softc);
+ }
+ if (((((volatile cd_changer_flags)softc->changer->flags)
+ & CHANGER_TIMEOUT_SCHED)==0)
+ &&((((volatile cd_changer_flags)softc->changer->flags)
+ & CHANGER_NEED_TIMEOUT)==0)){
+ softc->changer->flags |= CHANGER_MANUAL_CALL;
+ cdrunchangerqueue(softc->changer);
+ } else
+ tsleep(&softc->changer, PRIBIO, "cgticb", 0);
+ }
+ splx(s);
+ }
+ return(cam_periph_getccb(periph, priority));
+}
+
+
+/*
+ * Actually translate the requested transfer into one the physical driver
+ * can understand. The transfer is described by a buf and will include
+ * only one physical transfer.
+ */
+static void
+cdstrategy(struct buf *bp)
+{
+ struct cam_periph *periph;
+ struct cd_softc *softc;
+ u_int unit, part;
+ int s;
+
+ unit = dkunit(bp->b_dev);
+ part = dkpart(bp->b_dev);
+ periph = cam_extend_get(cdperiphs, unit);
+ if (periph == NULL) {
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdstrategy\n"));
+
+ softc = (struct cd_softc *)periph->softc;
+
+ /*
+	 * Do bounds checking, adjust the transfer, and set b_pblkno.
+ */
+ if (dscheck(bp, softc->cd_slices) <= 0)
+ goto done;
+
+ /*
+ * Mask interrupts so that the pack cannot be invalidated until
+ * after we are in the queue. Otherwise, we might not properly
+ * clean up one of the buffers.
+ */
+ s = splbio();
+
+ /*
+ * If the device has been made invalid, error out
+ */
+ if ((softc->flags & CD_FLAG_INVALID)) {
+ splx(s);
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+
+ /*
+ * Place it in the queue of disk activities for this disk
+ */
+ bufqdisksort(&softc->buf_queue, bp);
+
+ splx(s);
+
+ /*
+ * Schedule ourselves for performing the work. We do things
+ * differently for changers.
+ */
+ if ((softc->flags & CD_FLAG_CHANGER) == 0)
+ xpt_schedule(periph, /* XXX priority */1);
+ else
+ cdschedule(periph, /* priority */ 1);
+
+ return;
+bad:
+ bp->b_flags |= B_ERROR;
+done:
+ /*
+ * Correctly set the buf to indicate a completed xfer
+ */
+ bp->b_resid = bp->b_bcount;
+ biodone(bp);
+ return;
+}
+
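+/*
+ * Called by the transport layer when a previously scheduled request can
+ * be executed.  In the normal state this turns the next queued buf into
+ * a READ/WRITE CDB; in the probe state it issues a READ CAPACITY.
+ */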
+static void
+cdstart(struct cam_periph *periph, union ccb *start_ccb)
+{
+ struct cd_softc *softc;
+ struct buf *bp;
+ struct ccb_scsiio *csio;
+ struct scsi_read_capacity_data *rcap;
+ struct partition *p;
+ u_int32_t blkno, nblk;
+ int s;
+
+ softc = (struct cd_softc *)periph->softc;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdstart\n"));
+
+ switch (softc->state) {
+ case CD_STATE_NORMAL:
+ {
+ int oldspl;
+
+ s = splbio();
+ bp = bufq_first(&softc->buf_queue);
+ if (periph->immediate_priority <= periph->pinfo.priority) {
+ start_ccb->ccb_h.ccb_state = CD_CCB_WAITING;
+
+ SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
+ periph_links.sle);
+ periph->immediate_priority = CAM_PRIORITY_NONE;
+ splx(s);
+ wakeup(&periph->ccb_list);
+ } else if (bp == NULL) {
+ splx(s);
+ xpt_release_ccb(start_ccb);
+ } else {
+ bufq_remove(&softc->buf_queue, bp);
+
+ devstat_start_transaction(&softc->device_stats);
+
+ scsi_read_write(&start_ccb->csio,
+ /*retries*/4,
+ /* cbfcnp */ cddone,
+ (bp->b_flags & B_ORDERED) != 0 ?
+ MSG_ORDERED_Q_TAG :
+ MSG_SIMPLE_Q_TAG,
+ /* read */bp->b_flags & B_READ,
+ /* byte2 */ 0,
+ /* minimum_cmd_size */ 10,
+ /* lba */ bp->b_pblkno,
+ bp->b_bcount / softc->params.blksize,
+ /* data_ptr */ bp->b_data,
+ /* dxfer_len */ bp->b_bcount,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ 30000);
+ start_ccb->ccb_h.ccb_state = CD_CCB_BUFFER_IO;
+
+
+ /*
+			 * Block out any asynchronous callbacks
+ * while we touch the pending ccb list.
+ */
+ oldspl = splcam();
+ LIST_INSERT_HEAD(&softc->pending_ccbs,
+ &start_ccb->ccb_h, periph_links.le);
+ splx(oldspl);
+
+ /* We expect a unit attention from this device */
+ if ((softc->flags & CD_FLAG_RETRY_UA) != 0) {
+ start_ccb->ccb_h.ccb_state |= CD_CCB_RETRY_UA;
+ softc->flags &= ~CD_FLAG_RETRY_UA;
+ }
+
+ start_ccb->ccb_h.ccb_bp = bp;
+ bp = bufq_first(&softc->buf_queue);
+ splx(s);
+
+ xpt_action(start_ccb);
+ }
+ if (bp != NULL) {
+ /* Have more work to do, so ensure we stay scheduled */
+ xpt_schedule(periph, /* XXX priority */1);
+ }
+ break;
+ }
+ case CD_STATE_PROBE:
+ {
+
+ rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
+ M_TEMP,
+ M_NOWAIT);
+ if (rcap == NULL) {
+ xpt_print_path(periph->path);
+ printf("cdstart: Couldn't malloc read_capacity data\n");
+ /* cd_free_periph??? */
+ break;
+ }
+ csio = &start_ccb->csio;
+ scsi_read_capacity(csio,
+ /*retries*/1,
+ cddone,
+ MSG_SIMPLE_Q_TAG,
+ rcap,
+ SSD_FULL_SIZE,
+ /*timeout*/20000);
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = CD_CCB_PROBE;
+ xpt_action(start_ccb);
+ break;
+ }
+ }
+}
+
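+/*
+ * Completion handler for all CCBs issued by this driver: finish buffer
+ * I/O, complete the capacity probe (announcing the device or giving up
+ * on fatal errors), and wake up anyone waiting on an immediate CCB.
+ */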
+static void
+cddone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct cd_softc *softc;
+ struct ccb_scsiio *csio;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cddone\n"));
+
+ softc = (struct cd_softc *)periph->softc;
+ csio = &done_ccb->csio;
+
+ switch (csio->ccb_h.ccb_state & CD_CCB_TYPE_MASK) {
+ case CD_CCB_BUFFER_IO:
+ {
+ struct buf *bp;
+ int error;
+ int oldspl;
+
+ bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
+ error = 0;
+
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ int sf;
+
+ if ((done_ccb->ccb_h.ccb_state & CD_CCB_RETRY_UA) != 0)
+ sf = SF_RETRY_UA;
+ else
+ sf = 0;
+
+ if ((error = cderror(done_ccb, 0, sf)) == ERESTART) {
+ /*
+				 * A retry was scheduled, so
+ * just return.
+ */
+ return;
+ }
+ }
+
+ if (error != 0) {
+ int s;
+ struct buf *q_bp;
+
+ xpt_print_path(periph->path);
+ printf("cddone: got error %#x back\n", error);
+ s = splbio();
+ while ((q_bp = bufq_first(&softc->buf_queue)) != NULL) {
+ bufq_remove(&softc->buf_queue, q_bp);
+ q_bp->b_resid = q_bp->b_bcount;
+ q_bp->b_error = EIO;
+ q_bp->b_flags |= B_ERROR;
+ biodone(q_bp);
+ }
+ splx(s);
+ bp->b_resid = bp->b_bcount;
+ bp->b_error = error;
+ bp->b_flags |= B_ERROR;
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ } else {
+ bp->b_resid = csio->resid;
+ bp->b_error = 0;
+ if (bp->b_resid != 0) {
+ /* Short transfer ??? */
+ bp->b_flags |= B_ERROR;
+ }
+ }
+
+ /*
+		 * Block out any asynchronous callbacks
+ * while we touch the pending ccb list.
+ */
+ oldspl = splcam();
+ LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
+ splx(oldspl);
+
+ devstat_end_transaction(&softc->device_stats,
+ bp->b_bcount - bp->b_resid,
+ done_ccb->csio.tag_action & 0xf,
+ (bp->b_flags & B_READ) ? DEVSTAT_READ
+ : DEVSTAT_WRITE);
+
+ if (softc->flags & CD_FLAG_CHANGER)
+ cdchangerschedule(softc);
+
+ biodone(bp);
+ break;
+ }
+ case CD_CCB_PROBE:
+ {
+ struct scsi_read_capacity_data *rdcap;
+ char announce_buf[120]; /*
+ * Currently (9/30/97) the
+ * longest possible announce
+ * buffer is 108 bytes, for the
+ * first error case below.
+ * That is 39 bytes for the
+ * basic string, 16 bytes for the
+ * biggest sense key (hardware
+ * error), 52 bytes for the
+ * text of the largest sense
+ * qualifier valid for a CDROM,
+ * (0x72, 0x03 or 0x04,
+ * 0x03), and one byte for the
+ * null terminating character.
+ * To allow for longer strings,
+ * the announce buffer is 120
+ * bytes.
+ */
+ struct cd_params *cdp;
+
+ cdp = &softc->params;
+
+ rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
+
+ cdp->disksize = scsi_4btoul (rdcap->addr) + 1;
+ cdp->blksize = scsi_4btoul (rdcap->length);
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+
+ sprintf(announce_buf,
+ "cd present [%ld x %d byte records]",
+ cdp->disksize, cdp->blksize);
+
+ } else {
+ int error;
+ /*
+ * Retry any UNIT ATTENTION type errors. They
+ * are expected at boot.
+ */
+ error = cderror(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART) {
+ /*
+				 * A retry was scheduled, so
+ * just return.
+ */
+ return;
+ } else if (error != 0) {
+
+ struct scsi_sense_data *sense;
+ int asc, ascq;
+ int sense_key, error_code;
+ int have_sense;
+ cam_status status;
+ struct ccb_getdev cgd;
+
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ status = done_ccb->ccb_h.status;
+
+ xpt_setup_ccb(&cgd.ccb_h,
+ done_ccb->ccb_h.path,
+ /* priority */ 1);
+ cgd.ccb_h.func_code = XPT_GDEV_TYPE;
+ xpt_action((union ccb *)&cgd);
+
+ if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
+ || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
+ || ((status & CAM_AUTOSNS_VALID) == 0))
+ have_sense = FALSE;
+ else
+ have_sense = TRUE;
+
+ if (have_sense) {
+ sense = &csio->sense_data;
+ scsi_extract_sense(sense, &error_code,
+ &sense_key,
+ &asc, &ascq);
+ }
+ /*
+ * With CDROM devices, we expect 0x3a
+ * (Medium not present) errors, since not
+ * everyone leaves a CD in the drive. If
+ * the error is anything else, though, we
+ * shouldn't attach.
+ */
+ if ((have_sense) && (asc == 0x3a)
+ && (error_code == SSD_CURRENT_ERROR))
+ sprintf(announce_buf,
+ "Attempt to query device "
+ "size failed: %s, %s",
+ scsi_sense_key_text[sense_key],
+ scsi_sense_desc(asc,ascq,
+ &cgd.inq_data));
+ else if (cgd.pd_type == T_CDROM) {
+ /*
+ * We only print out an error for
+ * CDROM type devices. For WORM
+ * devices, we don't print out an
+ * error since a few WORM devices
+ * don't support CDROM commands.
+ * If we have sense information, go
+ * ahead and print it out.
+ * Otherwise, just say that we
+ * couldn't attach.
+ */
+ if ((have_sense) && (asc || ascq)
+ && (error_code == SSD_CURRENT_ERROR))
+ sprintf(announce_buf,
+ "fatal error: %s, %s "
+ "-- failed to attach "
+ "to device",
+ scsi_sense_key_text[sense_key],
+ scsi_sense_desc(asc,ascq,
+ &cgd.inq_data));
+ else
+ sprintf(announce_buf,
+ "fatal error, failed"
+ " to attach to device");
+
+ /*
+ * Just print out the error, not
+ * the full probe message, when we
+ * don't attach.
+ */
+ printf("%s%d: %s\n",
+ periph->periph_name,
+ periph->unit_number,
+ announce_buf);
+ scsi_sense_print(&done_ccb->csio);
+
+ /*
+ * Free up resources.
+ */
+ cam_extend_release(cdperiphs,
+ periph->unit_number);
+ cam_periph_invalidate(periph);
+ periph = NULL;
+ } else {
+ /*
+ * Free up resources.
+ */
+ cam_extend_release(cdperiphs,
+ periph->unit_number);
+ cam_periph_invalidate(periph);
+ periph = NULL;
+ }
+ }
+ }
+ free(rdcap, M_TEMP);
+ if (periph != NULL) {
+ xpt_announce_periph(periph, announce_buf);
+ softc->state = CD_STATE_NORMAL;
+ cam_periph_unlock(periph);
+ }
+
+ if (softc->flags & CD_FLAG_CHANGER)
+ cdchangerschedule(softc);
+ break;
+ }
+ case CD_CCB_WAITING:
+ {
+ /* Caller will release the CCB */
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
+ ("trying to wakeup ccbwait\n"));
+
+ wakeup(&done_ccb->ccb_h.cbfcnp);
+ return;
+ }
+ }
+ xpt_release_ccb(done_ccb);
+}
+
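+/*
+ * Handle the CDROM-specific ioctls (audio play, TOC, subchannel, volume
+ * and channel control), falling back to the slice code and then to the
+ * generic CAM peripheral ioctl handler for anything else.
+ */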
+static int
+cdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
+{
+
+ struct cam_periph *periph;
+ struct cd_softc *softc;
+ u_int8_t unit, part;
+ int error;
+
+ unit = dkunit(dev);
+ part = dkpart(dev);
+
+ periph = cam_extend_get(cdperiphs, unit);
+ if (periph == NULL)
+ return(ENXIO);
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdioctl\n"));
+
+ softc = (struct cd_softc *)periph->softc;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
+ ("trying to do ioctl %#x\n", cmd));
+
+ error = 0;
+
+ switch (cmd) {
+
+ case CDIOCPLAYTRACKS:
+ {
+ struct ioc_play_track *args
+ = (struct ioc_play_track *) addr;
+ struct cd_mode_data *data;
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCPLAYTRACKS\n"));
+
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.flags &= ~CD_PA_SOTC;
+ data->page.audio.flags |= CD_PA_IMMED;
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ if (error)
+ break;
+ if (softc->quirks & CD_Q_BCD_TRACKS) {
+ args->start_track = bin2bcd(args->start_track);
+ args->end_track = bin2bcd(args->end_track);
+ }
+ error = cdplaytracks(periph,
+ args->start_track,
+ args->start_index,
+ args->end_track,
+ args->end_index);
+ }
+ break;
+ case CDIOCPLAYMSF:
+ {
+ struct ioc_play_msf *args
+ = (struct ioc_play_msf *) addr;
+ struct cd_mode_data *data;
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCPLAYMSF\n"));
+
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.flags &= ~CD_PA_SOTC;
+ data->page.audio.flags |= CD_PA_IMMED;
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ if (error)
+ break;
+ error = cdplaymsf(periph,
+ args->start_m,
+ args->start_s,
+ args->start_f,
+ args->end_m,
+ args->end_s,
+ args->end_f);
+ }
+ break;
+ case CDIOCPLAYBLOCKS:
+ {
+ struct ioc_play_blocks *args
+ = (struct ioc_play_blocks *) addr;
+ struct cd_mode_data *data;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCPLAYBLOCKS\n"));
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.flags &= ~CD_PA_SOTC;
+ data->page.audio.flags |= CD_PA_IMMED;
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ if (error)
+ break;
+ error = cdplay(periph, args->blk, args->len);
+ }
+ break;
+ case CDIOCREADSUBCHANNEL:
+ {
+ struct ioc_read_subchannel *args
+ = (struct ioc_read_subchannel *) addr;
+ struct cd_sub_channel_info *data;
+ u_int32_t len = args->data_len;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCREADSUBCHANNEL\n"));
+
+ data = malloc(sizeof(struct cd_sub_channel_info),
+ M_TEMP, M_WAITOK);
+
+ if ((len > sizeof(struct cd_sub_channel_info)) ||
+ (len < sizeof(struct cd_sub_channel_header))) {
+ printf(
+ "scsi_cd: cdioctl: "
+ "cdioreadsubchannel: error, len=%d\n",
+ len);
+ error = EINVAL;
+ free(data, M_TEMP);
+ break;
+ }
+
+ if (softc->quirks & CD_Q_BCD_TRACKS)
+ args->track = bin2bcd(args->track);
+
+ error = cdreadsubchannel(periph, args->address_format,
+ args->data_format, args->track, data, len);
+
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ if (softc->quirks & CD_Q_BCD_TRACKS)
+ data->what.track_info.track_number =
+ bcd2bin(data->what.track_info.track_number);
+ len = min(len, ((data->header.data_len[0] << 8) +
+ data->header.data_len[1] +
+ sizeof(struct cd_sub_channel_header)));
+ if (copyout(data, args->data, len) != 0) {
+ error = EFAULT;
+ }
+ free(data, M_TEMP);
+ }
+ break;
+
+ case CDIOREADTOCHEADER:
+ {
+ struct ioc_toc_header *th;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOREADTOCHEADER\n"));
+
+ th = malloc(sizeof(struct ioc_toc_header), M_TEMP,
+ M_WAITOK);
+ error = cdreadtoc(periph, 0, 0,
+ (struct cd_toc_entry *)th,
+ sizeof (*th));
+ if (error) {
+ free(th, M_TEMP);
+ break;
+ }
+ if (softc->quirks & CD_Q_BCD_TRACKS) {
+ /* we are going to have to convert the BCD
+ * encoding on the cd to what is expected
+ */
+ th->starting_track =
+ bcd2bin(th->starting_track);
+ th->ending_track = bcd2bin(th->ending_track);
+ }
+ NTOHS(th->len);
+ bcopy(th, addr, sizeof(*th));
+ free(th, M_TEMP);
+ }
+ break;
+ case CDIOREADTOCENTRYS:
+ {
+ typedef struct {
+ struct ioc_toc_header header;
+ struct cd_toc_entry entries[100];
+ } data_t;
+ typedef struct {
+ struct ioc_toc_header header;
+ struct cd_toc_entry entry;
+ } lead_t;
+
+ data_t *data;
+ lead_t *lead;
+ struct ioc_read_toc_entry *te =
+ (struct ioc_read_toc_entry *) addr;
+ struct ioc_toc_header *th;
+ u_int32_t len, readlen, idx, num;
+ u_int32_t starting_track = te->starting_track;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOREADTOCENTRYS\n"));
+
+ data = malloc(sizeof(data_t), M_TEMP, M_WAITOK);
+ lead = malloc(sizeof(lead_t), M_TEMP, M_WAITOK);
+
+ if (te->data_len < sizeof(struct cd_toc_entry)
+ || (te->data_len % sizeof(struct cd_toc_entry)) != 0
+ || (te->address_format != CD_MSF_FORMAT
+ && te->address_format != CD_LBA_FORMAT)) {
+ error = EINVAL;
+ printf("scsi_cd: error in readtocentries, "
+ "returning EINVAL\n");
+ free(data, M_TEMP);
+ free(lead, M_TEMP);
+ break;
+ }
+
+ th = &data->header;
+ error = cdreadtoc(periph, 0, 0,
+ (struct cd_toc_entry *)th,
+ sizeof (*th));
+ if (error) {
+ free(data, M_TEMP);
+ free(lead, M_TEMP);
+ break;
+ }
+
+ if (softc->quirks & CD_Q_BCD_TRACKS) {
+ /* we are going to have to convert the BCD
+ * encoding on the cd to what is expected
+ */
+ th->starting_track =
+ bcd2bin(th->starting_track);
+ th->ending_track = bcd2bin(th->ending_track);
+ }
+
+ if (starting_track == 0)
+ starting_track = th->starting_track;
+ else if (starting_track == LEADOUT)
+ starting_track = th->ending_track + 1;
+ else if (starting_track < th->starting_track ||
+ starting_track > th->ending_track + 1) {
+ printf("scsi_cd: error in readtocentries, "
+ "returning EINVAL\n");
+ free(data, M_TEMP);
+ free(lead, M_TEMP);
+ error = EINVAL;
+ break;
+ }
+
+ /* calculate reading length without leadout entry */
+ readlen = (th->ending_track - starting_track + 1) *
+ sizeof(struct cd_toc_entry);
+
+ /* and with leadout entry */
+ len = readlen + sizeof(struct cd_toc_entry);
+ if (te->data_len < len) {
+ len = te->data_len;
+ if (readlen > len)
+ readlen = len;
+ }
+ if (len > sizeof(data->entries)) {
+ printf("scsi_cd: error in readtocentries, "
+ "returning EINVAL\n");
+ error = EINVAL;
+ free(data, M_TEMP);
+ free(lead, M_TEMP);
+ break;
+ }
+ num = len / sizeof(struct cd_toc_entry);
+
+ if (readlen > 0) {
+ error = cdreadtoc(periph, te->address_format,
+ starting_track,
+ (struct cd_toc_entry *)data,
+ readlen + sizeof (*th));
+ if (error) {
+ free(data, M_TEMP);
+ free(lead, M_TEMP);
+ break;
+ }
+ }
+
+ /* make leadout entry if needed */
+ idx = starting_track + num - 1;
+ if (softc->quirks & CD_Q_BCD_TRACKS)
+ th->ending_track = bcd2bin(th->ending_track);
+ if (idx == th->ending_track + 1) {
+ error = cdreadtoc(periph, te->address_format,
+ LEADOUT,
+ (struct cd_toc_entry *)lead,
+ sizeof(*lead));
+ if (error) {
+ free(data, M_TEMP);
+ free(lead, M_TEMP);
+ break;
+ }
+ data->entries[idx - starting_track] =
+ lead->entry;
+ }
+ if (softc->quirks & CD_Q_BCD_TRACKS) {
+ for (idx = 0; idx < num - 1; idx++) {
+ data->entries[idx].track =
+ bcd2bin(data->entries[idx].track);
+ }
+ }
+
+ error = copyout(data->entries, te->data, len);
+ free(data, M_TEMP);
+ free(lead, M_TEMP);
+ }
+ break;
+ case CDIOREADTOCENTRY:
+ {
+ /* yeah yeah, this is ugly */
+ typedef struct {
+ struct ioc_toc_header header;
+ struct cd_toc_entry entry;
+ } data_t;
+
+ data_t *data;
+ struct ioc_read_toc_single_entry *te =
+ (struct ioc_read_toc_single_entry *) addr;
+ struct ioc_toc_header *th;
+ u_int32_t track;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOREADTOCENTRY\n"));
+
+ data = malloc(sizeof(data_t), M_TEMP, M_WAITOK);
+
+ if (te->address_format != CD_MSF_FORMAT
+ && te->address_format != CD_LBA_FORMAT) {
+ printf("error in readtocentry, "
+ " returning EINVAL\n");
+ free(data, M_TEMP);
+ error = EINVAL;
+ break;
+ }
+
+ th = &data->header;
+ error = cdreadtoc(periph, 0, 0,
+ (struct cd_toc_entry *)th,
+ sizeof (*th));
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+
+ if (softc->quirks & CD_Q_BCD_TRACKS) {
+ /* we are going to have to convert the BCD
+ * encoding on the cd to what is expected
+ */
+ th->starting_track =
+ bcd2bin(th->starting_track);
+ th->ending_track = bcd2bin(th->ending_track);
+ }
+ track = te->track;
+ if (track == 0)
+ track = th->starting_track;
+ else if (track == LEADOUT)
+ /* OK */;
+ else if (track < th->starting_track ||
+ track > th->ending_track + 1) {
+ printf("error in readtocentry, "
+ " returning EINVAL\n");
+ free(data, M_TEMP);
+ error = EINVAL;
+ break;
+ }
+
+ error = cdreadtoc(periph, te->address_format, track,
+ (struct cd_toc_entry *)data,
+ sizeof(data_t));
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+
+ if (softc->quirks & CD_Q_BCD_TRACKS)
+ data->entry.track = bcd2bin(data->entry.track);
+ bcopy(&data->entry, &te->entry,
+ sizeof(struct cd_toc_entry));
+ free(data, M_TEMP);
+ }
+ break;
+ case CDIOCSETPATCH:
+ {
+ struct ioc_patch *arg = (struct ioc_patch *) addr;
+ struct cd_mode_data *data;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCSETPATCH\n"));
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.port[LEFT_PORT].channels =
+ arg->patch[0];
+ data->page.audio.port[RIGHT_PORT].channels =
+ arg->patch[1];
+ data->page.audio.port[2].channels = arg->patch[2];
+ data->page.audio.port[3].channels = arg->patch[3];
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ }
+ break;
+ case CDIOCGETVOL:
+ {
+ struct ioc_vol *arg = (struct ioc_vol *) addr;
+ struct cd_mode_data *data;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCGETVOL\n"));
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ arg->vol[LEFT_PORT] =
+ data->page.audio.port[LEFT_PORT].volume;
+ arg->vol[RIGHT_PORT] =
+ data->page.audio.port[RIGHT_PORT].volume;
+ arg->vol[2] = data->page.audio.port[2].volume;
+ arg->vol[3] = data->page.audio.port[3].volume;
+ free(data, M_TEMP);
+ }
+ break;
+ case CDIOCSETVOL:
+ {
+ struct ioc_vol *arg = (struct ioc_vol *) addr;
+ struct cd_mode_data *data;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCSETVOL\n"));
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.port[LEFT_PORT].channels = CHANNEL_0;
+ data->page.audio.port[LEFT_PORT].volume =
+ arg->vol[LEFT_PORT];
+ data->page.audio.port[RIGHT_PORT].channels = CHANNEL_1;
+ data->page.audio.port[RIGHT_PORT].volume =
+ arg->vol[RIGHT_PORT];
+ data->page.audio.port[2].volume = arg->vol[2];
+ data->page.audio.port[3].volume = arg->vol[3];
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ }
+ break;
+ case CDIOCSETMONO:
+ {
+ struct cd_mode_data *data;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCSETMONO\n"));
+
+ data = malloc(sizeof(struct cd_mode_data),
+ M_TEMP, M_WAITOK);
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.port[LEFT_PORT].channels =
+ LEFT_CHANNEL | RIGHT_CHANNEL;
+ data->page.audio.port[RIGHT_PORT].channels =
+ LEFT_CHANNEL | RIGHT_CHANNEL;
+ data->page.audio.port[2].channels = 0;
+ data->page.audio.port[3].channels = 0;
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ }
+ break;
+ case CDIOCSETSTEREO:
+ {
+ struct cd_mode_data *data;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCSETSTEREO\n"));
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.port[LEFT_PORT].channels =
+ LEFT_CHANNEL;
+ data->page.audio.port[RIGHT_PORT].channels =
+ RIGHT_CHANNEL;
+ data->page.audio.port[2].channels = 0;
+ data->page.audio.port[3].channels = 0;
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ }
+ break;
+ case CDIOCSETMUTE:
+ {
+ struct cd_mode_data *data;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCSETMUTE\n"));
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.port[LEFT_PORT].channels = 0;
+ data->page.audio.port[RIGHT_PORT].channels = 0;
+ data->page.audio.port[2].channels = 0;
+ data->page.audio.port[3].channels = 0;
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ }
+ break;
+ case CDIOCSETLEFT:
+ {
+ struct cd_mode_data *data;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCSETLEFT\n"));
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.port[LEFT_PORT].channels =
+ LEFT_CHANNEL;
+ data->page.audio.port[RIGHT_PORT].channels =
+ LEFT_CHANNEL;
+ data->page.audio.port[2].channels = 0;
+ data->page.audio.port[3].channels = 0;
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ }
+ break;
+ case CDIOCSETRIGHT:
+ {
+ struct cd_mode_data *data;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
+ ("trying to do CDIOCSETRIGHT\n"));
+
+ data = malloc(sizeof(struct cd_mode_data), M_TEMP,
+ M_WAITOK);
+ error = cdgetmode(periph, data, AUDIO_PAGE);
+ if (error) {
+ free(data, M_TEMP);
+ break;
+ }
+ data->page.audio.port[LEFT_PORT].channels =
+ RIGHT_CHANNEL;
+ data->page.audio.port[RIGHT_PORT].channels =
+ RIGHT_CHANNEL;
+ data->page.audio.port[2].channels = 0;
+ data->page.audio.port[3].channels = 0;
+ error = cdsetmode(periph, data);
+ free(data, M_TEMP);
+ }
+ break;
+ case CDIOCRESUME:
+ error = cdpause(periph, 1);
+ break;
+ case CDIOCPAUSE:
+ error = cdpause(periph, 0);
+ break;
+ case CDIOCSTART:
+ error = cdstartunit(periph);
+ break;
+ case CDIOCSTOP:
+ error = cdstopunit(periph, 0);
+ break;
+ case CDIOCEJECT:
+ error = cdstopunit(periph, 1);
+ break;
+ case CDIOCALLOW:
+ cdprevent(periph, PR_ALLOW);
+ break;
+ case CDIOCPREVENT:
+ cdprevent(periph, PR_PREVENT);
+ break;
+ case CDIOCSETDEBUG:
+ /* sc_link->flags |= (SDEV_DB1 | SDEV_DB2); */
+ error = ENOTTY;
+ break;
+ case CDIOCCLRDEBUG:
+ /* sc_link->flags &= ~(SDEV_DB1 | SDEV_DB2); */
+ error = ENOTTY;
+ break;
+ case CDIOCRESET:
+ /* return (cd_reset(periph)); */
+ error = ENOTTY;
+ break;
+ default:
+ if (cmd == DIOCSBAD) {
+ error = EINVAL; /* XXX */
+ break;
+ }
+
+ /*
+ * Check to see whether we've got a disk-type ioctl. If we
+ * don't, dsioctl will pass back an error code of ENOIOCTL.
+ */
+ error = dsioctl("cd", dev, cmd, addr, flag, &softc->cd_slices,
+ cdstrategy, (ds_setgeom_t *)NULL);
+
+ if (error != ENOIOCTL)
+ break;
+
+ error = cam_periph_ioctl(periph, cmd, addr, cderror);
+ break;
+ }
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdioctl\n"));
+
+ return (error);
+}
+
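+/*
+ * Issue a PREVENT/ALLOW MEDIUM REMOVAL command and track the resulting
+ * lock state in the softc.
+ */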
+static void
+cdprevent(struct cam_periph *periph, int action)
+{
+ union ccb *ccb;
+ struct cd_softc *softc;
+ int error;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdprevent\n"));
+
+ softc = (struct cd_softc *)periph->softc;
+
+ if (((action == PR_ALLOW)
+ && (softc->flags & CD_FLAG_DISC_LOCKED) == 0)
+ || ((action == PR_PREVENT)
+ && (softc->flags & CD_FLAG_DISC_LOCKED) != 0)) {
+ return;
+ }
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ scsi_prevent(&ccb->csio,
+ /*retries*/ 1,
+ cddone,
+ MSG_SIMPLE_Q_TAG,
+ action,
+ SSD_FULL_SIZE,
+ /* timeout */60000);
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT);
+
+ xpt_release_ccb(ccb);
+
+ if (error == 0) {
+ if (action == PR_ALLOW)
+ softc->flags &= ~CD_FLAG_DISC_LOCKED;
+ else
+ softc->flags |= CD_FLAG_DISC_LOCKED;
+ }
+}
+
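+/*
+ * Issue a READ CAPACITY to get the media size and blocksize, and record
+ * them in the softc.
+ */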
+static int
+cdsize(dev_t dev, u_int32_t *size)
+{
+ struct cam_periph *periph;
+ struct cd_softc *softc;
+ union ccb *ccb;
+ struct scsi_read_capacity_data *rcap_buf;
+ int error;
+
+ periph = cam_extend_get(cdperiphs, dkunit(dev));
+
+ if (periph == NULL)
+ return (ENXIO);
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdsize\n"));
+
+ softc = (struct cd_softc *)periph->softc;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ rcap_buf = malloc(sizeof(struct scsi_read_capacity_data),
+ M_TEMP, M_WAITOK);
+
+ scsi_read_capacity(&ccb->csio,
+ /*retries*/ 1,
+ cddone,
+ MSG_SIMPLE_Q_TAG,
+ rcap_buf,
+ SSD_FULL_SIZE,
+ /* timeout */20000);
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT);
+
+ xpt_release_ccb(ccb);
+
+ softc->params.disksize = scsi_4btoul(rcap_buf->addr) + 1;
+ softc->params.blksize = scsi_4btoul(rcap_buf->length);
+
+ free(rcap_buf, M_TEMP);
+ *size = softc->params.disksize;
+
+ return (error);
+
+}
+
+static int
+cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
+{
+ struct cd_softc *softc;
+ struct cam_periph *periph;
+
+ periph = xpt_path_periph(ccb->ccb_h.path);
+ softc = (struct cd_softc *)periph->softc;
+
+ return (cam_periph_error(ccb, cam_flags, sense_flags,
+ &softc->saved_ccb));
+}
+
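+/*
+ * Build a fake disklabel for the media.  CDs have no real label, so the
+ * geometry is fabricated from the capacity returned by the drive.
+ */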
+static int
+cdgetdisklabel(dev_t dev)
+{
+ struct cam_periph *periph;
+ struct cd_softc *softc;
+
+ periph = cam_extend_get(cdperiphs, dkunit(dev));
+
+ if (periph == NULL)
+ return (ENXIO);
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetdisklabel\n"));
+
+ softc = (struct cd_softc *)periph->softc;
+
+ bzero(&softc->disklabel, sizeof(struct disklabel));
+
+ /* XXX Use the controller's geometry for this */
+ softc->disklabel.d_type = DTYPE_SCSI;
+ sprintf(softc->disklabel.d_typename, "%s%d", periph->periph_name,
+ periph->unit_number);
+	strncpy(softc->disklabel.d_packname, "fictitious", 16);
+ softc->disklabel.d_secsize = softc->params.blksize;
+ softc->disklabel.d_nsectors = 100;
+ softc->disklabel.d_ntracks = 1;
+ softc->disklabel.d_ncylinders = (softc->params.disksize / 100) + 1;
+ softc->disklabel.d_secpercyl = 100;
+ softc->disklabel.d_secperunit = softc->params.disksize;
+ softc->disklabel.d_rpm = 300;
+ softc->disklabel.d_interleave = 1;
+ softc->disklabel.d_flags = D_REMOVABLE;
+
+ /*
+ * Make partition 'a' cover the whole disk. This is a temporary
+ * compatibility hack. The 'a' partition should not exist, so
+ * the slice code won't create it. The slice code will make
+ * partition (RAW_PART + 'a') cover the whole disk and fill in
+ * some more defaults.
+ */
+ softc->disklabel.d_npartitions = 1;
+ softc->disklabel.d_partitions[0].p_offset = 0;
+ softc->disklabel.d_partitions[0].p_size
+ = softc->params.disksize;
+ softc->disklabel.d_partitions[0].p_fstype = FS_OTHER;
+
+ /*
+ * Signal to other users and routines that we now have a
+ * disklabel that represents the media (maybe)
+ */
+ return (0);
+
+}
+
+/*
+ * Read table of contents
+ */
+static int
+cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start,
+ struct cd_toc_entry *data, u_int32_t len)
+{
+ struct scsi_read_toc *scsi_cmd;
+ u_int32_t ntoc;
+ struct ccb_scsiio *csio;
+ union ccb *ccb;
+ int error;
+
+ ntoc = len;
+ error = 0;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ csio = &ccb->csio;
+
+ cam_fill_csio(csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+ /* flags */ CAM_DIR_IN,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* data_ptr */ (u_int8_t *)data,
+ /* dxfer_len */ len,
+ /* sense_len */ SSD_FULL_SIZE,
+ sizeof(struct scsi_read_toc),
+ /* timeout */ 50000);
+
+ scsi_cmd = (struct scsi_read_toc *)&csio->cdb_io.cdb_bytes;
+ bzero (scsi_cmd, sizeof(*scsi_cmd));
+
+ if (mode == CD_MSF_FORMAT)
+ scsi_cmd->byte2 |= CD_MSF;
+ scsi_cmd->from_track = start;
+ /* scsi_ulto2b(ntoc, (u_int8_t *)scsi_cmd->data_len); */
+ scsi_cmd->data_len[0] = (ntoc) >> 8;
+ scsi_cmd->data_len[1] = (ntoc) & 0xff;
+
+ scsi_cmd->op_code = READ_TOC;
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+static int
+cdreadsubchannel(struct cam_periph *periph, u_int32_t mode,
+ u_int32_t format, int track,
+ struct cd_sub_channel_info *data, u_int32_t len)
+{
+ struct scsi_read_subchannel *scsi_cmd;
+ struct ccb_scsiio *csio;
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ csio = &ccb->csio;
+
+ cam_fill_csio(csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+ /* flags */ CAM_DIR_IN,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* data_ptr */ (u_int8_t *)data,
+ /* dxfer_len */ len,
+ /* sense_len */ SSD_FULL_SIZE,
+ sizeof(struct scsi_read_subchannel),
+ /* timeout */ 50000);
+
+ scsi_cmd = (struct scsi_read_subchannel *)&csio->cdb_io.cdb_bytes;
+ bzero (scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->op_code = READ_SUBCHANNEL;
+ if (mode == CD_MSF_FORMAT)
+ scsi_cmd->byte1 |= CD_MSF;
+ scsi_cmd->byte2 = SRS_SUBQ;
+ scsi_cmd->subchan_format = format;
+ scsi_cmd->track = track;
+ scsi_ulto2b(len, (u_int8_t *)scsi_cmd->data_len);
+ scsi_cmd->control = 0;
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+
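+/*
+ * Read the requested mode page from the drive with a 6-byte MODE SENSE
+ * command.
+ */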
+static int
+cdgetmode(struct cam_periph *periph, struct cd_mode_data *data, u_int32_t page)
+{
+ struct scsi_mode_sense_6 *scsi_cmd;
+ struct ccb_scsiio *csio;
+ union ccb *ccb;
+ int error;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ csio = &ccb->csio;
+
+ bzero(data, sizeof(*data));
+ cam_fill_csio(csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+ /* flags */ CAM_DIR_IN,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* data_ptr */ (u_int8_t *)data,
+ /* dxfer_len */ sizeof(*data),
+ /* sense_len */ SSD_FULL_SIZE,
+ sizeof(struct scsi_mode_sense_6),
+ /* timeout */ 50000);
+
+ scsi_cmd = (struct scsi_mode_sense_6 *)&csio->cdb_io.cdb_bytes;
+ bzero (scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->page = page;
+ scsi_cmd->length = sizeof(*data) & 0xff;
+ scsi_cmd->opcode = MODE_SENSE;
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+static int
+cdsetmode(struct cam_periph *periph, struct cd_mode_data *data)
+{
+ struct scsi_mode_select_6 *scsi_cmd;
+ struct ccb_scsiio *csio;
+ union ccb *ccb;
+ int error;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ csio = &ccb->csio;
+
+ error = 0;
+
+ cam_fill_csio(csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+ /* flags */ CAM_DIR_IN,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* data_ptr */ (u_int8_t *)data,
+ /* dxfer_len */ sizeof(*data),
+ /* sense_len */ SSD_FULL_SIZE,
+ sizeof(struct scsi_mode_select_6),
+ /* timeout */ 50000);
+
+ scsi_cmd = (struct scsi_mode_select_6 *)&csio->cdb_io.cdb_bytes;
+
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MODE_SELECT;
+ scsi_cmd->byte2 |= SMS_PF;
+ scsi_cmd->length = sizeof(*data) & 0xff;
+ data->header.data_length = 0;
+ /*
+ * SONY drives do not allow a mode select with a medium_type
+ * value that has just been returned by a mode sense; use a
+ * medium_type of 0 (Default) instead.
+ */
+ data->header.medium_type = 0;
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+
+static int
+cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len)
+{
+ struct scsi_play *scsi_cmd;
+ struct ccb_scsiio *csio;
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ csio = &ccb->csio;
+
+ cam_fill_csio(csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+ /* flags */ CAM_DIR_IN,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* data_ptr */ NULL,
+ /* dxfer_len */ 0,
+ /* sense_len */ SSD_FULL_SIZE,
+ sizeof(struct scsi_play),
+ /* timeout */ 50000);
+
+ scsi_cmd = (struct scsi_play *)&csio->cdb_io.cdb_bytes;
+ bzero (scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->op_code = PLAY;
+ scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr);
+ scsi_ulto2b(len, (u_int8_t *)scsi_cmd->xfer_len);
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+
+}
+
+static int
+cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts,
+ u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf)
+{
+ struct scsi_play_msf *scsi_cmd;
+ struct ccb_scsiio *csio;
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ csio = &ccb->csio;
+
+ cam_fill_csio(csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+ /* flags */ CAM_DIR_IN,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* data_ptr */ NULL,
+ /* dxfer_len */ 0,
+ /* sense_len */ SSD_FULL_SIZE,
+ sizeof(struct scsi_play_msf),
+ /* timeout */ 50000);
+
+ scsi_cmd = (struct scsi_play_msf *)&csio->cdb_io.cdb_bytes;
+ bzero (scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->op_code = PLAY_MSF;
+ scsi_cmd->start_m = startm;
+ scsi_cmd->start_s = starts;
+ scsi_cmd->start_f = startf;
+ scsi_cmd->end_m = endm;
+ scsi_cmd->end_s = ends;
+ scsi_cmd->end_f = endf;
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+
+static int
+cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex,
+ u_int32_t etrack, u_int32_t eindex)
+{
+ struct scsi_play_track *scsi_cmd;
+ struct ccb_scsiio *csio;
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ csio = &ccb->csio;
+
+ cam_fill_csio(csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+ /* flags */ CAM_DIR_IN,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* data_ptr */ NULL,
+ /* dxfer_len */ 0,
+ /* sense_len */ SSD_FULL_SIZE,
+ sizeof(struct scsi_play_track),
+ /* timeout */ 50000);
+
+ scsi_cmd = (struct scsi_play_track *)&csio->cdb_io.cdb_bytes;
+ bzero (scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->op_code = PLAY_TRACK;
+ scsi_cmd->start_track = strack;
+ scsi_cmd->start_index = sindex;
+ scsi_cmd->end_track = etrack;
+ scsi_cmd->end_index = eindex;
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+static int
+cdpause(struct cam_periph *periph, u_int32_t go)
+{
+ struct scsi_pause *scsi_cmd;
+ struct ccb_scsiio *csio;
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ csio = &ccb->csio;
+
+ cam_fill_csio(csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+	              /* flags */ CAM_DIR_NONE,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* data_ptr */ NULL,
+ /* dxfer_len */ 0,
+ /* sense_len */ SSD_FULL_SIZE,
+	              /* cdb_len */ sizeof(struct scsi_pause),
+ /* timeout */ 50000);
+
+ scsi_cmd = (struct scsi_pause *)&csio->cdb_io.cdb_bytes;
+ bzero (scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->op_code = PAUSE;
+ scsi_cmd->resume = go;
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+static int
+cdstartunit(struct cam_periph *periph)
+{
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ scsi_start_stop(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* start */ TRUE,
+ /* load_eject */ TRUE,
+ /* immediate */ FALSE,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ 50000);
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+static int
+cdstopunit(struct cam_periph *periph, u_int32_t eject)
+{
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+
+ ccb = cdgetccb(periph, /* priority */ 1);
+
+ scsi_start_stop(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ cddone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* start */ FALSE,
+ /* load_eject */ eject,
+ /* immediate */ FALSE,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ 50000);
+
+ error = cdrunccb(ccb, cderror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
diff --git a/sys/cam/scsi/scsi_cd.h b/sys/cam/scsi/scsi_cd.h
new file mode 100644
index 0000000..8a76b0a
--- /dev/null
+++ b/sys/cam/scsi/scsi_cd.h
@@ -0,0 +1,217 @@
+/*
+ * Written by Julian Elischer (julian@tfs.com)
+ * for TRW Financial Systems.
+ *
+ * TRW Financial Systems, in accordance with their agreement with Carnegie
+ * Mellon University, makes this software available to CMU to distribute
+ * or use in any manner that they see fit as long as this message is kept with
+ * the software. For this reason TFS also grants any other persons or
+ * organisations permission to use or modify this software.
+ *
+ * TFS supplies this software to be publicly redistributed
+ * on the understanding that TFS is not responsible for the correct
+ * functioning of this software in any circumstances.
+ *
+ * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
+ *
+ * from: scsi_cd.h,v 1.10 1997/02/22 09:44:28 peter Exp $
+ */
+#ifndef _SCSI_SCSI_CD_H
+#define _SCSI_SCSI_CD_H 1
+
+/*
+ * Define two bits always in the same place in byte 2 (flag byte)
+ */
+#define CD_RELADDR 0x01
+#define CD_MSF 0x02
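+
+/*
+ * Illustrative sketch only, not part of the driver: a caller building a
+ * READ TOC CDB could ask for MSF-format addresses by OR-ing CD_MSF into
+ * the flag byte ("rt" is just a hypothetical local), roughly:
+ *
+ *	struct scsi_read_toc rt;
+ *
+ *	bzero(&rt, sizeof(rt));
+ *	rt.op_code = READ_TOC;
+ *	rt.byte2 |= CD_MSF;
+ *
+ * Whether MSF or LBA addressing is more convenient is up to the caller.
+ */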
+
+/*
+ * SCSI command format
+ */
+
+struct scsi_pause
+{
+ u_char op_code;
+ u_char byte2;
+ u_char unused[6];
+ u_char resume;
+ u_char control;
+};
+#define PA_PAUSE 1
+#define PA_RESUME 0
+
+struct scsi_play_msf
+{
+ u_char op_code;
+ u_char byte2;
+ u_char unused;
+ u_char start_m;
+ u_char start_s;
+ u_char start_f;
+ u_char end_m;
+ u_char end_s;
+ u_char end_f;
+ u_char control;
+};
+
+struct scsi_play_track
+{
+ u_char op_code;
+ u_char byte2;
+ u_char unused[2];
+ u_char start_track;
+ u_char start_index;
+ u_char unused1;
+ u_char end_track;
+ u_char end_index;
+ u_char control;
+};
+
+struct scsi_play
+{
+ u_char op_code;
+ u_char byte2;
+ u_char blk_addr[4];
+ u_char unused;
+ u_char xfer_len[2];
+ u_char control;
+};
+
+struct scsi_play_big
+{
+ u_char op_code;
+ u_char byte2; /* same as above */
+ u_char blk_addr[4];
+ u_char xfer_len[4];
+ u_char unused;
+ u_char control;
+};
+
+struct scsi_play_rel_big
+{
+ u_char op_code;
+ u_char byte2; /* same as above */
+ u_char blk_addr[4];
+ u_char xfer_len[4];
+ u_char track;
+ u_char control;
+};
+
+struct scsi_read_header
+{
+ u_char op_code;
+ u_char byte2;
+ u_char blk_addr[4];
+ u_char unused;
+ u_char data_len[2];
+ u_char control;
+};
+
+struct scsi_read_subchannel
+{
+ u_char op_code;
+ u_char byte1;
+ u_char byte2;
+#define SRS_SUBQ 0x40
+ u_char subchan_format;
+ u_char unused[2];
+ u_char track;
+ u_char data_len[2];
+ u_char control;
+};
+
+struct scsi_read_toc
+{
+ u_char op_code;
+ u_char byte2;
+ u_char unused[4];
+ u_char from_track;
+ u_char data_len[2];
+ u_char control;
+};
+
+struct scsi_read_cd_capacity
+{
+ u_char op_code;
+ u_char byte2;
+ u_char addr_3; /* Most Significant */
+ u_char addr_2;
+ u_char addr_1;
+ u_char addr_0; /* Least Significant */
+ u_char unused[3];
+ u_char control;
+};
+
+/*
+ * Opcodes
+ */
+
+#define READ_CD_CAPACITY 0x25 /* slightly different from disk */
+#define READ_SUBCHANNEL 0x42 /* cdrom read Subchannel */
+#define READ_TOC 0x43 /* cdrom read TOC */
+#define READ_HEADER 0x44 /* cdrom read header */
+#define PLAY 0x45 /* cdrom play 'play audio' mode */
+#define PLAY_MSF 0x47 /* cdrom play Min,Sec,Frames mode */
+#define PLAY_TRACK 0x48 /* cdrom play track/index mode */
+#define PLAY_TRACK_REL 0x49 /* cdrom play track/index mode */
+#define PAUSE 0x4b /* cdrom pause in 'play audio' mode */
+#define PLAY_BIG 0xa5 /* cdrom play 'play audio' mode, 12-byte cmd */
+#define PLAY_TRACK_REL_BIG 0xa9 /* cdrom play track/index mode */
+
+
+
+struct scsi_read_cd_cap_data
+{
+ u_char addr_3; /* Most significant */
+ u_char addr_2;
+ u_char addr_1;
+ u_char addr_0; /* Least significant */
+ u_char length_3; /* Most significant */
+ u_char length_2;
+ u_char length_1;
+ u_char length_0; /* Least significant */
+};
+
+union cd_pages
+{
+ struct audio_page
+ {
+ u_char page_code;
+#define CD_PAGE_CODE 0x3F
+#define AUDIO_PAGE 0x0e
+#define CD_PAGE_PS 0x80
+ u_char param_len;
+ u_char flags;
+#define CD_PA_SOTC 0x02
+#define CD_PA_IMMED 0x04
+ u_char unused[2];
+ u_char format_lba;
+#define CD_PA_FORMAT_LBA 0x0F
+#define CD_PA_APR_VALID 0x80
+ u_char lb_per_sec[2];
+ struct port_control
+ {
+ u_char channels;
+#define CHANNEL 0x0F
+#define CHANNEL_0 1
+#define CHANNEL_1 2
+#define CHANNEL_2 4
+#define CHANNEL_3 8
+#define LEFT_CHANNEL CHANNEL_0
+#define RIGHT_CHANNEL CHANNEL_1
+ u_char volume;
+ } port[4];
+#define LEFT_PORT 0
+#define RIGHT_PORT 1
+ }audio;
+};
+
+struct cd_mode_data
+{
+ struct scsi_mode_header_6 header;
+ struct scsi_mode_blk_desc blk_desc;
+ union cd_pages page;
+};
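+
+/*
+ * Rough usage sketch, for illustration only: audio settings reach the
+ * drive as a MODE SELECT of this structure.  A caller might fill in the
+ * audio page along these lines before handing the buffer to the cd
+ * driver's MODE SELECT path; "m" is a hypothetical local and 0xff is
+ * just an example volume.
+ *
+ *	struct cd_mode_data m;
+ *
+ *	bzero(&m, sizeof(m));
+ *	m.page.audio.page_code = AUDIO_PAGE;
+ *	m.page.audio.param_len = sizeof(m.page.audio) - 2;
+ *	m.page.audio.port[LEFT_PORT].channels = CHANNEL_0;
+ *	m.page.audio.port[LEFT_PORT].volume = 0xff;
+ *	m.page.audio.port[RIGHT_PORT].channels = CHANNEL_1;
+ *	m.page.audio.port[RIGHT_PORT].volume = 0xff;
+ */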
+#endif /*_SCSI_SCSI_CD_H*/
+
diff --git a/sys/cam/scsi/scsi_ch.c b/sys/cam/scsi/scsi_ch.c
new file mode 100644
index 0000000..0f5d9dc
--- /dev/null
+++ b/sys/cam/scsi/scsi_ch.c
@@ -0,0 +1,1584 @@
+/*
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: scsi_ch.c,v 1.1 1998/05/17 13:08:49 hans Exp hans $
+ */
+/*
+ * Derived from the NetBSD SCSI changer driver.
+ *
+ * $NetBSD: ch.c,v 1.32 1998/01/12 09:49:12 thorpej Exp $
+ *
+ */
+/*
+ * Copyright (c) 1996, 1997 Jason R. Thorpe <thorpej@and.com>
+ * All rights reserved.
+ *
+ * Partially based on an autochanger driver written by Stefan Grefen
+ * and on an autochanger driver written by the Systems Programming Group
+ * at the University of Utah Computer Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgements:
+ * This product includes software developed by Jason R. Thorpe
+ * for And Communications, http://www.and.com/
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/dkbad.h>
+#include <sys/malloc.h>
+#include <sys/fcntl.h>
+#include <sys/stat.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/chio.h>
+#include <sys/errno.h>
+#include <sys/devicestat.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_extend.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_queue.h>
+#include <cam/cam_debug.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/scsi/scsi_ch.h>
+
+/*
+ * Timeout definitions for various changer related commands. They may
+ * be too short for some devices (especially the timeout for INITIALIZE
+ * ELEMENT STATUS).
+ */
+
+const u_int32_t CH_TIMEOUT_MODE_SENSE = 6000;
+const u_int32_t CH_TIMEOUT_MOVE_MEDIUM = 100000;
+const u_int32_t CH_TIMEOUT_EXCHANGE_MEDIUM = 100000;
+const u_int32_t CH_TIMEOUT_POSITION_TO_ELEMENT = 100000;
+const u_int32_t CH_TIMEOUT_READ_ELEMENT_STATUS = 10000;
+const u_int32_t CH_TIMEOUT_SEND_VOLTAG = 10000;
+const u_int32_t CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS = 500000;
+
+typedef enum {
+ CH_FLAG_INVALID = 0x001,
+ CH_FLAG_OPEN = 0x002
+} ch_flags;
+
+typedef enum {
+ CH_STATE_PROBE,
+ CH_STATE_NORMAL
+} ch_state;
+
+typedef enum {
+ CH_CCB_PROBE,
+ CH_CCB_WAITING
+} ch_ccb_types;
+
+#define ccb_state ppriv_field0
+#define ccb_bp ppriv_ptr1
+
+struct scsi_mode_sense_data {
+ struct scsi_mode_header_6 header;
+ union {
+ struct page_element_address_assignment ea;
+ struct page_transport_geometry_parameters tg;
+ struct page_device_capabilities cap;
+ } pages;
+};
+
+struct ch_softc {
+ ch_flags flags;
+ ch_state state;
+ union ccb saved_ccb;
+ struct devstat device_stats;
+
+ int sc_picker; /* current picker */
+
+ /*
+ * The following information is obtained from the
+ * element address assignment page.
+ */
+ int sc_firsts[4]; /* firsts, indexed by CHET_* */
+ int sc_counts[4]; /* counts, indexed by CHET_* */
+
+ /*
+ * The following mask defines the legal combinations
+ * of elements for the MOVE MEDIUM command.
+ */
+ u_int8_t sc_movemask[4];
+
+ /*
+ * As above, but for EXCHANGE MEDIUM.
+ */
+ u_int8_t sc_exchangemask[4];
+
+ /*
+ * Quirks; see below. XXX KDM not implemented yet
+ */
+ int sc_settledelay; /* delay for settle */
+};
+
+#define CHUNIT(x) (minor((x)))
+#define CH_CDEV_MAJOR 17
+
+static d_open_t chopen;
+static d_close_t chclose;
+static d_ioctl_t chioctl;
+static periph_init_t chinit;
+static periph_ctor_t chregister;
+static periph_dtor_t chcleanup;
+static periph_start_t chstart;
+static void chasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg);
+static void chdone(struct cam_periph *periph,
+ union ccb *done_ccb);
+static int cherror(union ccb *ccb, u_int32_t cam_flags,
+ u_int32_t sense_flags);
+static int chmove(struct cam_periph *periph,
+ struct changer_move *cm);
+static int chexchange(struct cam_periph *periph,
+ struct changer_exchange *ce);
+static int chposition(struct cam_periph *periph,
+ struct changer_position *cp);
+static int chgetelemstatus(struct cam_periph *periph,
+ struct changer_element_status_request *csr);
+static int chsetvoltag(struct cam_periph *periph,
+ struct changer_set_voltag_request *csvr);
+static int chielem(struct cam_periph *periph,
+ unsigned int timeout);
+static int chgetparams(struct cam_periph *periph);
+
+static struct periph_driver chdriver =
+{
+ chinit, "ch",
+ TAILQ_HEAD_INITIALIZER(chdriver.units), /* generation */ 0
+};
+
+DATA_SET(periphdriver_set, chdriver);
+
+static struct cdevsw ch_cdevsw =
+{
+ /*d_open*/ chopen,
+ /*d_close*/ chclose,
+ /*d_read*/ noread,
+ /*d_write*/ nowrite,
+ /*d_ioctl*/ chioctl,
+ /*d_stop*/ nostop,
+ /*d_reset*/ noreset,
+ /*d_devtotty*/ nodevtotty,
+ /*d_poll*/ seltrue,
+ /*d_mmap*/ nommap,
+ /*d_strategy*/ nostrategy,
+ /*d_name*/ "ch",
+ /*d_spare*/ NULL,
+ /*d_maj*/ -1,
+ /*d_dump*/ nodump,
+ /*d_psize*/ nopsize,
+ /*d_flags*/ 0,
+ /*d_maxio*/ 0,
+ /*b_maj*/ -1
+};
+
+static struct extend_array *chperiphs;
+
+void
+chinit(void)
+{
+ cam_status status;
+ struct cam_path *path;
+
+ /*
+ * Create our extend array for storing the devices we attach to.
+ */
+ chperiphs = cam_extend_new();
+ if (chperiphs == NULL) {
+ printf("ch: Failed to alloc extend array!\n");
+ return;
+ }
+
+ /*
+ * Install a global async callback. This callback will
+ * receive async callbacks like "new device found".
+ */
+ status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+
+ if (status == CAM_REQ_CMP) {
+ struct ccb_setasync csa;
+
+ xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_FOUND_DEVICE;
+ csa.callback = chasync;
+ csa.callback_arg = NULL;
+ xpt_action((union ccb *)&csa);
+ status = csa.ccb_h.status;
+ xpt_free_path(path);
+ }
+
+ if (status != CAM_REQ_CMP) {
+ printf("ch: Failed to attach master async callback "
+ "due to status 0x%x!\n", status);
+ } else {
+ dev_t dev;
+
+ /* If we were successful, register our devsw */
+ dev = makedev(CH_CDEV_MAJOR, 0);
+ cdevsw_add(&dev, &ch_cdevsw, NULL);
+ }
+}
+
+static void
+chcleanup(struct cam_periph *periph)
+{
+
+ cam_extend_release(chperiphs, periph->unit_number);
+ xpt_print_path(periph->path);
+ printf("removing device entry\n");
+ free(periph->softc, M_DEVBUF);
+}
+
+static void
+chasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
+{
+ struct cam_periph *periph;
+
+ periph = (struct cam_periph *)callback_arg;
+
+ switch(code) {
+ case AC_FOUND_DEVICE:
+ {
+ struct ccb_getdev *cgd;
+ cam_status status;
+
+ cgd = (struct ccb_getdev *)arg;
+
+ if (cgd->pd_type != T_CHANGER)
+ break;
+
+ /*
+ * Allocate a peripheral instance for
+ * this device and start the probe
+ * process.
+ */
+ status = cam_periph_alloc(chregister, chcleanup, chstart,
+ "ch", CAM_PERIPH_BIO, cgd->ccb_h.path,
+ chasync, AC_FOUND_DEVICE, cgd);
+
+ if (status != CAM_REQ_CMP
+ && status != CAM_REQ_INPROG)
+ printf("chasync: Unable to probe new device "
+ "due to status 0x%x\n", status);
+
+ break;
+
+ }
+ case AC_LOST_DEVICE:
+ {
+ int s;
+ struct ch_softc *softc;
+ struct ccb_setasync csa;
+
+ softc = (struct ch_softc *)periph->softc;
+
+ /*
+ * Insure that no other async callbacks that
+ * might affect this peripheral can come through.
+ */
+ s = splcam();
+
+ /*
+ * De-register any async callbacks.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path,
+ /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = 0;
+ csa.callback = chasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ softc->flags |= CH_FLAG_INVALID;
+
+ devstat_remove_entry(&softc->device_stats);
+
+ xpt_print_path(periph->path);
+ printf("lost device\n");
+
+ splx(s);
+
+ cam_periph_invalidate(periph);
+ break;
+ }
+ case AC_TRANSFER_NEG:
+ case AC_SENT_BDR:
+ case AC_SCSI_AEN:
+ case AC_UNSOL_RESEL:
+ case AC_BUS_RESET:
+ default:
+ break;
+ }
+}
+
+static cam_status
+chregister(struct cam_periph *periph, void *arg)
+{
+ int s;
+ struct ch_softc *softc;
+ struct ccb_setasync csa;
+ struct ccb_getdev *cgd;
+
+ cgd = (struct ccb_getdev *)arg;
+ if (periph == NULL) {
+ printf("chregister: periph was NULL!!\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ if (cgd == NULL) {
+ printf("chregister: no getdev CCB, can't register device\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ softc = (struct ch_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
+
+ if (softc == NULL) {
+ printf("chregister: Unable to probe new device. "
+ "Unable to allocate softc\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ bzero(softc, sizeof(*softc));
+ softc->state = CH_STATE_PROBE;
+ periph->softc = softc;
+ cam_extend_set(chperiphs, periph->unit_number, periph);
+
+ /*
+ * Changers don't have a blocksize, and obviously don't support
+ * tagged queueing.
+ */
+ devstat_add_entry(&softc->device_stats, "ch",
+ periph->unit_number, 0,
+ DEVSTAT_NO_BLOCKSIZE | DEVSTAT_NO_ORDERED_TAGS,
+ cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);
+
+ /*
+ * Add an async callback so that we get
+ * notified if this device goes away.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_LOST_DEVICE;
+ csa.callback = chasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ /*
+ * Lock this peripheral until we are setup.
+ * This first call can't block
+ */
+ (void)cam_periph_lock(periph, PRIBIO);
+ xpt_schedule(periph, /*priority*/5);
+
+ return(CAM_REQ_CMP);
+}
+
+static int
+chopen(dev_t dev, int flags, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct ch_softc *softc;
+ int unit, error;
+
+ unit = CHUNIT(dev);
+ periph = cam_extend_get(chperiphs, unit);
+
+ if (periph == NULL)
+ return(ENXIO);
+
+ softc = (struct ch_softc *)periph->softc;
+
+ if (softc->flags & CH_FLAG_INVALID)
+ return(ENXIO);
+
+ if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0)
+ return (error);
+
+ if ((softc->flags & CH_FLAG_OPEN) == 0) {
+ if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
+ 	cam_periph_unlock(periph);
+ 	return(ENXIO);
+ }
+ softc->flags |= CH_FLAG_OPEN;
+ }
+
+ /*
+ * Load information about this changer device into the softc.
+ */
+ if ((error = chgetparams(periph)) != 0) {
+ softc->flags &= ~CH_FLAG_OPEN;
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+ return(error);
+ }
+
+ cam_periph_unlock(periph);
+
+ return(error);
+}
+
+static int
+chclose(dev_t dev, int flag, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct ch_softc *softc;
+ int unit, error;
+
+ error = 0;
+
+ unit = CHUNIT(dev);
+ periph = cam_extend_get(chperiphs, unit);
+ if (periph == NULL)
+ return(ENXIO);
+
+ softc = (struct ch_softc *)periph->softc;
+
+ if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
+ return(error);
+
+ softc->flags &= ~CH_FLAG_OPEN;
+
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+
+ return(0);
+}
+
+static void
+chstart(struct cam_periph *periph, union ccb *start_ccb)
+{
+ struct ch_softc *softc;
+ int s;
+
+ softc = (struct ch_softc *)periph->softc;
+
+ switch (softc->state) {
+ case CH_STATE_NORMAL:
+ {
+ s = splbio();
+ if (periph->immediate_priority <= periph->pinfo.priority){
+ start_ccb->ccb_h.ccb_state = CH_CCB_WAITING;
+
+ SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
+ periph_links.sle);
+ periph->immediate_priority = CAM_PRIORITY_NONE;
+ splx(s);
+ wakeup(&periph->ccb_list);
+ } else
+ splx(s);
+ break;
+ }
+ case CH_STATE_PROBE:
+ {
+ struct scsi_mode_sense_data *sense_data;
+
+ sense_data = (struct scsi_mode_sense_data *)malloc(
+ sizeof(*sense_data),
+ M_TEMP, M_NOWAIT);
+
+ if (sense_data == NULL) {
+ printf("chstart: couldn't malloc mode sense data\n");
+ break;
+ }
+ bzero(sense_data, sizeof(*sense_data));
+
+ /*
+ * Get the element address assignment page.
+ */
+ scsi_mode_sense(&start_ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* dbd */ TRUE,
+ /* page_code */ SMS_PAGE_CTRL_CURRENT,
+ /* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE,
+ /* param_buf */ (u_int8_t *)sense_data,
+ /* param_len */ sizeof(*sense_data),
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ CH_TIMEOUT_MODE_SENSE);
+
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = CH_CCB_PROBE;
+ xpt_action(start_ccb);
+ break;
+ }
+ }
+}
+
+static void
+chdone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct ch_softc *softc;
+ struct ccb_scsiio *csio;
+
+ softc = (struct ch_softc *)periph->softc;
+ csio = &done_ccb->csio;
+
+ switch(done_ccb->ccb_h.ccb_state) {
+ case CH_CCB_PROBE:
+ {
+ struct scsi_mode_sense_data *sense_data;
+ char announce_buf[80];
+
+ sense_data = (struct scsi_mode_sense_data *)csio->data_ptr;
+
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP){
+
+ softc->sc_firsts[CHET_MT] =
+ scsi_2btoul(sense_data->pages.ea.mtea);
+ softc->sc_counts[CHET_MT] =
+ scsi_2btoul(sense_data->pages.ea.nmte);
+ softc->sc_firsts[CHET_ST] =
+ scsi_2btoul(sense_data->pages.ea.fsea);
+ softc->sc_counts[CHET_ST] =
+ scsi_2btoul(sense_data->pages.ea.nse);
+ softc->sc_firsts[CHET_IE] =
+ scsi_2btoul(sense_data->pages.ea.fieea);
+ softc->sc_counts[CHET_IE] =
+ scsi_2btoul(sense_data->pages.ea.niee);
+ softc->sc_firsts[CHET_DT] =
+ scsi_2btoul(sense_data->pages.ea.fdtea);
+ softc->sc_counts[CHET_DT] =
+ scsi_2btoul(sense_data->pages.ea.ndte);
+ softc->sc_picker = softc->sc_firsts[CHET_MT];
+
+#define PLURAL(c) ((c) == 1 ? "" : "s")
+ sprintf(announce_buf, "%d slot%s, %d drive%s, "
+ "%d picker%s, %d portal%s",
+ softc->sc_counts[CHET_ST],
+ PLURAL(softc->sc_counts[CHET_ST]),
+ softc->sc_counts[CHET_DT],
+ PLURAL(softc->sc_counts[CHET_DT]),
+ softc->sc_counts[CHET_MT],
+ PLURAL(softc->sc_counts[CHET_MT]),
+ softc->sc_counts[CHET_IE],
+ PLURAL(softc->sc_counts[CHET_IE]));
+#undef PLURAL
+ } else {
+ int error;
+
+ error = cherror(done_ccb, 0, SF_RETRY_UA);
+ /*
+ * Retry any UNIT ATTENTION type errors. They
+ * are expected at boot.
+ */
+ if (error == ERESTART) {
+ /*
+ * A retry was scheduled, so
+ * just return.
+ */
+ return;
+ } else if (error != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ sprintf(announce_buf,
+ "Attempt to query device parameters"
+ " failed");
+ }
+ }
+ xpt_announce_periph(periph, announce_buf);
+ softc->state = CH_STATE_NORMAL;
+ free(sense_data, M_TEMP);
+ cam_periph_unlock(periph);
+ break;
+ }
+ case CH_CCB_WAITING:
+ {
+ /* Caller will release the CCB */
+ wakeup(&done_ccb->ccb_h.cbfcnp);
+ return;
+ }
+ }
+ xpt_release_ccb(done_ccb);
+}
+
+static int
+cherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
+{
+ struct ch_softc *softc;
+ struct cam_periph *periph;
+
+ periph = xpt_path_periph(ccb->ccb_h.path);
+ softc = (struct ch_softc *)periph->softc;
+
+ return (cam_periph_error(ccb, cam_flags, sense_flags,
+ &softc->saved_ccb));
+}
+
+static int
+chioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct ch_softc *softc;
+ u_int8_t unit;
+ int error;
+
+ unit = CHUNIT(dev);
+
+ periph = cam_extend_get(chperiphs, unit);
+ if (periph == NULL)
+ return(ENXIO);
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering chioctl\n"));
+
+ softc = (struct ch_softc *)periph->softc;
+
+ error = 0;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
+ ("trying to do ioctl %#x\n", cmd));
+
+ /*
+ * If this command can change the device's state, we must
+ * have the device open for writing.
+ */
+ switch (cmd) {
+ case CHIOGPICKER:
+ case CHIOGPARAMS:
+ case CHIOGSTATUS:
+ break;
+
+ default:
+ if ((flag & FWRITE) == 0)
+ return (EBADF);
+ }
+
+ switch (cmd) {
+ case CHIOMOVE:
+ error = chmove(periph, (struct changer_move *)addr);
+ break;
+
+ case CHIOEXCHANGE:
+ error = chexchange(periph, (struct changer_exchange *)addr);
+ break;
+
+ case CHIOPOSITION:
+ error = chposition(periph, (struct changer_position *)addr);
+ break;
+
+ case CHIOGPICKER:
+ *(int *)addr = softc->sc_picker - softc->sc_firsts[CHET_MT];
+ break;
+
+ case CHIOSPICKER:
+ {
+ int new_picker = *(int *)addr;
+
+ if (new_picker > (softc->sc_counts[CHET_MT] - 1))
+ return (EINVAL);
+ softc->sc_picker = softc->sc_firsts[CHET_MT] + new_picker;
+ break;
+ }
+ case CHIOGPARAMS:
+ {
+ struct changer_params *cp = (struct changer_params *)addr;
+
+ cp->cp_npickers = softc->sc_counts[CHET_MT];
+ cp->cp_nslots = softc->sc_counts[CHET_ST];
+ cp->cp_nportals = softc->sc_counts[CHET_IE];
+ cp->cp_ndrives = softc->sc_counts[CHET_DT];
+ break;
+ }
+ case CHIOIELEM:
+ error = chielem(periph, *(unsigned int *)addr);
+ break;
+
+ case CHIOGSTATUS:
+ {
+ error = chgetelemstatus(periph,
+ (struct changer_element_status_request *) addr);
+ break;
+ }
+
+ case CHIOSETVOLTAG:
+ {
+ error = chsetvoltag(periph,
+ (struct changer_set_voltag_request *) addr);
+ break;
+ }
+
+ /* Implement prevent/allow? */
+
+ default:
+ error = cam_periph_ioctl(periph, cmd, addr, cherror);
+ break;
+ }
+
+ return (error);
+}
+
+static int
+chmove(struct cam_periph *periph, struct changer_move *cm)
+{
+ struct ch_softc *softc;
+ u_int16_t fromelem, toelem;
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+ softc = (struct ch_softc *)periph->softc;
+
+ /*
+ * Check arguments.
+ */
+ if ((cm->cm_fromtype > CHET_DT) || (cm->cm_totype > CHET_DT))
+ return (EINVAL);
+ if ((cm->cm_fromunit > (softc->sc_counts[cm->cm_fromtype] - 1)) ||
+ (cm->cm_tounit > (softc->sc_counts[cm->cm_totype] - 1)))
+ return (ENODEV);
+
+ /*
+ * Check the request against the changer's capabilities.
+ */
+ if ((softc->sc_movemask[cm->cm_fromtype] & (1 << cm->cm_totype)) == 0)
+ return (EINVAL);
+
+ /*
+ * Calculate the source and destination elements.
+ */
+ fromelem = softc->sc_firsts[cm->cm_fromtype] + cm->cm_fromunit;
+ toelem = softc->sc_firsts[cm->cm_totype] + cm->cm_tounit;
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ scsi_move_medium(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* tea */ softc->sc_picker,
+ /* src */ fromelem,
+ /* dst */ toelem,
+ /* invert */ (cm->cm_flags & CM_INVERT) ? TRUE : FALSE,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ CH_TIMEOUT_MOVE_MEDIUM);
+
+ error = cam_periph_runccb(ccb, cherror, /*cam_flags*/0,
+ /*sense_flags*/ SF_RETRY_UA,
+ &softc->device_stats);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+static int
+chexchange(struct cam_periph *periph, struct changer_exchange *ce)
+{
+ struct ch_softc *softc;
+ u_int16_t src, dst1, dst2;
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+ softc = (struct ch_softc *)periph->softc;
+ /*
+ * Check arguments.
+ */
+ if ((ce->ce_srctype > CHET_DT) || (ce->ce_fdsttype > CHET_DT) ||
+ (ce->ce_sdsttype > CHET_DT))
+ return (EINVAL);
+ if ((ce->ce_srcunit > (softc->sc_counts[ce->ce_srctype] - 1)) ||
+ (ce->ce_fdstunit > (softc->sc_counts[ce->ce_fdsttype] - 1)) ||
+ (ce->ce_sdstunit > (softc->sc_counts[ce->ce_sdsttype] - 1)))
+ return (ENODEV);
+
+ /*
+ * Check the request against the changer's capabilities.
+ */
+ if (((softc->sc_exchangemask[ce->ce_srctype] &
+ (1 << ce->ce_fdsttype)) == 0) ||
+ ((softc->sc_exchangemask[ce->ce_fdsttype] &
+ (1 << ce->ce_sdsttype)) == 0))
+ return (EINVAL);
+
+ /*
+ * Calculate the source and destination elements.
+ */
+ src = softc->sc_firsts[ce->ce_srctype] + ce->ce_srcunit;
+ dst1 = softc->sc_firsts[ce->ce_fdsttype] + ce->ce_fdstunit;
+ dst2 = softc->sc_firsts[ce->ce_sdsttype] + ce->ce_sdstunit;
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ scsi_exchange_medium(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* tea */ softc->sc_picker,
+ /* src */ src,
+ /* dst1 */ dst1,
+ /* dst2 */ dst2,
+ /* invert1 */ (ce->ce_flags & CE_INVERT1) ?
+ TRUE : FALSE,
+ /* invert2 */ (ce->ce_flags & CE_INVERT2) ?
+ TRUE : FALSE,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ CH_TIMEOUT_EXCHANGE_MEDIUM);
+
+ error = cam_periph_runccb(ccb, cherror, /*cam_flags*/0,
+ /*sense_flags*/ SF_RETRY_UA,
+ &softc->device_stats);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+static int
+chposition(struct cam_periph *periph, struct changer_position *cp)
+{
+ struct ch_softc *softc;
+ u_int16_t dst;
+ union ccb *ccb;
+ int error;
+
+ error = 0;
+ softc = (struct ch_softc *)periph->softc;
+
+ /*
+ * Check arguments.
+ */
+ if (cp->cp_type > CHET_DT)
+ return (EINVAL);
+ if (cp->cp_unit > (softc->sc_counts[cp->cp_type] - 1))
+ return (ENODEV);
+
+ /*
+ * Calculate the destination element.
+ */
+ dst = softc->sc_firsts[cp->cp_type] + cp->cp_unit;
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ scsi_position_to_element(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* tea */ softc->sc_picker,
+ /* dst */ dst,
+ /* invert */ (cp->cp_flags & CP_INVERT) ?
+ TRUE : FALSE,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ CH_TIMEOUT_POSITION_TO_ELEMENT);
+
+ error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ 0,
+ /*sense_flags*/ SF_RETRY_UA,
+ &softc->device_stats);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+/*
+ * Copy a volume tag to a volume_tag struct, converting SCSI byte order
+ * to host native byte order in the volume serial number. The volume
+ * label as returned by the changer is transferred to user mode as
+ * a nul-terminated string. Volume labels are truncated at the first
+ * space, as suggested by SCSI-2.
+ */
+static void
+copy_voltag(struct changer_voltag *uvoltag, struct volume_tag *voltag)
+{
+ int i;
+ for (i=0; i<CH_VOLTAG_MAXLEN; i++) {
+ char c = voltag->vif[i];
+ if (c && c != ' ')
+ uvoltag->cv_volid[i] = c;
+ else
+ break;
+ }
+ uvoltag->cv_serial = scsi_2btoul(voltag->vsn);
+}
+
+/*
+ * Copy an element status descriptor to a user-mode
+ * changer_element_status structure.
+ */
+
+static void
+copy_element_status(struct ch_softc *softc,
+ u_int16_t flags,
+ struct read_element_status_descriptor *desc,
+ struct changer_element_status *ces)
+{
+ u_int16_t eaddr = scsi_2btoul(desc->eaddr);
+ u_int16_t et;
+
+ ces->ces_int_addr = eaddr;
+ /* set up logical address in element status */
+ for (et = CHET_MT; et <= CHET_DT; et++) {
+ if ((softc->sc_firsts[et] <= eaddr)
+ && ((softc->sc_firsts[et] + softc->sc_counts[et])
+ > eaddr)) {
+ ces->ces_addr = eaddr - softc->sc_firsts[et];
+ ces->ces_type = et;
+ break;
+ }
+ }
+
+ ces->ces_flags = desc->flags1;
+
+ ces->ces_sensecode = desc->sense_code;
+ ces->ces_sensequal = desc->sense_qual;
+
+ if (desc->flags2 & READ_ELEMENT_STATUS_INVERT)
+ ces->ces_flags |= CES_INVERT;
+
+ if (desc->flags2 & READ_ELEMENT_STATUS_SVALID) {
+
+ eaddr = scsi_2btoul(desc->ssea);
+
+ /* convert source address to logical format */
+ for (et = CHET_MT; et <= CHET_DT; et++) {
+ if ((softc->sc_firsts[et] <= eaddr)
+ && ((softc->sc_firsts[et] + softc->sc_counts[et])
+ > eaddr)) {
+ ces->ces_source_addr =
+ eaddr - softc->sc_firsts[et];
+ ces->ces_source_type = et;
+ ces->ces_flags |= CES_SOURCE_VALID;
+ break;
+ }
+ }
+
+ if (!(ces->ces_flags & CES_SOURCE_VALID))
+ printf("ch: warning: could not map element source "
+ "address %u to a valid element type\n",
+ eaddr);
+ }
+
+
+ if (flags & READ_ELEMENT_STATUS_PVOLTAG)
+ copy_voltag(&(ces->ces_pvoltag), &(desc->pvoltag));
+ if (flags & READ_ELEMENT_STATUS_AVOLTAG)
+ copy_voltag(&(ces->ces_avoltag), &(desc->avoltag));
+
+ if (desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_IDVALID) {
+ ces->ces_flags |= CES_SCSIID_VALID;
+ ces->ces_scsi_id = desc->dt_scsi_addr;
+ }
+
+ if (desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUVALID) {
+ ces->ces_flags |= CES_LUN_VALID;
+ ces->ces_scsi_lun =
+ desc->dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUNMASK;
+ }
+}
+
+static int
+chgetelemstatus(struct cam_periph *periph,
+ struct changer_element_status_request *cesr)
+{
+ struct read_element_status_header *st_hdr;
+ struct read_element_status_page_header *pg_hdr;
+ struct read_element_status_descriptor *desc;
+ caddr_t data = NULL;
+ size_t size, desclen;
+ int avail, i, error = 0;
+ struct changer_element_status *user_data = NULL;
+ struct ch_softc *softc;
+ union ccb *ccb;
+ int chet = cesr->cesr_element_type;
+ int want_voltags = (cesr->cesr_flags & CESR_VOLTAGS) ? 1 : 0;
+
+ softc = (struct ch_softc *)periph->softc;
+
+ /* perform argument checking */
+
+ /*
+ * Perform a range check on the cesr_element_{base,count}
+ * request argument fields.
+ */
+ if ((softc->sc_counts[chet] - cesr->cesr_element_base) <= 0
+ || (cesr->cesr_element_base + cesr->cesr_element_count)
+ > softc->sc_counts[chet])
+ return (EINVAL);
+
+ /*
+ * Request one descriptor for the given element type. This
+ * is used to determine the size of the descriptor so that
+ * we can allocate enough storage for all of them. We assume
+ * that the first one can fit into 1k.
+ */
+ data = (caddr_t)malloc(1024, M_DEVBUF, M_WAITOK);
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ scsi_read_element_status(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* voltag */ want_voltags,
+ /* sea */ softc->sc_firsts[chet],
+ /* count */ 1,
+ /* data_ptr */ data,
+ /* dxfer_len */ 1024,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS);
+
+ error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ 0,
+ /* sense_flags */ SF_RETRY_UA,
+ &softc->device_stats);
+
+ if (error)
+ goto done;
+
+ st_hdr = (struct read_element_status_header *)data;
+ pg_hdr = (struct read_element_status_page_header *)((u_long)st_hdr +
+ sizeof(struct read_element_status_header));
+ desclen = scsi_2btoul(pg_hdr->edl);
+
+ size = sizeof(struct read_element_status_header) +
+ sizeof(struct read_element_status_page_header) +
+ (desclen * cesr->cesr_element_count);
+
+ /*
+ * Reallocate storage for descriptors and get them from the
+ * device.
+ */
+ free(data, M_DEVBUF);
+ data = (caddr_t)malloc(size, M_DEVBUF, M_WAITOK);
+
+ scsi_read_element_status(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* voltag */ want_voltags,
+ /* sea */ softc->sc_firsts[chet]
+ + cesr->cesr_element_base,
+ /* count */ cesr->cesr_element_count,
+ /* data_ptr */ data,
+ /* dxfer_len */ size,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS);
+
+ error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ 0,
+ /* sense_flags */ SF_RETRY_UA,
+ &softc->device_stats);
+
+ if (error)
+ goto done;
+
+ /*
+ * Fill in the user status array.
+ */
+ st_hdr = (struct read_element_status_header *)data;
+ avail = scsi_2btoul(st_hdr->count);
+
+ if (avail != cesr->cesr_element_count) {
+ xpt_print_path(periph->path);
+ printf("warning, READ ELEMENT STATUS avail != count\n");
+ }
+
+ user_data = (struct changer_element_status *)
+ malloc(avail * sizeof(struct changer_element_status),
+ M_DEVBUF, M_WAITOK);
+ bzero(user_data, avail * sizeof(struct changer_element_status));
+
+ desc = (struct read_element_status_descriptor *)((u_long)data +
+ sizeof(struct read_element_status_header) +
+ sizeof(struct read_element_status_page_header));
+ /*
+ * Set up the individual element status structures
+ */
+ for (i = 0; i < avail; ++i) {
+ struct changer_element_status *ces = &(user_data[i]);
+
+ copy_element_status(softc, pg_hdr->flags, desc, ces);
+
+ desc = (struct read_element_status_descriptor *)((u_long)desc + desclen);
+ }
+
+ /* Copy element status structures out to userspace. */
+ error = copyout(user_data,
+ cesr->cesr_element_status,
+ avail * sizeof(struct changer_element_status));
+
+ done:
+ xpt_release_ccb(ccb);
+
+ if (data != NULL)
+ free(data, M_DEVBUF);
+ if (user_data != NULL)
+ free(user_data, M_DEVBUF);
+
+ return (error);
+}
+
+static int
+chielem(struct cam_periph *periph,
+ unsigned int timeout)
+{
+ union ccb *ccb;
+ struct ch_softc *softc;
+ int error;
+
+ if (!timeout) {
+ timeout = CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS;
+ } else {
+ timeout *= 1000;
+ }
+
+ error = 0;
+ softc = (struct ch_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ scsi_initialize_element_status(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ timeout);
+
+ error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ 0,
+ /* sense_flags */ SF_RETRY_UA,
+ &softc->device_stats);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+static int
+chsetvoltag(struct cam_periph *periph,
+ struct changer_set_voltag_request *csvr)
+{
+ union ccb *ccb;
+ struct ch_softc *softc;
+ u_int16_t ea;
+ u_int8_t sac;
+ struct scsi_send_volume_tag_parameters ssvtp;
+ int error;
+ int i;
+
+ error = 0;
+ softc = (struct ch_softc *)periph->softc;
+
+ bzero(&ssvtp, sizeof(ssvtp));
+ for (i=0; i<sizeof(ssvtp.vitf); i++) {
+ ssvtp.vitf[i] = ' ';
+ }
+
+ /*
+ * Check arguments.
+ */
+ if (csvr->csvr_type > CHET_DT)
+ return EINVAL;
+ if (csvr->csvr_addr > (softc->sc_counts[csvr->csvr_type] - 1))
+ return ENODEV;
+
+ ea = softc->sc_firsts[csvr->csvr_type] + csvr->csvr_addr;
+
+ if (csvr->csvr_flags & CSVR_ALTERNATE) {
+ switch (csvr->csvr_flags & CSVR_MODE_MASK) {
+ case CSVR_MODE_SET:
+ sac = SEND_VOLUME_TAG_ASSERT_ALTERNATE;
+ break;
+ case CSVR_MODE_REPLACE:
+ sac = SEND_VOLUME_TAG_REPLACE_ALTERNATE;
+ break;
+ case CSVR_MODE_CLEAR:
+ sac = SEND_VOLUME_TAG_UNDEFINED_ALTERNATE;
+ break;
+ default:
+ error = EINVAL;
+ goto out;
+ }
+ } else {
+ switch (csvr->csvr_flags & CSVR_MODE_MASK) {
+ case CSVR_MODE_SET:
+ sac = SEND_VOLUME_TAG_ASSERT_PRIMARY;
+ break;
+ case CSVR_MODE_REPLACE:
+ sac = SEND_VOLUME_TAG_REPLACE_PRIMARY;
+ break;
+ case CSVR_MODE_CLEAR:
+ sac = SEND_VOLUME_TAG_UNDEFINED_PRIMARY;
+ break;
+ default:
+ error = EINVAL;
+ goto out;
+ }
+ }
+
+ memcpy(ssvtp.vitf, csvr->csvr_voltag.cv_volid,
+ min(strlen(csvr->csvr_voltag.cv_volid), sizeof(ssvtp.vitf)));
+ scsi_ulto2b(csvr->csvr_voltag.cv_serial, ssvtp.minvsn);
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ scsi_send_volume_tag(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* element_address */ ea,
+ /* send_action_code */ sac,
+ /* parameters */ &ssvtp,
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ CH_TIMEOUT_SEND_VOLTAG);
+
+ error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ 0,
+ /*sense_flags*/ SF_RETRY_UA,
+ &softc->device_stats);
+
+ xpt_release_ccb(ccb);
+
+ out:
+ return error;
+}
+
+static int
+chgetparams(struct cam_periph *periph)
+{
+ union ccb *ccb;
+ struct ch_softc *softc;
+ struct scsi_mode_sense_data *sense_data;
+ int error, from;
+ u_int8_t *moves, *exchanges;
+
+ error = 0;
+
+ softc = (struct ch_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ sense_data = (struct scsi_mode_sense_data *)malloc(sizeof(*sense_data),
+ M_TEMP, M_NOWAIT);
+ if (sense_data == NULL) {
+ printf("chgetparams: couldn't malloc mode sense data\n");
+ return(ENOSPC);
+ }
+
+ bzero(sense_data, sizeof(*sense_data));
+
+ /*
+ * Get the element address assignment page.
+ */
+ scsi_mode_sense(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* dbd */ TRUE,
+ /* page_code */ SMS_PAGE_CTRL_CURRENT,
+ /* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE,
+ /* param_buf */ (u_int8_t *)sense_data,
+ /* param_len */ sizeof(*sense_data),
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ CH_TIMEOUT_MODE_SENSE);
+
+ error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ 0,
+ /* sense_flags */ SF_RETRY_UA,
+ &softc->device_stats);
+
+ if (error) {
+ xpt_print_path(periph->path);
+ printf("chgetparams: error getting element address page\n");
+ xpt_release_ccb(ccb);
+ return(error);
+ }
+
+ softc->sc_firsts[CHET_MT] = scsi_2btoul(sense_data->pages.ea.mtea);
+ softc->sc_counts[CHET_MT] = scsi_2btoul(sense_data->pages.ea.nmte);
+ softc->sc_firsts[CHET_ST] = scsi_2btoul(sense_data->pages.ea.fsea);
+ softc->sc_counts[CHET_ST] = scsi_2btoul(sense_data->pages.ea.nse);
+ softc->sc_firsts[CHET_IE] = scsi_2btoul(sense_data->pages.ea.fieea);
+ softc->sc_counts[CHET_IE] = scsi_2btoul(sense_data->pages.ea.niee);
+ softc->sc_firsts[CHET_DT] = scsi_2btoul(sense_data->pages.ea.fdtea);
+ softc->sc_counts[CHET_DT] = scsi_2btoul(sense_data->pages.ea.ndte);
+
+ bzero(sense_data, sizeof(*sense_data));
+
+ /*
+ * Now get the device capabilities page.
+ */
+ scsi_mode_sense(&ccb->csio,
+ /* retries */ 1,
+ /* cbfcnp */ chdone,
+ /* tag_action */ MSG_SIMPLE_Q_TAG,
+ /* dbd */ TRUE,
+ /* page_code */ SMS_PAGE_CTRL_CURRENT,
+ /* page */ CH_DEVICE_CAP_PAGE,
+ /* param_buf */ (u_int8_t *)sense_data,
+ /* param_len */ sizeof(*sense_data),
+ /* sense_len */ SSD_FULL_SIZE,
+ /* timeout */ CH_TIMEOUT_MODE_SENSE);
+
+ error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ 0,
+ /* sense_flags */ SF_RETRY_UA,
+ &softc->device_stats);
+
+ xpt_release_ccb(ccb);
+
+ if (error) {
+ xpt_print_path(periph->path);
+ printf("chgetparams: error getting device capabilities page\n");
+ return(error);
+ }
+
+
+ bzero(softc->sc_movemask, sizeof(softc->sc_movemask));
+ bzero(softc->sc_exchangemask, sizeof(softc->sc_exchangemask));
+ moves = &sense_data->pages.cap.move_from_mt;
+ exchanges = &sense_data->pages.cap.exchange_with_mt;
+ for (from = CHET_MT; from <= CHET_DT; ++from) {
+ softc->sc_movemask[from] = moves[from];
+ softc->sc_exchangemask[from] = exchanges[from];
+ }
+
+ return(error);
+}
+
+void
+scsi_move_medium(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int32_t tea, u_int32_t src,
+ u_int32_t dst, int invert, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_move_medium *scsi_cmd;
+
+ scsi_cmd = (struct scsi_move_medium *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->opcode = MOVE_MEDIUM;
+
+ scsi_ulto2b(tea, scsi_cmd->tea);
+ scsi_ulto2b(src, scsi_cmd->src);
+ scsi_ulto2b(dst, scsi_cmd->dst);
+
+ if (invert)
+ scsi_cmd->invert |= MOVE_MEDIUM_INVERT;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_exchange_medium(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int32_t tea, u_int32_t src,
+ u_int32_t dst1, u_int32_t dst2, int invert1,
+ int invert2, u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_exchange_medium *scsi_cmd;
+
+ scsi_cmd = (struct scsi_exchange_medium *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->opcode = EXCHANGE_MEDIUM;
+
+ scsi_ulto2b(tea, scsi_cmd->tea);
+ scsi_ulto2b(src, scsi_cmd->src);
+ scsi_ulto2b(dst1, scsi_cmd->fdst);
+ scsi_ulto2b(dst2, scsi_cmd->sdst);
+
+ if (invert1)
+ scsi_cmd->invert |= EXCHANGE_MEDIUM_INV1;
+
+ if (invert2)
+ scsi_cmd->invert |= EXCHANGE_MEDIUM_INV2;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_position_to_element(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int32_t tea, u_int32_t dst,
+ int invert, u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_position_to_element *scsi_cmd;
+
+ scsi_cmd = (struct scsi_position_to_element *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->opcode = POSITION_TO_ELEMENT;
+
+ scsi_ulto2b(tea, scsi_cmd->tea);
+ scsi_ulto2b(dst, scsi_cmd->dst);
+
+ if (invert)
+ scsi_cmd->invert |= POSITION_TO_ELEMENT_INVERT;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_read_element_status(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int voltag, u_int32_t sea,
+ u_int32_t count, u_int8_t *data_ptr,
+ u_int32_t dxfer_len, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_read_element_status *scsi_cmd;
+
+ scsi_cmd = (struct scsi_read_element_status *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->opcode = READ_ELEMENT_STATUS;
+
+ scsi_ulto2b(sea, scsi_cmd->sea);
+ scsi_ulto2b(count, scsi_cmd->count);
+ scsi_ulto3b(dxfer_len, scsi_cmd->len);
+
+ if (voltag)
+ scsi_cmd->byte2 |= READ_ELEMENT_STATUS_VOLTAG;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_IN,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_initialize_element_status(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_initialize_element_status *scsi_cmd;
+
+ scsi_cmd = (struct scsi_initialize_element_status *)
+ &csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->opcode = INITIALIZE_ELEMENT_STATUS;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_NONE,
+ tag_action,
+ /* data_ptr */ NULL,
+ /* dxfer_len */ 0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_send_volume_tag(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action,
+ u_int16_t element_address,
+ u_int8_t send_action_code,
+ struct scsi_send_volume_tag_parameters *parameters,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_send_volume_tag *scsi_cmd;
+
+ scsi_cmd = (struct scsi_send_volume_tag *) &csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->opcode = SEND_VOLUME_TAG;
+ scsi_ulto2b(element_address, scsi_cmd->ea);
+ scsi_cmd->sac = send_action_code;
+ scsi_ulto2b(sizeof(*parameters), scsi_cmd->pll);
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_OUT,
+ tag_action,
+ /* data_ptr */ (u_int8_t *) parameters,
+ sizeof(*parameters),
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
diff --git a/sys/cam/scsi/scsi_ch.h b/sys/cam/scsi/scsi_ch.h
new file mode 100644
index 0000000..97c6c69
--- /dev/null
+++ b/sys/cam/scsi/scsi_ch.h
@@ -0,0 +1,486 @@
+/* $NetBSD: scsi_changer.h,v 1.11 1998/02/13 08:28:32 enami Exp $ */
+
+/*
+ * Copyright (c) 1996 Jason R. Thorpe <thorpej@and.com>
+ * All rights reserved.
+ *
+ * Partially based on an autochanger driver written by Stefan Grefen
+ * and on an autochanger driver written by the Systems Programming Group
+ * at the University of Utah Computer Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgements:
+ * This product includes software developed by Jason R. Thorpe
+ * for And Communications, http://www.and.com/
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * SCSI changer interface description
+ */
+
+/*
+ * Partially derived from software written by Stefan Grefen
+ * (grefen@goofy.zdv.uni-mainz.de soon grefen@convex.com)
+ * based on the SCSI System written by Julian Elischer (julian@tfs.com)
+ * for TRW Financial Systems.
+ *
+ * TRW Financial Systems, in accordance with their agreement with Carnegie
+ * Mellon University, makes this software available to CMU to distribute
+ * or use in any manner that they see fit as long as this message is kept with
+ * the software. For this reason TFS also grants any other persons or
+ * organisations permission to use or modify this software.
+ *
+ * TFS supplies this software to be publicly redistributed
+ * on the understanding that TFS is not responsible for the correct
+ * functioning of this software in any circumstances.
+ *
+ * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
+ */
+
+#ifndef _SCSI_SCSI_CH_H
+#define _SCSI_SCSI_CH_H 1
+
+#include <sys/cdefs.h>
+
+/*
+ * SCSI command format
+ */
+
+/*
+ * Exchange the medium in the source element with the medium
+ * located at the destination element.
+ */
+struct scsi_exchange_medium {
+ u_int8_t opcode;
+#define EXCHANGE_MEDIUM 0xa6
+ u_int8_t byte2;
+ u_int8_t tea[2]; /* transport element address */
+ u_int8_t src[2]; /* source address */
+ u_int8_t fdst[2]; /* first destination address */
+ u_int8_t sdst[2]; /* second destination address */
+ u_int8_t invert;
+#define EXCHANGE_MEDIUM_INV1 0x01
+#define EXCHANGE_MEDIUM_INV2 0x02
+ u_int8_t control;
+};
+
+/*
+ * Cause the medium changer to check all elements for medium and any
+ * other status relevant to the element.
+ */
+struct scsi_initialize_element_status {
+ u_int8_t opcode;
+#define INITIALIZE_ELEMENT_STATUS 0x07
+ u_int8_t byte2;
+ u_int8_t reserved[3];
+ u_int8_t control;
+};
+
+/*
+ * Request the changer to move a unit of media from the source element
+ * to the destination element.
+ */
+struct scsi_move_medium {
+ u_int8_t opcode;
+#define MOVE_MEDIUM 0xa5
+ u_int8_t byte2;
+ u_int8_t tea[2]; /* transport element address */
+ u_int8_t src[2]; /* source element address */
+ u_int8_t dst[2]; /* destination element address */
+ u_int8_t reserved[2];
+ u_int8_t invert;
+#define MOVE_MEDIUM_INVERT 0x01
+ u_int8_t control;
+};
+
+/*
+ * Position the specified transport element (picker) in front of
+ * the destination element specified.
+ */
+struct scsi_position_to_element {
+ u_int8_t opcode;
+#define POSITION_TO_ELEMENT 0x2b
+ u_int8_t byte2;
+ u_int8_t tea[2]; /* transport element address */
+ u_int8_t dst[2]; /* destination element address */
+ u_int8_t reserved[2];
+ u_int8_t invert;
+#define POSITION_TO_ELEMENT_INVERT 0x01
+ u_int8_t control;
+};
+
+/*
+ * Request that the changer report the status of its internal elements.
+ */
+struct scsi_read_element_status {
+ u_int8_t opcode;
+#define READ_ELEMENT_STATUS 0xb8
+ u_int8_t byte2;
+#define READ_ELEMENT_STATUS_VOLTAG 0x10 /* report volume tag info */
+ /* ...next 4 bits are an element type code... */
+ u_int8_t sea[2]; /* starting element address */
+ u_int8_t count[2]; /* number of elements */
+ u_int8_t reserved0;
+ u_int8_t len[3]; /* length of data buffer */
+ u_int8_t reserved1;
+ u_int8_t control;
+};
+
+struct scsi_request_volume_element_address {
+ u_int8_t opcode;
+#define REQUEST_VOLUME_ELEMENT_ADDRESS 0xb5
+ u_int8_t byte2;
+#define REQUEST_VOLUME_ELEMENT_ADDRESS_VOLTAG 0x10
+ /* ...next 4 bits are an element type code... */
+ u_int8_t eaddr[2]; /* element address */
+ u_int8_t count[2]; /* number of elements */
+ u_int8_t reserved0;
+ u_int8_t len[3]; /* length of data buffer */
+ u_int8_t reserved1;
+ u_int8_t control;
+};
+
+/* XXX scsi_release */
+
+/*
+ * Changer-specific mode page numbers.
+ */
+#define CH_ELEMENT_ADDR_ASSIGN_PAGE 0x1D
+#define CH_TRANS_GEOM_PARAMS_PAGE 0x1E
+#define CH_DEVICE_CAP_PAGE 0x1F
+
+/*
+ * Data returned by READ ELEMENT STATUS consists of an 8-byte header
+ * followed by one or more read_element_status_pages.
+ */
+struct read_element_status_header {
+ u_int8_t fear[2]; /* first element address reported */
+ u_int8_t count[2]; /* number of elements available */
+ u_int8_t reserved;
+ u_int8_t nbytes[3]; /* byte count of all pages */
+};
+
+struct read_element_status_page_header {
+ u_int8_t type; /* element type code; see type codes below */
+ u_int8_t flags;
+#define READ_ELEMENT_STATUS_AVOLTAG 0x40
+#define READ_ELEMENT_STATUS_PVOLTAG 0x80
+ u_int8_t edl[2]; /* element descriptor length */
+ u_int8_t reserved;
+ u_int8_t nbytes[3]; /* byte count of all descriptors */
+};
+
+/*
+ * Format of a volume tag
+ */
+
+struct volume_tag {
+ u_int8_t vif[32]; /* volume identification field */
+ u_int8_t reserved[2];
+ u_int8_t vsn[2]; /* volume sequence number */
+};
+
+struct read_element_status_descriptor {
+ u_int8_t eaddr[2]; /* element address */
+ u_int8_t flags1;
+
+#define READ_ELEMENT_STATUS_FULL 0x01
+#define READ_ELEMENT_STATUS_IMPEXP 0x02
+#define READ_ELEMENT_STATUS_EXCEPT 0x04
+#define READ_ELEMENT_STATUS_ACCESS 0x08
+#define READ_ELEMENT_STATUS_EXENAB 0x10
+#define READ_ELEMENT_STATUS_INENAB 0x20
+
+#define READ_ELEMENT_STATUS_MT_MASK1 0x05
+#define READ_ELEMENT_STATUS_ST_MASK1 0x0c
+#define READ_ELEMENT_STATUS_IE_MASK1 0x3f
+#define READ_ELEMENT_STATUS_DT_MASK1 0x0c
+
+ u_int8_t reserved0;
+ u_int8_t sense_code;
+ u_int8_t sense_qual;
+
+ /*
+ * dt_scsi_flags and dt_scsi_addr are valid only on data transfer
+ * elements. These bytes are undefined for all other element types.
+ */
+ u_int8_t dt_scsi_flags;
+
+#define READ_ELEMENT_STATUS_DT_LUNMASK 0x07
+#define READ_ELEMENT_STATUS_DT_LUVALID 0x10
+#define READ_ELEMENT_STATUS_DT_IDVALID 0x20
+#define READ_ELEMENT_STATUS_DT_NOTBUS 0x80
+
+ u_int8_t dt_scsi_addr;
+
+ u_int8_t reserved1;
+
+ u_int8_t flags2;
+#define READ_ELEMENT_STATUS_INVERT 0x40
+#define READ_ELEMENT_STATUS_SVALID 0x80
+ u_int8_t ssea[2]; /* source storage element address */
+
+ struct volume_tag pvoltag; /* omitted if PVOLTAG == 0 */
+ struct volume_tag avoltag; /* omitted if AVOLTAG == 0 */
+
+ /* Other data may follow */
+};
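+
+/*
+ * Parsing sketch, for illustration only (a single status page is shown;
+ * real data may carry one page per element type): the returned data is
+ * walked as the 8-byte header, then a page header, then descriptors
+ * spaced "edl" bytes apart.  Note edl need not equal the size of the
+ * struct above, since the volume tag fields may be omitted.  "buf"
+ * stands for whatever buffer the command returned data into.
+ *
+ *	struct read_element_status_header *hdr = (void *)buf;
+ *	struct read_element_status_page_header *pg = (void *)(hdr + 1);
+ *	size_t edl = scsi_2btoul(pg->edl);
+ *	u_int8_t *dp = (u_int8_t *)(pg + 1);
+ *	int i, n = scsi_2btoul(hdr->count);
+ *
+ *	for (i = 0; i < n; i++, dp += edl) {
+ *		struct read_element_status_descriptor *d = (void *)dp;
+ *		(examine d->eaddr, d->flags1, voltags if reported, ...)
+ *	}
+ */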
+
+/* XXX add data returned by REQUEST VOLUME ELEMENT ADDRESS */
+
+/* Element type codes */
+#define ELEMENT_TYPE_MASK 0x0f /* Note: these aren't bits */
+#define ELEMENT_TYPE_ALL 0x00
+#define ELEMENT_TYPE_MT 0x01
+#define ELEMENT_TYPE_ST 0x02
+#define ELEMENT_TYPE_IE 0x03
+#define ELEMENT_TYPE_DT 0x04
+
+/*
+ * XXX The following definitions should be common to all SCSI device types.
+ */
+#define PGCODE_MASK 0x3f /* valid page number bits in pg_code */
+#define PGCODE_PS 0x80 /* indicates page is savable */
+
+/*
+ * Send volume tag information to the changer
+ */
+
+struct scsi_send_volume_tag {
+ u_int8_t opcode;
+#define SEND_VOLUME_TAG 0xb6
+ u_int8_t byte2;
+ u_int8_t ea[2]; /* element address */
+ u_int8_t reserved2;
+ u_int8_t sac; /* send action code */
+
+#define SEND_VOLUME_TAG_ASSERT_PRIMARY 0x08
+#define SEND_VOLUME_TAG_ASSERT_ALTERNATE 0x09
+#define SEND_VOLUME_TAG_REPLACE_PRIMARY 0x0a
+#define SEND_VOLUME_TAG_REPLACE_ALTERNATE 0x0b
+#define SEND_VOLUME_TAG_UNDEFINED_PRIMARY 0x0c
+#define SEND_VOLUME_TAG_UNDEFINED_ALTERNATE 0x0d
+
+ u_int8_t reserved4[2];
+ u_int8_t pll[2]; /* parameter list length */
+ u_int8_t reserved5;
+ u_int8_t control;
+};
+
+/*
+ * Parameter format for SEND VOLUME TAG
+ */
+
+struct scsi_send_volume_tag_parameters {
+ u_int8_t vitf[32]; /* volume tag identification template */
+ u_int8_t reserved1[2];
+ u_int8_t minvsn[2]; /* minimum volume sequence number */
+ u_int8_t reserved2[2];
+ u_int8_t maxvsn[2]; /* maximum volume sequence number */
+};
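+
+/*
+ * Example (illustrative only): filling in a SEND VOLUME TAG parameter list.
+ * The tag template and sequence numbers here are hypothetical values;
+ * scsi_ulto2b() is the 2-byte encoding helper from scsi_all.h.
+ *
+ *	struct scsi_send_volume_tag_parameters parms;
+ *
+ *	bzero(&parms, sizeof(parms));
+ *	bcopy("DLT001", parms.vitf, 6);
+ *	scsi_ulto2b(1, parms.minvsn);
+ *	scsi_ulto2b(1, parms.maxvsn);
+ *
+ * The structure is then passed to scsi_send_volume_tag() (declared below)
+ * along with one of the SEND_VOLUME_TAG_* action codes.
+ */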
+
+/*
+ * Device capabilities page.
+ *
+ * This page defines characteristics of the element types in the
+ * medium changer device.
+ *
+ * Note in the definitions below, the following abbreviations are
+ * used:
+ * MT Medium transport element (picker)
+ * ST Storage transport element (slot)
+ * IE Import/export element (portal)
+ *	DT	Data transfer element (tape/disk drive)
+ */
+struct page_device_capabilities {
+ u_int8_t pg_code; /* page code (0x1f) */
+ u_int8_t pg_length; /* page length (0x12) */
+
+ /*
+ * The STOR_xx bits indicate that an element of a given
+ * type may provide independent storage for a unit of
+ * media. The top four bits of this value are reserved.
+ */
+ u_int8_t stor;
+#define STOR_MT 0x01
+#define STOR_ST 0x02
+#define STOR_IE 0x04
+#define STOR_DT 0x08
+
+ u_int8_t reserved0;
+
+ /*
+	 * The MOVE_TO_yy bits in each move_from_xx field indicate that the
+	 * changer supports moving a unit of medium from an element of type
+	 * xx to an element of type yy. This is used to determine if a given
+ * MOVE MEDIUM command is legal. The top four bits of each
+ * of these values are reserved.
+ */
+ u_int8_t move_from_mt;
+ u_int8_t move_from_st;
+ u_int8_t move_from_ie;
+ u_int8_t move_from_dt;
+#define MOVE_TO_MT 0x01
+#define MOVE_TO_ST 0x02
+#define MOVE_TO_IE 0x04
+#define MOVE_TO_DT 0x08
+
+ u_int8_t reserved1[2];
+
+ /*
+ * Similar to above, but for EXCHANGE MEDIUM.
+ */
+ u_int8_t exchange_with_mt;
+ u_int8_t exchange_with_st;
+ u_int8_t exchange_with_ie;
+ u_int8_t exchange_with_dt;
+#define EXCHANGE_WITH_MT 0x01
+#define EXCHANGE_WITH_ST 0x02
+#define EXCHANGE_WITH_IE 0x04
+#define EXCHANGE_WITH_DT 0x08
+};
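+
+/*
+ * Example (illustrative only): given a filled-in device capabilities page,
+ * a caller could check whether MOVE MEDIUM from a storage slot (ST) to a
+ * drive (DT) is supported:
+ *
+ *	struct page_device_capabilities *cap;
+ *
+ *	if (cap->move_from_st & MOVE_TO_DT) {
+ *		... moves from slots to drives are legal ...
+ *	}
+ */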
+
+/*
+ * Medium changer element address assignment page.
+ *
+ * Some of these fields can be a little confusing, so an explanation
+ * is in order.
+ *
+ * Each component within a medium changer apparatus is called an
+ * "element".
+ *
+ * The "medium transport element address" is the address of the first
+ * picker (robotic arm). "Number of medium transport elements" tells
+ * us how many pickers exist in the changer.
+ *
+ * The "first storage element address" is the address of the first
+ * slot in the tape or disk magazine. "Number of storage elements" tells
+ * us how many slots exist in the changer.
+ *
+ * The "first import/export element address" is the address of the first
+ * medium portal accessible both by the medium changer and an outside
+ * human operator. This is where the changer might deposit tapes destined
+ * for some vault. The "number of import/export elements" tells us
+ * how many of these portals exist in the changer. NOTE: this number may
+ * be 0.
+ *
+ * The "first data transfer element address" is the address of the first
+ * tape or disk drive in the changer. "Number of data transfer elements"
+ * tells us how many drives exist in the changer.
+ */
+struct page_element_address_assignment {
+ u_int8_t pg_code; /* page code (0x1d) */
+ u_int8_t pg_length; /* page length (0x12) */
+
+ /* Medium transport element address */
+ u_int8_t mtea[2];
+
+ /* Number of medium transport elements */
+ u_int8_t nmte[2];
+
+ /* First storage element address */
+ u_int8_t fsea[2];
+
+ /* Number of storage elements */
+ u_int8_t nse[2];
+
+ /* First import/export element address */
+ u_int8_t fieea[2];
+
+ /* Number of import/export elements */
+ u_int8_t niee[2];
+
+ /* First data transfer element address */
+ u_int8_t fdtea[2];
+
+	/* Number of data transfer elements */
+ u_int8_t ndte[2];
+
+ u_int8_t reserved[2];
+};
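+
+/*
+ * Example (illustrative only): decoding the element address assignment
+ * page. All of the addresses and counts are 2-byte big-endian values,
+ * so the scsi_2btoul() helper from scsi_all.h applies:
+ *
+ *	struct page_element_address_assignment *ea;
+ *	u_int32_t first_picker, npickers, first_slot, nslots;
+ *
+ *	first_picker = scsi_2btoul(ea->mtea);
+ *	npickers     = scsi_2btoul(ea->nmte);
+ *	first_slot   = scsi_2btoul(ea->fsea);
+ *	nslots       = scsi_2btoul(ea->nse);
+ */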
+
+/*
+ * Transport geometry parameters page.
+ *
+ * Defines whether each medium transport element is a member of a set of
+ * elements that share a common robotics subsystem and whether the element
+ * is capable of media rotation. One transport geometry descriptor is
+ * transferred for each medium transport element, beginning with the first
+ * medium transport element (other than the default transport element address
+ * of 0).
+ */
+struct page_transport_geometry_parameters {
+ u_int8_t pg_code; /* page code (0x1e) */
+ u_int8_t pg_length; /* page length; variable */
+
+ /* Transport geometry descriptor(s) are here. */
+
+ u_int8_t misc;
+#define CAN_ROTATE 0x01
+
+ /* Member number in transport element set. */
+ u_int8_t member;
+};
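+
+/*
+ * Example (illustrative only): one misc/member descriptor pair is returned
+ * per medium transport element, so a caller could check whether the first
+ * picker is able to invert (flip) media with:
+ *
+ *	struct page_transport_geometry_parameters *geom;
+ *
+ *	if (geom->misc & CAN_ROTATE) {
+ *		... this picker can rotate media ...
+ *	}
+ */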
+
+__BEGIN_DECLS
+void scsi_move_medium(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int32_t tea, u_int32_t src,
+ u_int32_t dst, int invert, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_exchange_medium(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int32_t tea, u_int32_t src,
+ u_int32_t dst1, u_int32_t dst2, int invert1,
+ int invert2, u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_position_to_element(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int32_t tea, u_int32_t dst,
+ int invert, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_read_element_status(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int voltag, u_int32_t sea,
+ u_int32_t count, u_int8_t *data_ptr,
+ u_int32_t dxfer_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_initialize_element_status(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t sense_len,
+ u_int32_t timeout);
+void scsi_send_volume_tag(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action,
+ u_int16_t element_address,
+ u_int8_t send_action_code,
+ struct scsi_send_volume_tag_parameters *parameters,
+ u_int8_t sense_len, u_int32_t timeout);
+__END_DECLS
+
+#endif /* _SCSI_SCSI_CH_H */
diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c
new file mode 100644
index 0000000..b8ae94c
--- /dev/null
+++ b/sys/cam/scsi/scsi_da.c
@@ -0,0 +1,1520 @@
+/*
+ * Implementation of SCSI Direct Access Peripheral driver for CAM.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#ifdef KERNEL
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#endif
+#include <sys/types.h>
+#include <sys/buf.h>
+#include <sys/devicestat.h>
+#include <sys/dkbad.h>
+#include <sys/disklabel.h>
+#include <sys/diskslice.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+
+#ifdef KERNEL
+#include <machine/cons.h> /* For cncheckc */
+#include <machine/md_var.h> /* For Maxmem */
+
+#include <vm/vm.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
+#endif
+
+#ifndef KERNEL
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_extend.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_debug.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/scsi/scsi_da.h>
+
+#ifdef KERNEL
+
+typedef enum {
+ DA_STATE_PROBE,
+ DA_STATE_NORMAL
+} da_state;
+
+typedef enum {
+ DA_FLAG_PACK_INVALID = 0x001,
+ DA_FLAG_NEW_PACK = 0x002,
+ DA_FLAG_PACK_LOCKED = 0x004,
+ DA_FLAG_PACK_REMOVABLE = 0x008,
+ DA_FLAG_TAGGED_QUEUING = 0x010,
+ DA_FLAG_NEED_OTAG = 0x020,
+ DA_FLAG_WENT_IDLE = 0x040,
+ DA_FLAG_RETRY_UA = 0x080,
+ DA_FLAG_OPEN = 0x100
+} da_flags;
+
+typedef enum {
+ DA_CCB_PROBE = 0x01,
+ DA_CCB_BUFFER_IO = 0x02,
+ DA_CCB_WAITING = 0x03,
+ DA_CCB_DUMP = 0x04,
+ DA_CCB_TYPE_MASK = 0x0F,
+ DA_CCB_RETRY_UA = 0x10
+} da_ccb_state;
+
+/* Offsets into our private area for storing information */
+#define ccb_state ppriv_field0
+#define ccb_bp ppriv_ptr1
+
+struct disk_params {
+ u_int8_t heads;
+ u_int16_t cylinders;
+ u_int8_t secs_per_track;
+ u_int32_t secsize; /* Number of bytes/sector */
+ u_int32_t sectors; /* total number sectors */
+};
+
+struct da_softc {
+ struct buf_queue_head buf_queue;
+ struct devstat device_stats;
+ SLIST_ENTRY(da_softc) links;
+ LIST_HEAD(, ccb_hdr) pending_ccbs;
+ da_state state;
+ da_flags flags;
+ int ordered_tag_count;
+ struct disk_params params;
+ struct diskslices *dk_slices; /* virtual drives */
+ union ccb saved_ccb;
+#ifdef DEVFS
+ void *b_devfs_token;
+ void *c_devfs_token;
+ void *ctl_devfs_token;
+#endif
+};
+
+static d_open_t daopen;
+static d_read_t daread;
+static d_write_t dawrite;
+static d_close_t daclose;
+static d_strategy_t dastrategy;
+static d_ioctl_t daioctl;
+static d_dump_t dadump;
+static d_psize_t dasize;
+static periph_init_t dainit;
+static void daasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg);
+static periph_ctor_t daregister;
+static periph_dtor_t dacleanup;
+static periph_start_t dastart;
+static void dadone(struct cam_periph *periph,
+ union ccb *done_ccb);
+static int daerror(union ccb *ccb, u_int32_t cam_flags,
+ u_int32_t sense_flags);
+static void daprevent(struct cam_periph *periph, int action);
+static void dasetgeom(struct cam_periph *periph,
+ struct scsi_read_capacity_data * rdcap);
+static timeout_t dasendorderedtag;
+
+#ifndef DA_DEFAULT_TIMEOUT
+#define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
+#endif
+
+/*
+ * DA_ORDEREDTAG_INTERVAL determines how often, relative
+ * to the default timeout, we check to see whether an ordered
+ * tagged transaction is appropriate to prevent simple tag
+ * starvation. Since we'd like to ensure that there is at least
+ * 1/2 of the timeout length left for a starved transaction to
+ * complete after we've sent an ordered tag, we must poll at least
+ * four times in every timeout period. This takes care of the worst
+ * case where a starved transaction starts during an interval that
+ * passes the "don't send an ordered tag" test, so it can take
+ * us two intervals to determine that a tag must be sent.
+ */
+#ifndef DA_ORDEREDTAG_INTERVAL
+#define DA_ORDEREDTAG_INTERVAL 4
+#endif
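+
+/*
+ * Worked example of the interval math above: with the default 60 second
+ * timeout and an interval divisor of 4, dasendorderedtag() is rescheduled
+ * every (60 * hz) / 4 ticks, i.e. every 15 seconds. A transaction that
+ * begins starving just after one poll is flagged within two polls (30
+ * seconds), which still leaves at least half of the timeout for it to
+ * complete once an ordered tag has been sent.
+ */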
+
+static struct periph_driver dadriver =
+{
+ dainit, "da",
+ TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
+};
+
+DATA_SET(periphdriver_set, dadriver);
+
+#define DA_CDEV_MAJOR 13
+#define DA_BDEV_MAJOR 4
+
+/* For 2.2-stable support */
+#ifndef D_DISK
+#define D_DISK 0
+#endif
+
+static struct cdevsw da_cdevsw =
+{
+ /*d_open*/ daopen,
+ /*d_close*/ daclose,
+ /*d_read*/ daread,
+ /*d_write*/ dawrite,
+ /*d_ioctl*/ daioctl,
+ /*d_stop*/ nostop,
+ /*d_reset*/ noreset,
+ /*d_devtotty*/ nodevtotty,
+ /*d_poll*/ seltrue,
+ /*d_mmap*/ nommap,
+ /*d_strategy*/ dastrategy,
+ /*d_name*/ "da",
+ /*d_spare*/ NULL,
+ /*d_maj*/ -1,
+ /*d_dump*/ dadump,
+ /*d_psize*/ dasize,
+ /*d_flags*/ D_DISK,
+ /*d_maxio*/ 0,
+ /*b_maj*/ -1
+};
+
+static SLIST_HEAD(,da_softc) softc_list;
+static struct extend_array *daperiphs;
+
+static int
+daopen(dev_t dev, int flags, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct da_softc *softc;
+ struct disklabel label;
+ int unit;
+ int part;
+ int error;
+
+ unit = dkunit(dev);
+ part = dkpart(dev);
+ periph = cam_extend_get(daperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct da_softc *)periph->softc;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
+ ("daopen: dev=0x%x (unit %d , partition %d)\n", dev,
+ unit, part));
+
+ if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
+ return (error); /* error code from tsleep */
+ }
+
+ if ((softc->flags & DA_FLAG_OPEN) == 0) {
+ if (cam_periph_acquire(periph) != CAM_REQ_CMP)
+ return(ENXIO);
+ softc->flags |= DA_FLAG_OPEN;
+ }
+
+ if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
+
+ if (softc->dk_slices != NULL) {
+ /*
+ * If any partition is open, but the disk has
+ * been invalidated, disallow further opens.
+ */
+ if (dsisopen(softc->dk_slices)) {
+ cam_periph_unlock(periph);
+ return(ENXIO);
+ }
+
+ /* Invalidate our pack information */
+ dsgone(&softc->dk_slices);
+ }
+ softc->flags &= ~DA_FLAG_PACK_INVALID;
+ }
+
+ /* Do a read capacity */
+ {
+ struct scsi_read_capacity_data *rcap;
+ union ccb *ccb;
+
+ rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
+ M_TEMP,
+ M_WAITOK);
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+ scsi_read_capacity(&ccb->csio,
+ /*retries*/1,
+ /*cbfncp*/dadone,
+ MSG_SIMPLE_Q_TAG,
+ rcap,
+ SSD_FULL_SIZE,
+ /*timeout*/20000);
+ ccb->ccb_h.ccb_bp = NULL;
+
+ error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA,
+ &softc->device_stats);
+
+ xpt_release_ccb(ccb);
+
+ if (error == 0) {
+ dasetgeom(periph, rcap);
+ }
+
+ free(rcap, M_TEMP);
+ }
+
+ if (error == 0) {
+ /* Build label for whole disk. */
+ bzero(&label, sizeof(label));
+ label.d_type = DTYPE_SCSI;
+ label.d_secsize = softc->params.secsize;
+ label.d_nsectors = softc->params.secs_per_track;
+ label.d_ntracks = softc->params.heads;
+ label.d_ncylinders = softc->params.cylinders;
+ label.d_secpercyl = softc->params.heads
+ * softc->params.secs_per_track;
+ label.d_secperunit = softc->params.sectors;
+
+ if ((dsisopen(softc->dk_slices) == 0)
+ && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
+ daprevent(periph, PR_PREVENT);
+ }
+
+ /* Initialize slice tables. */
+ error = dsopen("da", dev, fmt, 0, &softc->dk_slices, &label,
+ dastrategy, (ds_setgeom_t *)NULL,
+ &da_cdevsw);
+
+ /*
+ * Check to see whether or not the blocksize is set yet.
+ * If it isn't, set it and then clear the blocksize
+ * unavailable flag for the device statistics.
+ */
+ if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
+ softc->device_stats.block_size = softc->params.secsize;
+ softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
+ }
+ }
+
+ if (error != 0) {
+ if ((dsisopen(softc->dk_slices) == 0)
+ && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
+ daprevent(periph, PR_ALLOW);
+ }
+ }
+ cam_periph_unlock(periph);
+ return (error);
+}
+
+static int
+daclose(dev_t dev, int flag, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct da_softc *softc;
+ union ccb *ccb;
+ int unit;
+ int error;
+
+ unit = dkunit(dev);
+ periph = cam_extend_get(daperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct da_softc *)periph->softc;
+
+ if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
+ return (error); /* error code from tsleep */
+ }
+
+ dsclose(dev, fmt, softc->dk_slices);
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+
+ scsi_synchronize_cache(&ccb->csio,
+ /*retries*/1,
+ /*cbfcnp*/dadone,
+ MSG_SIMPLE_Q_TAG,
+ /*begin_lba*/0, /* Cover the whole disk */
+ /*lb_count*/0,
+ SSD_FULL_SIZE,
+ 5 * 60 * 1000);
+
+ /* Ignore any errors */
+ cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ xpt_release_ccb(ccb);
+
+ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
+ daprevent(periph, PR_ALLOW);
+ /*
+		 * If we've got removable media, mark the blocksize as
+ * unavailable, since it could change when new media is
+ * inserted.
+ */
+ softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
+ }
+
+ softc->flags &= ~DA_FLAG_OPEN;
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+ return (0);
+}
+
+static int
+daread(dev_t dev, struct uio *uio, int ioflag)
+{
+ return(physio(dastrategy, NULL, dev, 1, minphys, uio));
+}
+
+static int
+dawrite(dev_t dev, struct uio *uio, int ioflag)
+{
+ return(physio(dastrategy, NULL, dev, 0, minphys, uio));
+}
+
+/*
+ * Actually translate the requested transfer into one the physical driver
+ * can understand. The transfer is described by a buf and will include
+ * only one physical transfer.
+ */
+static void
+dastrategy(struct buf *bp)
+{
+ struct cam_periph *periph;
+ struct da_softc *softc;
+ u_int unit;
+ u_int part;
+ int s;
+
+ unit = dkunit(bp->b_dev);
+ part = dkpart(bp->b_dev);
+ periph = cam_extend_get(daperiphs, unit);
+ if (periph == NULL) {
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+ softc = (struct da_softc *)periph->softc;
+#if 0
+ /*
+ * check it's not too big a transfer for our adapter
+ */
+ scsi_minphys(bp,&sd_switch);
+#endif
+
+ /*
+	 * Do bounds checking, adjust the transfer, and set b_cylin and b_pblkno.
+ */
+ if (dscheck(bp, softc->dk_slices) <= 0)
+ goto done;
+
+ /*
+ * Mask interrupts so that the pack cannot be invalidated until
+ * after we are in the queue. Otherwise, we might not properly
+ * clean up one of the buffers.
+ */
+ s = splbio();
+
+ /*
+ * If the device has been made invalid, error out
+ */
+ if ((softc->flags & DA_FLAG_PACK_INVALID)) {
+ splx(s);
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+
+ /*
+ * Place it in the queue of disk activities for this disk
+ */
+ bufqdisksort(&softc->buf_queue, bp);
+
+ splx(s);
+
+ /*
+ * Schedule ourselves for performing the work.
+ */
+ xpt_schedule(periph, /* XXX priority */1);
+
+ return;
+bad:
+ bp->b_flags |= B_ERROR;
+done:
+
+ /*
+ * Correctly set the buf to indicate a completed xfer
+ */
+ bp->b_resid = bp->b_bcount;
+ biodone(bp);
+ return;
+}
+
+/* For 2.2-stable support */
+#ifndef ENOIOCTL
+#define ENOIOCTL -1
+#endif
+
+static int
+daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct da_softc *softc;
+ int unit;
+ int error;
+
+ unit = dkunit(dev);
+ periph = cam_extend_get(daperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct da_softc *)periph->softc;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
+
+ if (cmd == DIOCSBAD)
+ return (EINVAL); /* XXX */
+
+ if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
+ return (error); /* error code from tsleep */
+ }
+
+ error = dsioctl("da", dev, cmd, addr, flag, &softc->dk_slices,
+ dastrategy, (ds_setgeom_t *)NULL);
+
+ if (error == ENOIOCTL)
+ error = cam_periph_ioctl(periph, cmd, addr, daerror);
+
+ cam_periph_unlock(periph);
+
+ return (error);
+}
+
+static int
+dadump(dev_t dev)
+{
+ struct cam_periph *periph;
+ struct da_softc *softc;
+ struct disklabel *lp;
+ u_int unit;
+ u_int part;
+ long num; /* number of sectors to write */
+ long blkoff;
+ long blknum;
+ long blkcnt;
+ char *addr;
+ static int dadoingadump = 0;
+ struct ccb_scsiio csio;
+
+ /* toss any characters present prior to dump */
+ while (cncheckc() != -1)
+ ;
+
+ unit = dkunit(dev);
+ part = dkpart(dev);
+ periph = cam_extend_get(daperiphs, unit);
+ if (periph == NULL) {
+ return (ENXIO);
+ }
+ softc = (struct da_softc *)periph->softc;
+
+ if ((softc->flags & DA_FLAG_PACK_INVALID) != 0
+ || (softc->dk_slices == NULL)
+ || (lp = dsgetlabel(dev, softc->dk_slices)) == NULL)
+ return (ENXIO);
+
+ /* Size of memory to dump, in disk sectors. */
+ /* XXX Fix up for non DEV_BSIZE sectors!!! */
+ num = (u_long)Maxmem * PAGE_SIZE / softc->params.secsize;
+
+ blkoff = lp->d_partitions[part].p_offset;
+ blkoff += softc->dk_slices->dss_slices[dkslice(dev)].ds_offset;
+
+ /* check transfer bounds against partition size */
+ if ((dumplo < 0) || ((dumplo + num) > lp->d_partitions[part].p_size))
+ return (EINVAL);
+
+ if (dadoingadump != 0)
+ return (EFAULT);
+
+ dadoingadump = 1;
+
+ blknum = dumplo + blkoff;
+ blkcnt = PAGE_SIZE / softc->params.secsize;
+
+ addr = (char *)0; /* starting address */
+
+ while (num > 0) {
+
+ if (is_physical_memory((vm_offset_t)addr)) {
+ pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
+ trunc_page(addr), VM_PROT_READ, TRUE);
+ } else {
+ pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
+ trunc_page(0), VM_PROT_READ, TRUE);
+ }
+
+ xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
+ csio.ccb_h.ccb_state = DA_CCB_DUMP;
+ scsi_read_write(&csio,
+ /*retries*/1,
+ dadone,
+ MSG_ORDERED_Q_TAG,
+ /*read*/FALSE,
+ /*byte2*/0,
+ /*minimum_cmd_size*/ 6,
+ blknum,
+ blkcnt,
+ /*data_ptr*/CADDR1,
+ /*dxfer_len*/blkcnt * softc->params.secsize,
+ /*sense_len*/SSD_FULL_SIZE,
+ DA_DEFAULT_TIMEOUT * 1000);
+ xpt_polled_action((union ccb *)&csio);
+
+ if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ printf("Aborting dump due to I/O error. "
+ "status == 0x%x, scsi status == 0x%x\n",
+ csio.ccb_h.status, csio.scsi_status);
+ return (EIO);
+ }
+
+ if ((unsigned)addr % (1024 * 1024) == 0) {
+#ifdef HW_WDOG
+ if (wdog_tickler)
+ (*wdog_tickler)();
+#endif /* HW_WDOG */
+ /* Count in MB of data left to write */
+ printf("%ld ", (num * softc->params.secsize)
+ / (1024 * 1024));
+ }
+
+ /* update block count */
+ num -= blkcnt;
+ blknum += blkcnt;
+		addr += blkcnt * softc->params.secsize;
+
+ /* operator aborting dump? */
+ if (cncheckc() != -1)
+ return (EINTR);
+ }
+ return (0);
+}
+
+static int
+dasize(dev_t dev)
+{
+ struct cam_periph *periph;
+ struct da_softc *softc;
+
+ periph = cam_extend_get(daperiphs, dkunit(dev));
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct da_softc *)periph->softc;
+
+ return (dssize(dev, &softc->dk_slices, daopen, daclose));
+}
+
+static void
+dainit(void)
+{
+ cam_status status;
+ struct cam_path *path;
+
+ /*
+ * Create our extend array for storing the devices we attach to.
+ */
+ daperiphs = cam_extend_new();
+ SLIST_INIT(&softc_list);
+ if (daperiphs == NULL) {
+ printf("da: Failed to alloc extend array!\n");
+ return;
+ }
+
+ /*
+ * Install a global async callback. This callback will
+ * receive async callbacks like "new device found".
+ */
+ status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+
+ if (status == CAM_REQ_CMP) {
+ struct ccb_setasync csa;
+
+ xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_FOUND_DEVICE;
+ csa.callback = daasync;
+ csa.callback_arg = NULL;
+ xpt_action((union ccb *)&csa);
+ status = csa.ccb_h.status;
+ xpt_free_path(path);
+ }
+
+ if (status != CAM_REQ_CMP) {
+ printf("da: Failed to attach master async callback "
+ "due to status 0x%x!\n", status);
+ } else {
+		/* If we were successful, register our devsw */
+ cdevsw_add_generic(DA_BDEV_MAJOR, DA_CDEV_MAJOR, &da_cdevsw);
+
+ /*
+		 * Schedule a periodic event to occasionally send an
+ * ordered tag to a device.
+ */
+ timeout(dasendorderedtag, NULL,
+ (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
+ }
+}
+
+static void
+dacleanup(struct cam_periph *periph)
+{
+ cam_extend_release(daperiphs, periph->unit_number);
+ xpt_print_path(periph->path);
+ printf("removing device entry\n");
+ free(periph->softc, M_DEVBUF);
+}
+
+static void
+daasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg)
+{
+ struct cam_periph *periph;
+
+ periph = (struct cam_periph *)callback_arg;
+ switch (code) {
+ case AC_FOUND_DEVICE:
+ {
+ struct ccb_getdev *cgd;
+ cam_status status;
+
+ cgd = (struct ccb_getdev *)arg;
+
+ if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
+ break;
+
+ /*
+ * Allocate a peripheral instance for
+ * this device and start the probe
+ * process.
+ */
+ status = cam_periph_alloc(daregister, dacleanup, dastart,
+ "da", CAM_PERIPH_BIO, cgd->ccb_h.path,
+ daasync, AC_FOUND_DEVICE, cgd);
+
+ if (status != CAM_REQ_CMP
+ && status != CAM_REQ_INPROG)
+ printf("daasync: Unable to attach to new device "
+ "due to status 0x%x\n", status);
+ break;
+ }
+ case AC_LOST_DEVICE:
+ {
+ int s;
+ struct da_softc *softc;
+ struct buf *q_bp;
+ struct ccb_setasync csa;
+
+ softc = (struct da_softc *)periph->softc;
+
+ /*
+		 * Ensure that no other async callbacks that
+ * might affect this peripheral can come through.
+ */
+ s = splcam();
+
+ /*
+ * De-register any async callbacks.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path,
+ /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = 0;
+ csa.callback = daasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ softc->flags |= DA_FLAG_PACK_INVALID;
+
+ /*
+ * Return all queued I/O with ENXIO.
+ * XXX Handle any transactions queued to the card
+ * with XPT_ABORT_CCB.
+ */
+ while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
+ bufq_remove(&softc->buf_queue, q_bp);
+ q_bp->b_resid = q_bp->b_bcount;
+ q_bp->b_error = ENXIO;
+ q_bp->b_flags |= B_ERROR;
+ biodone(q_bp);
+ }
+ devstat_remove_entry(&softc->device_stats);
+
+ SLIST_REMOVE(&softc_list, softc, da_softc, links);
+
+ xpt_print_path(periph->path);
+ printf("lost device\n");
+
+ splx(s);
+
+ cam_periph_invalidate(periph);
+ break;
+ }
+ case AC_SENT_BDR:
+ case AC_BUS_RESET:
+ {
+ struct da_softc *softc;
+ struct ccb_hdr *ccbh;
+ int s;
+
+ softc = (struct da_softc *)periph->softc;
+ s = splsoftcam();
+ /*
+ * Don't fail on the expected unit attention
+ * that will occur.
+ */
+ softc->flags |= DA_FLAG_RETRY_UA;
+ for (ccbh = LIST_FIRST(&softc->pending_ccbs);
+ ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
+ ccbh->ccb_state |= DA_CCB_RETRY_UA;
+ splx(s);
+ break;
+ }
+ case AC_TRANSFER_NEG:
+ case AC_SCSI_AEN:
+ case AC_UNSOL_RESEL:
+ default:
+ break;
+ }
+}
+
+static cam_status
+daregister(struct cam_periph *periph, void *arg)
+{
+ int s;
+ struct da_softc *softc;
+ struct ccb_setasync csa;
+ struct ccb_getdev *cgd;
+
+ cgd = (struct ccb_getdev *)arg;
+ if (periph == NULL) {
+ printf("daregister: periph was NULL!!\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ if (cgd == NULL) {
+ printf("daregister: no getdev CCB, can't register device\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
+
+ if (softc == NULL) {
+ printf("daregister: Unable to probe new device. "
+ "Unable to allocate softc\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ bzero(softc, sizeof(*softc));
+ LIST_INIT(&softc->pending_ccbs);
+ softc->state = DA_STATE_PROBE;
+ bufq_init(&softc->buf_queue);
+ if (SID_IS_REMOVABLE(&cgd->inq_data))
+ softc->flags |= DA_FLAG_PACK_REMOVABLE;
+ if ((cgd->inq_data.flags & SID_CmdQue) != 0)
+ softc->flags |= DA_FLAG_TAGGED_QUEUING;
+
+ periph->softc = softc;
+
+ cam_extend_set(daperiphs, periph->unit_number, periph);
+ /*
+ * Block our timeout handler while we
+ * add this softc to the dev list.
+ */
+ s = splsoftclock();
+ SLIST_INSERT_HEAD(&softc_list, softc, links);
+ splx(s);
+
+ /*
+ * The DA driver supports a blocksize, but
+ * we don't know the blocksize until we do
+ * a read capacity. So, set a flag to
+ * indicate that the blocksize is
+ * unavailable right now. We'll clear the
+ * flag as soon as we've done a read capacity.
+ */
+ devstat_add_entry(&softc->device_stats, "da",
+ periph->unit_number, 0,
+ DEVSTAT_BS_UNAVAILABLE,
+ cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);
+
+ /*
+ * Add async callbacks for bus reset and
+ * bus device reset calls. I don't bother
+ * checking if this fails as, in most cases,
+ * the system will function just fine without
+ * them and the only alternative would be to
+ * not attach the device on failure.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
+ csa.callback = daasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+ /*
+ * Lock this peripheral until we are setup.
+ * This first call can't block
+ */
+ (void)cam_periph_lock(periph, PRIBIO);
+ xpt_schedule(periph, /*priority*/5);
+
+ return(CAM_REQ_CMP);
+}
+
+static void
+dastart(struct cam_periph *periph, union ccb *start_ccb)
+{
+ struct da_softc *softc;
+
+ softc = (struct da_softc *)periph->softc;
+
+
+ switch (softc->state) {
+ case DA_STATE_NORMAL:
+ {
+ /* Pull a buffer from the queue and get going on it */
+ struct buf *bp;
+ int s;
+
+ /*
+ * See if there is a buf with work for us to do..
+ */
+ s = splbio();
+ bp = bufq_first(&softc->buf_queue);
+ if (periph->immediate_priority <= periph->pinfo.priority) {
+ CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
+ ("queuing for immediate ccb\n"));
+ start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
+ SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
+ periph_links.sle);
+ periph->immediate_priority = CAM_PRIORITY_NONE;
+ splx(s);
+ wakeup(&periph->ccb_list);
+ } else if (bp == NULL) {
+ splx(s);
+ xpt_release_ccb(start_ccb);
+ } else {
+ int oldspl;
+ u_int8_t tag_code;
+
+ bufq_remove(&softc->buf_queue, bp);
+
+ devstat_start_transaction(&softc->device_stats);
+
+ if ((bp->b_flags & B_ORDERED) != 0
+ || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
+ softc->flags &= ~DA_FLAG_NEED_OTAG;
+ softc->ordered_tag_count++;
+ tag_code = MSG_ORDERED_Q_TAG;
+ } else {
+ tag_code = MSG_SIMPLE_Q_TAG;
+ }
+ scsi_read_write(&start_ccb->csio,
+ /*retries*/4,
+ dadone,
+ tag_code,
+ bp->b_flags & B_READ,
+ /*byte2*/0,
+ /*minimum_cmd_size*/ 6,
+ bp->b_pblkno,
+ bp->b_bcount / softc->params.secsize,
+ bp->b_data,
+ bp->b_bcount,
+ /*sense_len*/SSD_FULL_SIZE,
+ DA_DEFAULT_TIMEOUT * 1000);
+ start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
+
+ /*
+			 * Block out any asynchronous callbacks
+ * while we touch the pending ccb list.
+ */
+ oldspl = splcam();
+ LIST_INSERT_HEAD(&softc->pending_ccbs,
+ &start_ccb->ccb_h, periph_links.le);
+ splx(oldspl);
+
+ /* We expect a unit attention from this device */
+ if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
+ start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
+ softc->flags &= ~DA_FLAG_RETRY_UA;
+ }
+
+ start_ccb->ccb_h.ccb_bp = bp;
+ bp = bufq_first(&softc->buf_queue);
+ splx(s);
+
+ xpt_action(start_ccb);
+ }
+
+ if (bp != NULL) {
+ /* Have more work to do, so ensure we stay scheduled */
+ xpt_schedule(periph, /* XXX priority */1);
+ }
+ break;
+ }
+ case DA_STATE_PROBE:
+ {
+ struct ccb_scsiio *csio;
+ struct scsi_read_capacity_data *rcap;
+
+ rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
+ M_TEMP,
+ M_NOWAIT);
+ if (rcap == NULL) {
+ printf("dastart: Couldn't malloc read_capacity data\n");
+ /* da_free_periph??? */
+ break;
+ }
+ csio = &start_ccb->csio;
+ scsi_read_capacity(csio,
+ /*retries*/4,
+ dadone,
+ MSG_SIMPLE_Q_TAG,
+ rcap,
+ SSD_FULL_SIZE,
+ /*timeout*/5000);
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
+ xpt_action(start_ccb);
+ break;
+ }
+ }
+}
+
+
+static void
+dadone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct da_softc *softc;
+ struct ccb_scsiio *csio;
+
+ softc = (struct da_softc *)periph->softc;
+ csio = &done_ccb->csio;
+ switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
+ case DA_CCB_BUFFER_IO:
+ {
+ struct buf *bp;
+ int oldspl;
+
+ bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ int error;
+ int s;
+ int sf;
+
+ if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
+ sf = SF_RETRY_UA;
+ else
+ sf = 0;
+
+ if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
+ /*
+				 * A retry was scheduled, so
+ * just return.
+ */
+ return;
+ }
+ if (error != 0) {
+ struct buf *q_bp;
+
+ s = splbio();
+
+ if (error == ENXIO) {
+ /*
+ * Catastrophic error. Mark our pack as
+ * invalid.
+ */
+ /* XXX See if this is really a media
+ * change first.
+ */
+ xpt_print_path(periph->path);
+ printf("Invalidating pack\n");
+ softc->flags |= DA_FLAG_PACK_INVALID;
+ }
+
+ /*
+ * return all queued I/O with EIO, so that
+ * the client can retry these I/Os in the
+ * proper order should it attempt to recover.
+ */
+ while ((q_bp = bufq_first(&softc->buf_queue))
+ != NULL) {
+ bufq_remove(&softc->buf_queue, q_bp);
+ q_bp->b_resid = q_bp->b_bcount;
+ q_bp->b_error = EIO;
+ q_bp->b_flags |= B_ERROR;
+ biodone(q_bp);
+ }
+ splx(s);
+ bp->b_error = error;
+ bp->b_resid = bp->b_bcount;
+ bp->b_flags |= B_ERROR;
+ } else {
+ bp->b_resid = csio->resid;
+ bp->b_error = 0;
+ if (bp->b_resid != 0) {
+ /* Short transfer ??? */
+ bp->b_flags |= B_ERROR;
+ }
+ }
+ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ } else {
+ bp->b_resid = csio->resid;
+ if (csio->resid > 0)
+ bp->b_flags |= B_ERROR;
+ }
+
+ /*
+	 * Block out any asynchronous callbacks
+ * while we touch the pending ccb list.
+ */
+ oldspl = splcam();
+ LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
+ splx(oldspl);
+
+ devstat_end_transaction(&softc->device_stats,
+ bp->b_bcount - bp->b_resid,
+ done_ccb->csio.tag_action & 0xf,
+ (bp->b_flags & B_READ) ? DEVSTAT_READ
+ : DEVSTAT_WRITE);
+
+ if (softc->device_stats.busy_count == 0)
+ softc->flags |= DA_FLAG_WENT_IDLE;
+
+ biodone(bp);
+ break;
+ }
+ case DA_CCB_PROBE:
+ {
+ struct scsi_read_capacity_data *rdcap;
+ char announce_buf[80];
+
+ rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ struct disk_params *dp;
+
+ dasetgeom(periph, rdcap);
+ dp = &softc->params;
+ sprintf(announce_buf,
+ "%ldMB (%d %d byte sectors: %dH %dS/T %dC)",
+ dp->sectors / ((1024L * 1024L) / dp->secsize),
+ dp->sectors, dp->secsize, dp->heads,
+ dp->secs_per_track, dp->cylinders);
+ } else {
+ int error;
+
+ /*
+ * Retry any UNIT ATTENTION type errors. They
+ * are expected at boot.
+ */
+ error = daerror(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART) {
+ /*
+				 * A retry was scheduled, so
+ * just return.
+ */
+ return;
+ } else if (error != 0) {
+ struct scsi_sense_data *sense;
+ int asc, ascq;
+ int sense_key, error_code;
+ int have_sense;
+ cam_status status;
+ struct ccb_getdev cgd;
+
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ status = done_ccb->ccb_h.status;
+
+ xpt_setup_ccb(&cgd.ccb_h,
+ done_ccb->ccb_h.path,
+ /* priority */ 1);
+ cgd.ccb_h.func_code = XPT_GDEV_TYPE;
+ xpt_action((union ccb *)&cgd);
+
+ if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
+ || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
+ || ((status & CAM_AUTOSNS_VALID) == 0))
+ have_sense = FALSE;
+ else
+ have_sense = TRUE;
+
+ if (have_sense) {
+ sense = &csio->sense_data;
+ scsi_extract_sense(sense, &error_code,
+ &sense_key,
+ &asc, &ascq);
+ }
+ /*
+ * With removable media devices, we expect
+ * 0x3a (Medium not present) errors, since not
+ * everyone leaves a disk in the drive. If
+ * the error is anything else, though, we
+ * shouldn't attach.
+ */
+ if ((have_sense) && (asc == 0x3a)
+ && (error_code == SSD_CURRENT_ERROR))
+ sprintf(announce_buf,
+ "Attempt to query device "
+ "size failed: %s, %s",
+ scsi_sense_key_text[sense_key],
+ scsi_sense_desc(asc,ascq,
+ &cgd.inq_data));
+ else {
+ /*
+ * If we have sense information, go
+ * ahead and print it out.
+ * Otherwise, just say that we
+ * couldn't attach.
+ */
+ if ((have_sense) && (asc || ascq)
+ && (error_code == SSD_CURRENT_ERROR))
+ sprintf(announce_buf,
+ "fatal error: %s, %s "
+ "-- failed to attach "
+ "to device",
+ scsi_sense_key_text[sense_key],
+ scsi_sense_desc(asc,ascq,
+ &cgd.inq_data));
+ else
+ sprintf(announce_buf,
+ "fatal error, failed"
+ " to attach to device");
+
+ /*
+ * Just print out the error, not
+ * the full probe message, when we
+ * don't attach.
+ */
+ printf("%s%d: %s\n",
+ periph->periph_name,
+ periph->unit_number,
+ announce_buf);
+ scsi_sense_print(&done_ccb->csio);
+
+ /*
+ * Free up resources.
+ */
+ cam_extend_release(daperiphs,
+ periph->unit_number);
+ cam_periph_invalidate(periph);
+ periph = NULL;
+ }
+ }
+ }
+ free(rdcap, M_TEMP);
+ if (periph != NULL) {
+ xpt_announce_periph(periph, announce_buf);
+ softc->state = DA_STATE_NORMAL;
+ cam_periph_unlock(periph);
+ }
+ break;
+ }
+ case DA_CCB_WAITING:
+ {
+ /* Caller will release the CCB */
+ wakeup(&done_ccb->ccb_h.cbfcnp);
+ return;
+ }
+ case DA_CCB_DUMP:
+ /* No-op. We're polling */
+ return;
+ }
+ xpt_release_ccb(done_ccb);
+}
+
+static int
+daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
+{
+ struct da_softc *softc;
+ struct cam_periph *periph;
+
+ periph = xpt_path_periph(ccb->ccb_h.path);
+ softc = (struct da_softc *)periph->softc;
+
+ return(cam_periph_error(ccb, cam_flags, sense_flags,
+ &softc->saved_ccb));
+}
+
+static void
+daprevent(struct cam_periph *periph, int action)
+{
+ struct da_softc *softc;
+ union ccb *ccb;
+ int error;
+
+ softc = (struct da_softc *)periph->softc;
+
+ if (((action == PR_ALLOW)
+ && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
+ || ((action == PR_PREVENT)
+ && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
+ return;
+ }
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+
+ scsi_prevent(&ccb->csio,
+ /*retries*/1,
+ /*cbcfp*/dadone,
+ MSG_SIMPLE_Q_TAG,
+ action,
+ SSD_FULL_SIZE,
+ 5000);
+
+ error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ if (error == 0) {
+ if (action == PR_ALLOW)
+ softc->flags &= ~DA_FLAG_PACK_LOCKED;
+ else
+ softc->flags |= DA_FLAG_PACK_LOCKED;
+ }
+
+ xpt_release_ccb(ccb);
+}
+
+static void
+dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
+{
+ struct ccb_calc_geometry ccg;
+ struct da_softc *softc;
+ struct disk_params *dp;
+
+ softc = (struct da_softc *)periph->softc;
+
+ dp = &softc->params;
+ dp->secsize = scsi_4btoul(rdcap->length);
+ dp->sectors = scsi_4btoul(rdcap->addr) + 1;
+ /*
+ * Have the controller provide us with a geometry
+ * for this disk. The only time the geometry
+ * matters is when we boot and the controller
+ * is the only one knowledgeable enough to come
+ * up with something that will make this a bootable
+ * device.
+ */
+ xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
+ ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
+ ccg.block_size = dp->secsize;
+ ccg.volume_size = dp->sectors;
+ ccg.heads = 0;
+ ccg.secs_per_track = 0;
+ ccg.cylinders = 0;
+ xpt_action((union ccb*)&ccg);
+ dp->heads = ccg.heads;
+ dp->secs_per_track = ccg.secs_per_track;
+ dp->cylinders = ccg.cylinders;
+}
+
+#endif /* KERNEL */
+
+void
+scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int readop, u_int8_t byte2,
+ int minimum_cmd_size, u_int32_t lba, u_int32_t block_count,
+ u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ u_int8_t cdb_len;
+	/*
+	 * Use the smallest possible command to perform the operation,
+	 * as some legacy hardware does not support the 10 byte
+	 * commands. If any bits are set in byte2, we have to go with
+	 * a larger command, since the 6 byte CDB has no byte2 field
+	 * to carry them.
+	 */
+	if ((minimum_cmd_size < 10)
+	 && ((lba & 0x1fffff) == lba)
+	 && ((block_count & 0xff) == block_count)
+	 && (byte2 == 0)) {
+ /*
+ * We can fit in a 6 byte cdb.
+ */
+ struct scsi_rw_6 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_rw_6 *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
+ scsi_ulto3b(lba, scsi_cmd->addr);
+ scsi_cmd->length = block_count & 0xff;
+ scsi_cmd->control = 0;
+ cdb_len = sizeof(*scsi_cmd);
+
+ CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
+ ("6byte: %x%x%x:%d:%d\n", scsi_cmd->addr[0],
+ scsi_cmd->addr[1], scsi_cmd->addr[2],
+ scsi_cmd->length, dxfer_len));
+ } else if ((minimum_cmd_size < 12)
+ && ((block_count & 0xffff) == block_count)) {
+ /*
+ * Need a 10 byte cdb.
+ */
+ struct scsi_rw_10 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_rw_10 *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
+ scsi_cmd->byte2 = byte2;
+ scsi_ulto4b(lba, scsi_cmd->addr);
+ scsi_cmd->reserved = 0;
+ scsi_ulto2b(block_count, scsi_cmd->length);
+ scsi_cmd->control = 0;
+ cdb_len = sizeof(*scsi_cmd);
+
+ CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
+ ("10byte: %x%x%x%x:%x%x: %d\n", scsi_cmd->addr[0],
+ scsi_cmd->addr[1], scsi_cmd->addr[2],
+ scsi_cmd->addr[3], scsi_cmd->length[0],
+ scsi_cmd->length[1], dxfer_len));
+ } else {
+ /*
+ * The block count is too big for a 10 byte CDB, use a 12
+ * byte CDB. READ/WRITE(12) are currently only defined for
+ * optical devices.
+ */
+ struct scsi_rw_12 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_rw_12 *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
+ scsi_cmd->byte2 = byte2;
+ scsi_ulto4b(lba, scsi_cmd->addr);
+ scsi_cmd->reserved = 0;
+ scsi_ulto4b(block_count, scsi_cmd->length);
+ scsi_cmd->control = 0;
+ cdb_len = sizeof(*scsi_cmd);
+
+ CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
+ ("12byte: %x%x%x%x:%x%x%x%x: %d\n", scsi_cmd->addr[0],
+ scsi_cmd->addr[1], scsi_cmd->addr[2],
+ scsi_cmd->addr[3], scsi_cmd->length[0],
+ scsi_cmd->length[1], scsi_cmd->length[2],
+ scsi_cmd->length[3], dxfer_len));
+ }
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ cdb_len,
+ timeout);
+}
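+
+/*
+ * Worked examples of the CDB selection above, assuming a minimum_cmd_size
+ * of 6 and a byte2 of 0:
+ *
+ *	lba 0x001000, block_count 16    -> 6 byte CDB  (READ_6/WRITE_6)
+ *	lba 0x200000, block_count 16    -> 10 byte CDB (lba needs > 21 bits)
+ *	lba 0x001000, block_count 256   -> 10 byte CDB (count needs > 8 bits)
+ *	lba 0x001000, block_count 65536 -> 12 byte CDB (count needs > 16 bits)
+ */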
+
+void
+scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int start, int load_eject,
+ int immediate, u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_start_stop_unit *scsi_cmd;
+ int extra_flags = 0;
+
+ scsi_cmd = (struct scsi_start_stop_unit *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = START_STOP_UNIT;
+ if (start != 0) {
+ scsi_cmd->how |= SSS_START;
+ /* it takes a lot of power to start a drive */
+ extra_flags |= CAM_HIGH_POWER;
+ }
+ if (load_eject != 0)
+ scsi_cmd->how |= SSS_LOEJ;
+ if (immediate != 0)
+ scsi_cmd->byte2 |= SSS_IMMED;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE | extra_flags,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+}
+
+#ifdef KERNEL
+
+static void
+dasendorderedtag(void *arg)
+{
+ struct da_softc *softc;
+ int s;
+
+ for (softc = SLIST_FIRST(&softc_list);
+ softc != NULL;
+ softc = SLIST_NEXT(softc, links)) {
+ s = splsoftcam();
+ if ((softc->ordered_tag_count == 0)
+ && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
+ softc->flags |= DA_FLAG_NEED_OTAG;
+ }
+ if (softc->device_stats.busy_count > 0)
+ softc->flags &= ~DA_FLAG_WENT_IDLE;
+
+ softc->ordered_tag_count = 0;
+ splx(s);
+ }
+ /* Queue us up again */
+ timeout(dasendorderedtag, NULL,
+ (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
+}
+
+#endif /* KERNEL */
diff --git a/sys/cam/scsi/scsi_da.h b/sys/cam/scsi/scsi_da.h
new file mode 100644
index 0000000..8cb1775
--- /dev/null
+++ b/sys/cam/scsi/scsi_da.h
@@ -0,0 +1,391 @@
+/*
+ * Structures and definitions for SCSI commands to Direct Access Devices
+ */
+
+/*
+ * Some lines of this file come from a file of the name "scsi.h"
+ * distributed by OSF as part of mach2.5,
+ * so the following disclaimer has been kept.
+ *
+ * Copyright 1990 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Largely written by Julian Elischer (julian@tfs.com)
+ * for TRW Financial Systems.
+ *
+ * TRW Financial Systems, in accordance with their agreement with Carnegie
+ * Mellon University, makes this software available to CMU to distribute
+ * or use in any manner that they see fit as long as this message is kept with
+ * the software. For this reason TFS also grants any other persons or
+ * organisations permission to use or modify this software.
+ *
+ * TFS supplies this software to be publicly redistributed
+ * on the understanding that TFS is not responsible for the correct
+ * functioning of this software in any circumstances.
+ *
+ * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
+ *
+ * $Id$
+ */
+
+#ifndef _SCSI_SCSI_DA_H
+#define _SCSI_SCSI_DA_H 1
+
+#include <sys/cdefs.h>
+
+struct scsi_rezero_unit
+{
+ u_int8_t opcode;
+#define SRZU_LUN_MASK 0xE0
+ u_int8_t byte2;
+ u_int8_t reserved[3];
+ u_int8_t control;
+};
+
+struct scsi_reassign_blocks
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[3];
+ u_int8_t control;
+};
+
+struct scsi_rw_6
+{
+ u_int8_t opcode;
+ u_int8_t addr[3];
+/* only 5 bits are valid in the MSB address byte */
+#define SRW_TOPADDR 0x1F
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_rw_10
+{
+ u_int8_t opcode;
+#define SRW10_RELADDR 0x01
+#define SRW10_FUA 0x08
+#define SRW10_DPO 0x10
+ u_int8_t byte2;
+ u_int8_t addr[4];
+ u_int8_t reserved;
+ u_int8_t length[2];
+ u_int8_t control;
+};
+
+struct scsi_rw_12
+{
+ u_int8_t opcode;
+#define SRW12_RELADDR 0x01
+#define SRW12_FUA 0x08
+#define SRW12_DPO 0x10
+ u_int8_t byte2;
+ u_int8_t addr[4];
+ u_int8_t reserved;
+ u_int8_t length[4];
+ u_int8_t control;
+};
+
+struct scsi_start_stop_unit
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SSS_IMMED 0x01
+ u_int8_t reserved[2];
+ u_int8_t how;
+#define SSS_START 0x01
+#define SSS_LOEJ 0x02
+ u_int8_t control;
+};
+
+struct scsi_read_defect_data_10
+{
+ u_int8_t opcode;
+
+ /*
+ * The most significant 3 bits are the LUN, the other 5 are
+ * reserved.
+ */
+#define SRDD10_LUN_MASK 0xE0
+ u_int8_t byte2;
+#define SRDD10_GLIST 0x08
+#define SRDD10_PLIST 0x10
+#define SRDD10_DLIST_FORMAT_MASK 0x07
+#define SRDD10_BLOCK_FORMAT 0x00
+#define SRDD10_BYTES_FROM_INDEX_FORMAT 0x04
+#define SRDD10_PHYSICAL_SECTOR_FORMAT 0x05
+ u_int8_t format;
+
+ u_int8_t reserved[4];
+
+ u_int8_t alloc_length[2];
+
+ u_int8_t control;
+};
+
+struct scsi_read_defect_data_12
+{
+ u_int8_t opcode;
+
+ /*
+ * The most significant 3 bits are the LUN, the other 5 are
+ * reserved.
+ */
+#define SRDD12_LUN_MASK 0xE0
+ u_int8_t byte2;
+
+#define SRDD12_GLIST 0x08
+#define SRDD12_PLIST 0x10
+#define SRDD12_DLIST_FORMAT_MASK 0x07
+#define SRDD12_BLOCK_FORMAT 0x00
+#define SRDD12_BYTES_FROM_INDEX_FORMAT 0x04
+#define SRDD12_PHYSICAL_SECTOR_FORMAT 0x05
+ u_int8_t format;
+
+ u_int8_t reserved[4];
+
+ u_int8_t alloc_length[4];
+
+ u_int8_t control;
+
+};
+
+
+/*
+ * Opcodes
+ */
+#define REZERO_UNIT 0x01
+#define REASSIGN_BLOCKS 0x07
+#define READ_6 0x08
+#define WRITE_6 0x0a
+#define MODE_SELECT 0x15
+#define MODE_SENSE 0x1a
+#define START_STOP_UNIT 0x1b
+#define READ_10 0x28
+#define WRITE_10 0x2a
+#define READ_DEFECT_DATA_10 0x37
+#define READ_12 0xa8
+#define WRITE_12 0xaa
+#define READ_DEFECT_DATA_12 0xb7
+
+
+struct scsi_reassign_blocks_data
+{
+ u_int8_t reserved[2];
+ u_int8_t length[2];
+ struct {
+ u_int8_t dlbaddr[4]; /* defect logical block address */
+ } defect_descriptor[1];
+};
+
+
+/*
+ * This is the list header for the READ DEFECT DATA(10) command above.
+ * Appending the 10 to the structure name may be a bit misleading, since the
+ * header itself is only 4 bytes long, but it does tie it to the 10 byte command.
+ */
+struct scsi_read_defect_data_hdr_10
+{
+ u_int8_t reserved;
+#define SRDDH10_GLIST 0x08
+#define SRDDH10_PLIST 0x10
+#define SRDDH10_DLIST_FORMAT_MASK 0x07
+#define SRDDH10_BLOCK_FORMAT 0x00
+#define SRDDH10_BYTES_FROM_INDEX_FORMAT 0x04
+#define SRDDH10_PHYSICAL_SECTOR_FORMAT 0x05
+ u_int8_t format;
+ u_int8_t length[2];
+};
+
+struct scsi_defect_desc_block
+{
+ u_int8_t address[4];
+};
+
+struct scsi_defect_desc_bytes_from_index
+{
+ u_int8_t cylinder[3];
+ u_int8_t head;
+ u_int8_t bytes_from_index[4];
+};
+
+struct scsi_defect_desc_phys_sector
+{
+ u_int8_t cylinder[3];
+ u_int8_t head;
+ u_int8_t sector[4];
+};
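+
+/*
+ * Example (illustrative only): with the block defect list format, the
+ * number of descriptors following the 10 byte list header is the returned
+ * list length divided by the descriptor size:
+ *
+ *	struct scsi_read_defect_data_hdr_10 *hdr;
+ *	u_int32_t ndefects;
+ *
+ *	ndefects = scsi_2btoul(hdr->length) /
+ *		   sizeof(struct scsi_defect_desc_block);
+ */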
+
+struct scsi_read_defect_data_hdr_12
+{
+ u_int8_t reserved;
+#define SRDDH12_GLIST 0x08
+#define SRDDH12_PLIST 0x10
+#define SRDDH12_DLIST_FORMAT_MASK 0x07
+#define SRDDH12_BLOCK_FORMAT 0x00
+#define SRDDH12_BYTES_FROM_INDEX_FORMAT 0x04
+#define SRDDH12_PHYSICAL_SECTOR_FORMAT 0x05
+ u_int8_t format;
+ u_int8_t length[4];
+};
+
+union disk_pages /* this is the structure copied from osf */
+{
+ struct format_device_page {
+ u_int8_t pg_code; /* page code (should be 3) */
+#define SMS_FORMAT_DEVICE_PAGE 0x03 /* only 6 bits valid */
+ u_int8_t pg_length; /* page length (should be 0x16) */
+#define SMS_FORMAT_DEVICE_PLEN 0x16
+ u_int8_t trk_z_1; /* tracks per zone (MSB) */
+ u_int8_t trk_z_0; /* tracks per zone (LSB) */
+ u_int8_t alt_sec_1; /* alternate sectors per zone (MSB) */
+ u_int8_t alt_sec_0; /* alternate sectors per zone (LSB) */
+ u_int8_t alt_trk_z_1; /* alternate tracks per zone (MSB) */
+ u_int8_t alt_trk_z_0; /* alternate tracks per zone (LSB) */
+ u_int8_t alt_trk_v_1; /* alternate tracks per volume (MSB) */
+ u_int8_t alt_trk_v_0; /* alternate tracks per volume (LSB) */
+ u_int8_t ph_sec_t_1; /* physical sectors per track (MSB) */
+ u_int8_t ph_sec_t_0; /* physical sectors per track (LSB) */
+ u_int8_t bytes_s_1; /* bytes per sector (MSB) */
+ u_int8_t bytes_s_0; /* bytes per sector (LSB) */
+ u_int8_t interleave_1; /* interleave (MSB) */
+ u_int8_t interleave_0; /* interleave (LSB) */
+ u_int8_t trk_skew_1; /* track skew factor (MSB) */
+ u_int8_t trk_skew_0; /* track skew factor (LSB) */
+ u_int8_t cyl_skew_1; /* cylinder skew (MSB) */
+ u_int8_t cyl_skew_0; /* cylinder skew (LSB) */
+ u_int8_t flags; /* various */
+#define DISK_FMT_SURF 0x10
+#define DISK_FMT_RMB 0x20
+#define DISK_FMT_HSEC 0x40
+#define DISK_FMT_SSEC 0x80
+ u_int8_t reserved21;
+ u_int8_t reserved22;
+ u_int8_t reserved23;
+ } format_device;
+ struct rigid_geometry_page {
+ u_int8_t pg_code; /* page code (should be 4) */
+#define SMS_RIGID_GEOMETRY_PAGE 0x04
+ u_int8_t pg_length; /* page length (should be 0x16) */
+#define SMS_RIGID_GEOMETRY_PLEN 0x16
+ u_int8_t ncyl_2; /* number of cylinders (MSB) */
+ u_int8_t ncyl_1; /* number of cylinders */
+ u_int8_t ncyl_0; /* number of cylinders (LSB) */
+ u_int8_t nheads; /* number of heads */
+ u_int8_t st_cyl_wp_2; /* starting cyl., write precomp (MSB) */
+ u_int8_t st_cyl_wp_1; /* starting cyl., write precomp */
+ u_int8_t st_cyl_wp_0; /* starting cyl., write precomp (LSB) */
+ u_int8_t st_cyl_rwc_2; /* starting cyl., red. write cur (MSB)*/
+ u_int8_t st_cyl_rwc_1; /* starting cyl., red. write cur */
+ u_int8_t st_cyl_rwc_0; /* starting cyl., red. write cur (LSB)*/
+ u_int8_t driv_step_1; /* drive step rate (MSB) */
+ u_int8_t driv_step_0; /* drive step rate (LSB) */
+ u_int8_t land_zone_2; /* landing zone cylinder (MSB) */
+ u_int8_t land_zone_1; /* landing zone cylinder */
+ u_int8_t land_zone_0; /* landing zone cylinder (LSB) */
+ u_int8_t rpl; /* rotational position locking (2 bits) */
+ u_int8_t rot_offset; /* rotational offset */
+ u_int8_t reserved19;
+ u_int8_t medium_rot_rate_1; /* medium rotation rate (RPM) (MSB) */
+ u_int8_t medium_rot_rate_0; /* medium rotation rate (RPM) (LSB) */
+ u_int8_t reserved22;
+ u_int8_t reserved23;
+ } rigid_geometry;
+ struct flexible_disk_page {
+ u_int8_t pg_code; /* page code (should be 5) */
+#define SMS_FLEXIBLE_GEOMETRY_PAGE 0x05
+ u_int8_t pg_length; /* page length (should be 0x1E) */
+#define SMS_FLEXIBLE_GEOMETRY_PLEN	0x1E
+ u_int8_t xfr_rate_1; /* transfer rate (MSB) */
+ u_int8_t xfr_rate_0; /* transfer rate (LSB) */
+ u_int8_t nheads; /* number of heads */
+ u_int8_t sec_per_track; /* Sectors per track */
+ u_int8_t bytes_s_1; /* bytes per sector (MSB) */
+ u_int8_t bytes_s_0; /* bytes per sector (LSB) */
+ u_int8_t ncyl_1; /* number of cylinders (MSB) */
+ u_int8_t ncyl_0; /* number of cylinders (LSB) */
+ u_int8_t st_cyl_wp_1; /* starting cyl., write precomp (MSB) */
+ u_int8_t st_cyl_wp_0; /* starting cyl., write precomp (LSB) */
+ u_int8_t st_cyl_rwc_1; /* starting cyl., red. write cur (MSB)*/
+ u_int8_t st_cyl_rwc_0; /* starting cyl., red. write cur (LSB)*/
+ u_int8_t driv_step_1; /* drive step rate (MSB) */
+ u_int8_t driv_step_0; /* drive step rate (LSB) */
+ u_int8_t driv_step_pw; /* drive step pulse width */
+ u_int8_t head_stl_del_1;/* Head settle delay (MSB) */
+ u_int8_t head_stl_del_0;/* Head settle delay (LSB) */
+ u_int8_t motor_on_del; /* Motor on delay */
+ u_int8_t motor_off_del; /* Motor off delay */
+ u_int8_t trdy_ssn_mo; /* XXX ??? */
+ u_int8_t spc; /* XXX ??? */
+ u_int8_t write_comp; /* Write compensation */
+ u_int8_t head_load_del; /* Head load delay */
+ u_int8_t head_uload_del;/* Head un-load delay */
+ u_int8_t pin32_pin2;
+ u_int8_t pin4_pint1;
+ u_int8_t medium_rot_rate_1; /* medium rotation rate (RPM) (MSB) */
+ u_int8_t medium_rot_rate_0; /* medium rotation rate (RPM) (LSB) */
+ u_int8_t reserved30;
+ u_int8_t reserved31;
+ } flexible_disk;
+};
+
+struct scsi_da_rw_recovery_page {
+ u_int8_t page_code;
+#define SMS_RW_ERROR_RECOVERY_PAGE 0x01
+ u_int8_t page_length;
+ u_int8_t byte3;
+#define SMS_RWER_AWRE 0x80
+#define SMS_RWER_ARRE 0x40
+#define SMS_RWER_TB 0x20
+#define SMS_RWER_RC 0x10
+#define SMS_RWER_EER 0x08
+#define SMS_RWER_PER 0x04
+#define SMS_RWER_DTE 0x02
+#define SMS_RWER_DCR 0x01
+ u_int8_t read_retry_count;
+ u_int8_t correction_span;
+ u_int8_t head_offset_count;
+ u_int8_t data_strobe_offset_cnt;
+ u_int8_t reserved;
+ u_int8_t write_retry_count;
+ u_int8_t reserved2;
+ u_int8_t recovery_time_limit[2];
+};
+
+__BEGIN_DECLS
+void scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int readop, u_int8_t byte2,
+ int minimum_cmd_size, u_int32_t lba,
+ u_int32_t block_count, u_int8_t *data_ptr,
+ u_int32_t dxfer_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int start, int load_eject,
+ int immediate, u_int8_t sense_len, u_int32_t timeout);
+__END_DECLS
+
+#endif /* _SCSI_SCSI_DA_H */
diff --git a/sys/cam/scsi/scsi_message.h b/sys/cam/scsi/scsi_message.h
new file mode 100644
index 0000000..2dad159
--- /dev/null
+++ b/sys/cam/scsi/scsi_message.h
@@ -0,0 +1,42 @@
+/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */
+#define MSG_CMDCOMPLETE 0x00 /* M/M */
+#define MSG_EXTENDED 0x01 /* O/O */
+#define MSG_SAVEDATAPOINTER 0x02 /* O/O */
+#define MSG_RESTOREPOINTERS 0x03 /* O/O */
+#define MSG_DISCONNECT 0x04 /* O/O */
+#define MSG_INITIATOR_DET_ERR 0x05 /* M/M */
+#define MSG_ABORT 0x06 /* O/M */
+#define MSG_MESSAGE_REJECT 0x07 /* M/M */
+#define MSG_NOOP 0x08 /* M/M */
+#define MSG_PARITY_ERROR 0x09 /* M/M */
+#define MSG_LINK_CMD_COMPLETE 0x0a /* O/O */
+#define MSG_LINK_CMD_COMPLETEF 0x0b /* O/O */
+#define MSG_BUS_DEV_RESET 0x0c /* O/M */
+#define MSG_ABORT_TAG 0x0d /* O/O */
+#define MSG_CLEAR_QUEUE 0x0e /* O/O */
+#define MSG_INIT_RECOVERY 0x0f /* O/O */
+#define MSG_REL_RECOVERY 0x10 /* O/O */
+#define MSG_TERM_IO_PROC 0x11 /* O/O */
+
+/* Messages (2 byte) */
+#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */
+#define MSG_HEAD_OF_Q_TAG 0x21 /* O/O */
+#define MSG_ORDERED_Q_TAG 0x22 /* O/O */
+#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */
+
+/* Identify message */ /* M/M */
+#define MSG_IDENTIFYFLAG 0x80
+#define MSG_IDENTIFY_DISCFLAG 0x40
+#define MSG_IDENTIFY(lun, disc) (((disc) ? 0xc0 : MSG_IDENTIFYFLAG) | (lun))
+#define MSG_ISIDENTIFY(m) ((m) & MSG_IDENTIFYFLAG)
+#define MSG_IDENTIFY_LUNMASK 0x01F
+
+/* Extended messages (opcode and length) */
+#define MSG_EXT_SDTR 0x01
+#define MSG_EXT_SDTR_LEN 0x03
+
+#define MSG_EXT_WDTR 0x03
+#define MSG_EXT_WDTR_LEN 0x02
+#define MSG_EXT_WDTR_BUS_8_BIT 0x00
+#define MSG_EXT_WDTR_BUS_16_BIT 0x01
+#define MSG_EXT_WDTR_BUS_32_BIT 0x02
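For reference, the IDENTIFY macros above compose the message byte directly; a short worked example (values chosen for illustration):

	/* Identify LUN 3 with disconnect privilege: 0xc0 | 3 == 0xc3 */
	u_int8_t id = MSG_IDENTIFY(3, /*disc*/1);

	if (MSG_ISIDENTIFY(id))
		printf("lun %d\n", id & MSG_IDENTIFY_LUNMASK);	/* prints "lun 3" */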
diff --git a/sys/cam/scsi/scsi_pass.c b/sys/cam/scsi/scsi_pass.c
new file mode 100644
index 0000000..f16b11e
--- /dev/null
+++ b/sys/cam/scsi/scsi_pass.c
@@ -0,0 +1,787 @@
+/*
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/buf.h>
+#include <sys/dkbad.h>
+#include <sys/disklabel.h>
+#include <sys/diskslice.h>
+#include <sys/malloc.h>
+#include <sys/fcntl.h>
+#include <sys/stat.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/proc.h>
+#include <sys/cdio.h>
+#include <sys/errno.h>
+#include <sys/devicestat.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_extend.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_debug.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/scsi/scsi_pass.h>
+
+typedef enum {
+ PASS_FLAG_OPEN = 0x01,
+ PASS_FLAG_LOCKED = 0x02,
+ PASS_FLAG_INVALID = 0x04
+} pass_flags;
+
+typedef enum {
+ PASS_STATE_NORMAL
+} pass_state;
+
+typedef enum {
+ PASS_CCB_BUFFER_IO,
+ PASS_CCB_WAITING
+} pass_ccb_types;
+
+#define ccb_type ppriv_field0
+#define ccb_bp ppriv_ptr1
+
+struct pass_softc {
+ pass_state state;
+ pass_flags flags;
+ u_int8_t pd_type;
+ struct buf_queue_head buf_queue;
+ union ccb saved_ccb;
+ struct devstat device_stats;
+#ifdef DEVFS
+ void *pass_devfs_token;
+ void *ctl_devfs_token;
+#endif
+};
+
+#ifndef MIN
+#define MIN(x,y) ((x<y) ? x : y)
+#endif
+
+#define PASS_CDEV_MAJOR 31
+
+static d_open_t passopen;
+static d_read_t passread;
+static d_write_t passwrite;
+static d_close_t passclose;
+static d_ioctl_t passioctl;
+static d_strategy_t passstrategy;
+
+static periph_init_t passinit;
+static periph_ctor_t passregister;
+static periph_dtor_t passcleanup;
+static periph_start_t passstart;
+static void passasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg);
+static void passdone(struct cam_periph *periph,
+ union ccb *done_ccb);
+static int passerror(union ccb *ccb, u_int32_t cam_flags,
+ u_int32_t sense_flags);
+static int passsendccb(struct cam_periph *periph, union ccb *ccb,
+ union ccb *inccb);
+
+static struct periph_driver passdriver =
+{
+ passinit, "pass",
+ TAILQ_HEAD_INITIALIZER(passdriver.units), /* generation */ 0
+};
+
+DATA_SET(periphdriver_set, passdriver);
+
+static struct cdevsw pass_cdevsw =
+{
+ /*d_open*/ passopen,
+ /*d_close*/ passclose,
+ /*d_read*/ passread,
+ /*d_write*/ passwrite,
+ /*d_ioctl*/ passioctl,
+ /*d_stop*/ nostop,
+ /*d_reset*/ noreset,
+ /*d_devtotty*/ nodevtotty,
+ /*d_poll*/ seltrue,
+ /*d_mmap*/ nommap,
+ /*d_strategy*/ passstrategy,
+ /*d_name*/ "pass",
+ /*d_spare*/ NULL,
+ /*d_maj*/ -1,
+ /*d_dump*/ nodump,
+ /*d_psize*/ nopsize,
+ /*d_flags*/ 0,
+ /*d_maxio*/ 0,
+ /*b_maj*/ -1
+};
+
+static struct extend_array *passperiphs;
+
+static void
+passinit(void)
+{
+ cam_status status;
+ struct cam_path *path;
+
+ /*
+ * Create our extend array for storing the devices we attach to.
+ */
+ passperiphs = cam_extend_new();
+ if (passperiphs == NULL) {
+		printf("pass: Failed to alloc extend array!\n");
+ return;
+ }
+
+ /*
+ * Install a global async callback. This callback will
+ * receive async callbacks like "new device found".
+ */
+ status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+
+ if (status == CAM_REQ_CMP) {
+ struct ccb_setasync csa;
+
+ xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_FOUND_DEVICE;
+ csa.callback = passasync;
+ csa.callback_arg = NULL;
+ xpt_action((union ccb *)&csa);
+ status = csa.ccb_h.status;
+ xpt_free_path(path);
+ }
+
+ if (status != CAM_REQ_CMP) {
+ printf("pass: Failed to attach master async callback "
+ "due to status 0x%x!\n", status);
+ } else {
+ dev_t dev;
+
+		/* If we were successful, register our devsw */
+ dev = makedev(PASS_CDEV_MAJOR, 0);
+ cdevsw_add(&dev, &pass_cdevsw, NULL);
+ }
+
+}
+
+static void
+passcleanup(struct cam_periph *periph)
+{
+ cam_extend_release(passperiphs, periph->unit_number);
+
+ if (bootverbose) {
+ xpt_print_path(periph->path);
+ printf("removing device entry\n");
+ }
+ free(periph->softc, M_DEVBUF);
+}
+
+static void
+passasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg)
+{
+ struct cam_periph *periph;
+
+ periph = (struct cam_periph *)callback_arg;
+
+ switch (code) {
+ case AC_FOUND_DEVICE:
+ {
+ struct ccb_getdev *cgd;
+ cam_status status;
+
+ cgd = (struct ccb_getdev *)arg;
+
+ /*
+ * Allocate a peripheral instance for
+ * this device and start the probe
+ * process.
+ */
+ status = cam_periph_alloc(passregister, passcleanup, passstart,
+ "pass", CAM_PERIPH_BIO,
+ cgd->ccb_h.path, passasync,
+ AC_FOUND_DEVICE, cgd);
+
+ if (status != CAM_REQ_CMP
+ && status != CAM_REQ_INPROG)
+ printf("passasync: Unable to attach new device "
+ "due to status 0x%x\n", status);
+
+ break;
+ }
+ case AC_LOST_DEVICE:
+ {
+ int s;
+ struct pass_softc *softc;
+ struct buf *q_bp;
+ struct ccb_setasync csa;
+
+ softc = (struct pass_softc *)periph->softc;
+
+ /*
+		 * Ensure that no other async callbacks that
+ * might affect this peripheral can come through.
+ */
+ s = splcam();
+
+ /*
+ * De-register any async callbacks.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path,
+ /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = 0;
+ csa.callback = passasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ softc->flags |= PASS_FLAG_INVALID;
+
+ /*
+ * Return all queued I/O with ENXIO.
+ * XXX Handle any transactions queued to the card
+ * with XPT_ABORT_CCB.
+ */
+ while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
+ bufq_remove(&softc->buf_queue, q_bp);
+ q_bp->b_resid = q_bp->b_bcount;
+ q_bp->b_error = ENXIO;
+ q_bp->b_flags |= B_ERROR;
+ biodone(q_bp);
+ }
+ devstat_remove_entry(&softc->device_stats);
+
+ if (bootverbose) {
+ xpt_print_path(periph->path);
+ printf("lost device\n");
+ }
+
+ splx(s);
+
+ cam_periph_invalidate(periph);
+ break;
+ }
+ case AC_TRANSFER_NEG:
+ case AC_SENT_BDR:
+ case AC_SCSI_AEN:
+ case AC_UNSOL_RESEL:
+ case AC_BUS_RESET:
+ default:
+ break;
+ }
+}
+
+static cam_status
+passregister(struct cam_periph *periph, void *arg)
+{
+ int s;
+ struct pass_softc *softc;
+ struct ccb_setasync csa;
+ struct ccb_getdev *cgd;
+
+ cgd = (struct ccb_getdev *)arg;
+ if (periph == NULL) {
+ printf("passregister: periph was NULL!!\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ if (cgd == NULL) {
+ printf("passregister: no getdev CCB, can't register device\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ softc = (struct pass_softc *)malloc(sizeof(*softc),
+ M_DEVBUF, M_NOWAIT);
+
+ if (softc == NULL) {
+ printf("passregister: Unable to probe new device. "
+ "Unable to allocate softc\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ bzero(softc, sizeof(*softc));
+ softc->state = PASS_STATE_NORMAL;
+ softc->pd_type = cgd->pd_type;
+ bufq_init(&softc->buf_queue);
+
+ periph->softc = softc;
+
+ cam_extend_set(passperiphs, periph->unit_number, periph);
+ /*
+ * We pass in 0 for a blocksize, since we don't
+ * know what the blocksize of this device is, if
+ * it even has a blocksize.
+ */
+ devstat_add_entry(&softc->device_stats, "pass", periph->unit_number,
+ 0, DEVSTAT_NO_BLOCKSIZE | DEVSTAT_NO_ORDERED_TAGS,
+ cgd->pd_type |
+ DEVSTAT_TYPE_IF_SCSI |
+ DEVSTAT_TYPE_PASS);
+ /*
+ * Add an async callback so that we get
+ * notified if this device goes away.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_LOST_DEVICE;
+ csa.callback = passasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ if (bootverbose)
+ xpt_announce_periph(periph, NULL);
+
+ return(CAM_REQ_CMP);
+}
+
+static int
+passopen(dev_t dev, int flags, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+ int unit, error;
+
+ error = 0; /* default to no error */
+
+ /* unit = dkunit(dev); */
+ /* XXX KDM fix this */
+ unit = minor(dev) & 0xff;
+
+ periph = cam_extend_get(passperiphs, unit);
+
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct pass_softc *)periph->softc;
+
+ if (softc->flags & PASS_FLAG_INVALID)
+ return(ENXIO);
+
+ /*
+ * We don't allow nonblocking access.
+ */
+ if ((flags & O_NONBLOCK) != 0) {
+		printf("%s%d: can't do nonblocking access\n",
+ periph->periph_name,
+ periph->unit_number);
+ return(ENODEV);
+ }
+
+ if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0)
+ return (error);
+
+ if ((softc->flags & PASS_FLAG_OPEN) == 0) {
+		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
+			cam_periph_unlock(periph);
+			return(ENXIO);
+		}
+ softc->flags |= PASS_FLAG_OPEN;
+ }
+
+ cam_periph_unlock(periph);
+
+ return (error);
+}
+
+static int
+passclose(dev_t dev, int flag, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+ int unit, error;
+
+ /* unit = dkunit(dev); */
+ /* XXX KDM fix this */
+ unit = minor(dev) & 0xff;
+
+ periph = cam_extend_get(passperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct pass_softc *)periph->softc;
+
+ if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
+ return (error);
+
+ softc->flags &= ~PASS_FLAG_OPEN;
+
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+
+ return (0);
+}
+
+static int
+passread(dev_t dev, struct uio *uio, int ioflag)
+{
+ return(physio(passstrategy, NULL, dev, 1, minphys, uio));
+}
+
+static int
+passwrite(dev_t dev, struct uio *uio, int ioflag)
+{
+ return(physio(passstrategy, NULL, dev, 0, minphys, uio));
+}
+
+/*
+ * Actually translate the requested transfer into one the physical driver
+ * can understand. The transfer is described by a buf and will include
+ * only one physical transfer.
+ */
+static void
+passstrategy(struct buf *bp)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+ u_int unit;
+ int s;
+
+ /*
+ * The read/write interface for the passthrough driver doesn't
+ * really work right now. So, we just pass back EINVAL to tell the
+ * user to go away.
+ */
+ bp->b_error = EINVAL;
+ goto bad;
+
+ /* unit = dkunit(bp->b_dev); */
+ /* XXX KDM fix this */
+ unit = minor(bp->b_dev) & 0xff;
+
+ periph = cam_extend_get(passperiphs, unit);
+ if (periph == NULL) {
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+ softc = (struct pass_softc *)periph->softc;
+
+ /*
+ * Odd number of bytes or negative offset
+ */
+ /* valid request? */
+ if (bp->b_blkno < 0) {
+ bp->b_error = EINVAL;
+ goto bad;
+ }
+
+ /*
+ * Mask interrupts so that the pack cannot be invalidated until
+ * after we are in the queue. Otherwise, we might not properly
+ * clean up one of the buffers.
+ */
+ s = splbio();
+
+ bufq_insert_tail(&softc->buf_queue, bp);
+
+ splx(s);
+
+ /*
+ * Schedule ourselves for performing the work.
+ */
+ xpt_schedule(periph, /* XXX priority */1);
+
+ return;
+bad:
+ bp->b_flags |= B_ERROR;
+
+ /*
+ * Correctly set the buf to indicate a completed xfer
+ */
+ bp->b_resid = bp->b_bcount;
+ biodone(bp);
+ return;
+}
+
+static void
+passstart(struct cam_periph *periph, union ccb *start_ccb)
+{
+ struct pass_softc *softc;
+ int s;
+
+ softc = (struct pass_softc *)periph->softc;
+
+ switch (softc->state) {
+ case PASS_STATE_NORMAL:
+ {
+ struct buf *bp;
+
+ s = splbio();
+ bp = bufq_first(&softc->buf_queue);
+ if (periph->immediate_priority <= periph->pinfo.priority) {
+ start_ccb->ccb_h.ccb_type = PASS_CCB_WAITING;
+ SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
+ periph_links.sle);
+ periph->immediate_priority = CAM_PRIORITY_NONE;
+ splx(s);
+ wakeup(&periph->ccb_list);
+ } else if (bp == NULL) {
+ splx(s);
+ xpt_release_ccb(start_ccb);
+ } else {
+
+ bufq_remove(&softc->buf_queue, bp);
+
+ devstat_start_transaction(&softc->device_stats);
+
+ /*
+ * XXX JGibbs -
+ * Interpret the contents of the bp as a CCB
+ * and pass it to a routine shared by our ioctl
+ * code and passtart.
+ * For now, just biodone it with EIO so we don't
+ * hang.
+ */
+ bp->b_error = EIO;
+ bp->b_flags |= B_ERROR;
+ bp->b_resid = bp->b_bcount;
+ biodone(bp);
+ bp = bufq_first(&softc->buf_queue);
+ splx(s);
+
+ xpt_action(start_ccb);
+
+ }
+ if (bp != NULL) {
+ /* Have more work to do, so ensure we stay scheduled */
+ xpt_schedule(periph, /* XXX priority */1);
+ }
+ break;
+ }
+ }
+}
+static void
+passdone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct pass_softc *softc;
+ struct ccb_scsiio *csio;
+
+ softc = (struct pass_softc *)periph->softc;
+ csio = &done_ccb->csio;
+ switch (csio->ccb_h.ccb_type) {
+ case PASS_CCB_BUFFER_IO:
+ {
+ struct buf *bp;
+ cam_status status;
+ u_int8_t scsi_status;
+ devstat_trans_flags ds_flags;
+
+ status = done_ccb->ccb_h.status;
+ scsi_status = done_ccb->csio.scsi_status;
+ bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
+ /* XXX handle errors */
+ if (!(((status & CAM_STATUS_MASK) == CAM_REQ_CMP)
+ && (scsi_status == SCSI_STATUS_OK))) {
+ int error;
+
+ if ((error = passerror(done_ccb, 0, 0)) == ERESTART) {
+ /*
+				 * A retry was scheduled, so just return.
+ */
+ return;
+ }
+
+ /*
+ * XXX unfreeze the queue after we complete
+ * the abort process
+ */
+ bp->b_error = error;
+ bp->b_flags |= B_ERROR;
+ }
+
+ if ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
+ ds_flags = DEVSTAT_READ;
+ else if ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
+ ds_flags = DEVSTAT_WRITE;
+ else
+ ds_flags = DEVSTAT_NO_DATA;
+
+ devstat_end_transaction(&softc->device_stats, bp->b_bcount,
+ done_ccb->csio.tag_action & 0xf,
+ ds_flags);
+
+ biodone(bp);
+ break;
+ }
+ case PASS_CCB_WAITING:
+ {
+ /* Caller will release the CCB */
+ wakeup(&done_ccb->ccb_h.cbfcnp);
+ return;
+ }
+ }
+ xpt_release_ccb(done_ccb);
+}
+
+static int
+passioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+ u_int8_t unit;
+ int error;
+
+ /* unit = dkunit(dev); */
+ /* XXX KDM fix this */
+ unit = minor(dev) & 0xff;
+
+ periph = cam_extend_get(passperiphs, unit);
+
+ if (periph == NULL)
+ return(ENXIO);
+
+ softc = (struct pass_softc *)periph->softc;
+
+ error = 0;
+
+ switch (cmd) {
+
+ case CAMIOCOMMAND:
+ {
+ union ccb *inccb;
+ union ccb *ccb;
+
+ inccb = (union ccb *)addr;
+ ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
+
+ error = passsendccb(periph, ccb, inccb);
+
+ xpt_release_ccb(ccb);
+
+ break;
+ }
+ default:
+ error = cam_periph_ioctl(periph, cmd, addr, passerror);
+ break;
+ }
+
+ return(error);
+}
+
+/*
+ * Generally, "ccb" should be the CCB supplied by the kernel. "inccb"
+ * should be the CCB that is copied in from the user.
+ */
+static int
+passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
+{
+ struct buf *bp[2];
+ struct pass_softc *softc;
+ struct cam_periph_map_info mapinfo;
+ int error, need_unmap;
+
+ softc = (struct pass_softc *)periph->softc;
+
+ need_unmap = 0;
+
+ /*
+ * There are some fields in the CCB header that need to be
+ * preserved, the rest we get from the user.
+ */
+ xpt_merge_ccb(ccb, inccb);
+
+ /*
+ * There's no way for the user to have a completion
+ * function, so we put our own completion function in here.
+ */
+ ccb->ccb_h.cbfcnp = passdone;
+
+ /*
+ * We only attempt to map the user memory into kernel space
+ * if they haven't passed in a physical memory pointer,
+ * and if there is actually an I/O operation to perform.
+ * Right now cam_periph_mapmem() only supports SCSI and device
+ * match CCBs. For the SCSI CCBs, we only pass the CCB in if
+ * there's actually data to map. cam_periph_mapmem() will do the
+ * right thing, even if there isn't data to map, but since CCBs
+	 * without data are a reasonably common occurrence (e.g. test unit
+ * ready), it will save a few cycles if we check for it here.
+ */
+ if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
+ && (((ccb->ccb_h.func_code == XPT_SCSI_IO)
+ && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
+ || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
+
+ bzero(&mapinfo, sizeof(mapinfo));
+
+ error = cam_periph_mapmem(ccb, &mapinfo);
+
+ /*
+ * cam_periph_mapmem returned an error, we can't continue.
+ * Return the error to the user.
+ */
+ if (error)
+ return(error);
+
+ /*
+ * We successfully mapped the memory in, so we need to
+ * unmap it when the transaction is done.
+ */
+ need_unmap = 1;
+ }
+
+ /*
+ * If the user wants us to perform any error recovery, then honor
+ * that request. Otherwise, it's up to the user to perform any
+ * error recovery.
+ */
+ error = cam_periph_runccb(ccb,
+ (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
+ passerror : NULL,
+ /* cam_flags */ 0,
+ /* sense_flags */SF_RETRY_UA,
+ &softc->device_stats);
+
+ if (need_unmap != 0)
+ cam_periph_unmapmem(ccb, &mapinfo);
+
+ ccb->ccb_h.cbfcnp = NULL;
+ ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
+ bcopy(ccb, inccb, sizeof(union ccb));
+
+ return(error);
+}
+
+static int
+passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
+{
+ struct cam_periph *periph;
+ struct pass_softc *softc;
+
+ periph = xpt_path_periph(ccb->ccb_h.path);
+ softc = (struct pass_softc *)periph->softc;
+
+ return(cam_periph_error(ccb, cam_flags, sense_flags,
+ &softc->saved_ccb));
+}
diff --git a/sys/cam/scsi/scsi_pass.h b/sys/cam/scsi/scsi_pass.h
new file mode 100644
index 0000000..501598b
--- /dev/null
+++ b/sys/cam/scsi/scsi_pass.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 1997 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _SCSI_PASS_H
+#define _SCSI_PASS_H 1
+
+#include <sys/ioccom.h>
+
+#include <cam/cam_ccb.h>
+
+#define CAMIOCOMMAND _IOWR('Q', 2, union ccb)
+#define CAMGETPASSTHRU _IOWR('Q', 3, union ccb)
+
+#endif
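A minimal userland sketch of driving the pass device with CAMIOCOMMAND, assuming a device node such as /dev/pass0 and relying on passsendccb()'s xpt_merge_ccb() call to supply the kernel-side header fields; the header list and the tur() helper name are assumptions, not part of this patch:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	#include <cam/cam.h>
	#include <cam/cam_ccb.h>
	#include <cam/scsi/scsi_all.h>
	#include <cam/scsi/scsi_pass.h>

	/* Hypothetical example: issue TEST UNIT READY through the pass driver. */
	int
	tur(const char *devname)
	{
		union ccb ccb;
		int fd, ret;

		if ((fd = open(devname, O_RDWR)) < 0)
			return (-1);
		memset(&ccb, 0, sizeof(ccb));
		ccb.ccb_h.func_code = XPT_SCSI_IO;
		ccb.ccb_h.flags = CAM_DIR_NONE;	/* no data phase */
		ccb.ccb_h.timeout = 5000;	/* milliseconds */
		ccb.csio.cdb_len = 6;		/* TEST UNIT READY: all-zero CDB */
		ccb.csio.sense_len = SSD_FULL_SIZE;
		ret = ioctl(fd, CAMIOCOMMAND, &ccb);
		close(fd);
		if (ret < 0)
			return (-1);
		return ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP ? 0 : 1);
	}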
diff --git a/sys/cam/scsi/scsi_pt.c b/sys/cam/scsi/scsi_pt.c
new file mode 100644
index 0000000..f07b210
--- /dev/null
+++ b/sys/cam/scsi/scsi_pt.c
@@ -0,0 +1,723 @@
+/*
+ * Implementation of SCSI Processor Target Peripheral driver for CAM.
+ *
+ * Copyright (c) 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/buf.h>
+#include <sys/devicestat.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_extend.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_debug.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/scsi/scsi_pt.h>
+
+typedef enum {
+ PT_STATE_PROBE,
+ PT_STATE_NORMAL
+} pt_state;
+
+typedef enum {
+ PT_FLAG_NONE = 0x00,
+ PT_FLAG_OPEN = 0x01,
+ PT_FLAG_DEVICE_INVALID = 0x02,
+ PT_FLAG_RETRY_UA = 0x04
+} pt_flags;
+
+typedef enum {
+ PT_CCB_BUFFER_IO = 0x01,
+ PT_CCB_WAITING = 0x02,
+ PT_CCB_RETRY_UA = 0x04,
+ PT_CCB_BUFFER_IO_UA = PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA
+} pt_ccb_state;
+
+/* Offsets into our private area for storing information */
+#define ccb_state ppriv_field0
+#define ccb_bp ppriv_ptr1
+
+struct pt_softc {
+ struct buf_queue_head buf_queue;
+ struct devstat device_stats;
+ LIST_HEAD(, ccb_hdr) pending_ccbs;
+ pt_state state;
+ pt_flags flags;
+ union ccb saved_ccb;
+};
+
+static d_open_t ptopen;
+static d_read_t ptread;
+static d_write_t ptwrite;
+static d_close_t ptclose;
+static d_strategy_t ptstrategy;
+static periph_init_t ptinit;
+static void ptasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg);
+static periph_ctor_t ptctor;
+static periph_dtor_t ptdtor;
+static periph_start_t ptstart;
+static void ptdone(struct cam_periph *periph,
+ union ccb *done_ccb);
+static int pterror(union ccb *ccb, u_int32_t cam_flags,
+ u_int32_t sense_flags);
+
+void scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int tag_action, int readop, u_int byte2,
+ u_int32_t xfer_len, u_int8_t *data_ptr,
+ u_int8_t sense_len, u_int32_t timeout);
+
+static struct periph_driver ptdriver =
+{
+ ptinit, "pt",
+ TAILQ_HEAD_INITIALIZER(ptdriver.units), /* generation */ 0
+};
+
+DATA_SET(periphdriver_set, ptdriver);
+
+#define PT_CDEV_MAJOR 61
+
+static struct cdevsw pt_cdevsw =
+{
+ /*d_open*/ ptopen,
+ /*d_close*/ ptclose,
+ /*d_read*/ ptread,
+ /*d_write*/ ptwrite,
+ /*d_ioctl*/ noioctl,
+ /*d_stop*/ nostop,
+ /*d_reset*/ noreset,
+ /*d_devtotty*/ nodevtotty,
+ /*d_poll*/ seltrue,
+ /*d_mmap*/ nommap,
+ /*d_strategy*/ ptstrategy,
+ /*d_name*/ "pt",
+ /*d_spare*/ NULL,
+ /*d_maj*/ -1,
+ /*d_dump*/ nodump,
+ /*d_psize*/ nopsize,
+ /*d_flags*/ 0,
+ /*d_maxio*/ 0,
+ /*b_maj*/ -1
+};
+
+static struct extend_array *ptperiphs;
+
+static int
+ptopen(dev_t dev, int flags, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct pt_softc *softc;
+ int unit;
+ int error;
+
+ unit = minor(dev);
+ periph = cam_extend_get(ptperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct pt_softc *)periph->softc;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
+ ("ptopen: dev=0x%x (unit %d)\n", dev, unit));
+
+ if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0)
+ return (error); /* error code from tsleep */
+
+ if ((softc->flags & PT_FLAG_OPEN) == 0) {
+ if (cam_periph_acquire(periph) != CAM_REQ_CMP)
+ error = ENXIO;
+ else
+ softc->flags |= PT_FLAG_OPEN;
+ } else
+ error = EBUSY;
+
+ cam_periph_unlock(periph);
+ return (error);
+}
+
+static int
+ptclose(dev_t dev, int flag, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct pt_softc *softc;
+ union ccb *ccb;
+ int unit;
+ int error;
+
+ unit = minor(dev);
+ periph = cam_extend_get(ptperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct pt_softc *)periph->softc;
+
+ if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
+ return (error); /* error code from tsleep */
+
+ softc->flags &= ~PT_FLAG_OPEN;
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+ return (0);
+}
+
+static int
+ptread(dev_t dev, struct uio *uio, int ioflag)
+{
+ return(physio(ptstrategy, NULL, dev, 1, minphys, uio));
+}
+
+static int
+ptwrite(dev_t dev, struct uio *uio, int ioflag)
+{
+ return(physio(ptstrategy, NULL, dev, 0, minphys, uio));
+}
+
+/*
+ * Actually translate the requested transfer into one the physical driver
+ * can understand. The transfer is described by a buf and will include
+ * only one physical transfer.
+ */
+static void
+ptstrategy(struct buf *bp)
+{
+ struct cam_periph *periph;
+ struct pt_softc *softc;
+ u_int unit;
+ int s;
+
+ unit = minor(bp->b_dev);
+ periph = cam_extend_get(ptperiphs, unit);
+ if (periph == NULL) {
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+ softc = (struct pt_softc *)periph->softc;
+
+ /*
+ * Mask interrupts so that the pack cannot be invalidated until
+ * after we are in the queue. Otherwise, we might not properly
+ * clean up one of the buffers.
+ */
+ s = splbio();
+
+ /*
+ * If the device has been made invalid, error out
+ */
+ if ((softc->flags & PT_FLAG_DEVICE_INVALID)) {
+ splx(s);
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+
+ /*
+ * Place it in the queue of disk activities for this disk
+ */
+ bufq_insert_tail(&softc->buf_queue, bp);
+
+ splx(s);
+
+ /*
+ * Schedule ourselves for performing the work.
+ */
+ xpt_schedule(periph, /* XXX priority */1);
+
+ return;
+bad:
+ bp->b_flags |= B_ERROR;
+
+ /*
+ * Correctly set the buf to indicate a completed xfer
+ */
+ bp->b_resid = bp->b_bcount;
+ biodone(bp);
+}
+
+static void
+ptinit(void)
+{
+ cam_status status;
+ struct cam_path *path;
+
+ /*
+ * Create our extend array for storing the devices we attach to.
+ */
+ ptperiphs = cam_extend_new();
+ if (ptperiphs == NULL) {
+ printf("pt: Failed to alloc extend array!\n");
+ return;
+ }
+
+ /*
+ * Install a global async callback. This callback will
+ * receive async callbacks like "new device found".
+ */
+ status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+
+ if (status == CAM_REQ_CMP) {
+ struct ccb_setasync csa;
+
+ xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_FOUND_DEVICE;
+ csa.callback = ptasync;
+ csa.callback_arg = NULL;
+ xpt_action((union ccb *)&csa);
+ status = csa.ccb_h.status;
+ xpt_free_path(path);
+ }
+
+ if (status != CAM_REQ_CMP) {
+ printf("pt: Failed to attach master async callback "
+ "due to status 0x%x!\n", status);
+ } else {
+		/* If we were successful, register our devsw */
+ dev_t dev;
+
+ dev = makedev(PT_CDEV_MAJOR, 0);
+ cdevsw_add(&dev,&pt_cdevsw, NULL);
+ }
+}
+
+static cam_status
+ptctor(struct cam_periph *periph, void *arg)
+{
+ int s;
+ struct pt_softc *softc;
+ struct ccb_setasync csa;
+ struct ccb_getdev *cgd;
+
+ cgd = (struct ccb_getdev *)arg;
+ if (periph == NULL) {
+ printf("ptregister: periph was NULL!!\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ if (cgd == NULL) {
+ printf("ptregister: no getdev CCB, can't register device\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ softc = (struct pt_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
+
+ if (softc == NULL) {
+		printf("ptregister: Unable to probe new device. "
+ "Unable to allocate softc\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ bzero(softc, sizeof(*softc));
+ LIST_INIT(&softc->pending_ccbs);
+ softc->state = PT_STATE_NORMAL;
+ bufq_init(&softc->buf_queue);
+
+ periph->softc = softc;
+
+ cam_extend_set(ptperiphs, periph->unit_number, periph);
+
+ /*
+	 * Processor target devices have no inherent blocksize, so
+	 * register the devstat entry with DEVSTAT_NO_BLOCKSIZE.
+ */
+ devstat_add_entry(&softc->device_stats, "pt",
+ periph->unit_number, 0,
+ DEVSTAT_NO_BLOCKSIZE,
+ cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);
+
+ /*
+ * Add async callbacks for bus reset and
+ * bus device reset calls. I don't bother
+ * checking if this fails as, in most cases,
+ * the system will function just fine without
+ * them and the only alternative would be to
+ * not attach the device on failure.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
+ csa.callback = ptasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ /* Tell the user we've attached to the device */
+ xpt_announce_periph(periph, NULL);
+
+ return(CAM_REQ_CMP);
+}
+
+static void
+ptdtor(struct cam_periph *periph)
+{
+ cam_extend_release(ptperiphs, periph->unit_number);
+ xpt_print_path(periph->path);
+ printf("removing device entry\n");
+ free(periph->softc, M_DEVBUF);
+}
+
+static void
+ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
+{
+ struct cam_periph *periph;
+
+ periph = (struct cam_periph *)callback_arg;
+ switch (code) {
+ case AC_FOUND_DEVICE:
+ {
+ struct ccb_getdev *cgd;
+ cam_status status;
+
+ cgd = (struct ccb_getdev *)arg;
+
+ if (cgd->pd_type != T_PROCESSOR)
+ break;
+
+ /*
+ * Allocate a peripheral instance for
+ * this device and start the probe
+ * process.
+ */
+ status = cam_periph_alloc(ptctor, ptdtor, ptstart,
+ "pt", CAM_PERIPH_BIO, cgd->ccb_h.path,
+ ptasync, AC_FOUND_DEVICE, cgd);
+
+ if (status != CAM_REQ_CMP
+ && status != CAM_REQ_INPROG)
+ printf("ptasync: Unable to attach to new device "
+ "due to status 0x%x\n", status);
+ break;
+ }
+ case AC_LOST_DEVICE:
+ {
+ int s;
+ struct pt_softc *softc;
+ struct buf *q_bp;
+ struct ccb_setasync csa;
+
+ softc = (struct pt_softc *)periph->softc;
+
+ /*
+		 * Ensure that no other async callbacks that
+ * might affect this peripheral can come through.
+ */
+ s = splcam();
+
+ /*
+ * De-register any async callbacks.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path,
+ /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = 0;
+ csa.callback = ptasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ softc->flags |= PT_FLAG_DEVICE_INVALID;
+
+ /*
+ * Return all queued I/O with ENXIO.
+ * XXX Handle any transactions queued to the card
+ * with XPT_ABORT_CCB.
+ */
+ while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
+ bufq_remove(&softc->buf_queue, q_bp);
+ q_bp->b_resid = q_bp->b_bcount;
+ q_bp->b_error = ENXIO;
+ q_bp->b_flags |= B_ERROR;
+ biodone(q_bp);
+ }
+ devstat_remove_entry(&softc->device_stats);
+
+ xpt_print_path(periph->path);
+ printf("lost device\n");
+
+ splx(s);
+
+ cam_periph_invalidate(periph);
+ break;
+ }
+ case AC_SENT_BDR:
+ case AC_BUS_RESET:
+ {
+ struct pt_softc *softc;
+ struct ccb_hdr *ccbh;
+ int s;
+
+ softc = (struct pt_softc *)periph->softc;
+ s = splsoftcam();
+ /*
+ * Don't fail on the expected unit attention
+ * that will occur.
+ */
+ softc->flags |= PT_FLAG_RETRY_UA;
+ for (ccbh = LIST_FIRST(&softc->pending_ccbs);
+ ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
+ ccbh->ccb_state |= PT_CCB_RETRY_UA;
+ splx(s);
+ break;
+ }
+ case AC_TRANSFER_NEG:
+ case AC_SCSI_AEN:
+ case AC_UNSOL_RESEL:
+ default:
+ break;
+ }
+}
+
+static void
+ptstart(struct cam_periph *periph, union ccb *start_ccb)
+{
+ struct pt_softc *softc;
+ struct buf *bp;
+ int s;
+
+ softc = (struct pt_softc *)periph->softc;
+
+ /*
+ * See if there is a buf with work for us to do..
+ */
+ s = splbio();
+ bp = bufq_first(&softc->buf_queue);
+ if (periph->immediate_priority <= periph->pinfo.priority) {
+ CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
+ ("queuing for immediate ccb\n"));
+ start_ccb->ccb_h.ccb_state = PT_CCB_WAITING;
+ SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
+ periph_links.sle);
+ periph->immediate_priority = CAM_PRIORITY_NONE;
+ splx(s);
+ wakeup(&periph->ccb_list);
+ } else if (bp == NULL) {
+ splx(s);
+ xpt_release_ccb(start_ccb);
+ } else {
+ int oldspl;
+
+ bufq_remove(&softc->buf_queue, bp);
+
+ devstat_start_transaction(&softc->device_stats);
+
+ scsi_send_receive(&start_ccb->csio,
+ /*retries*/4,
+ ptdone,
+ MSG_SIMPLE_Q_TAG,
+ bp->b_flags & B_READ,
+ /*byte2*/0,
+ bp->b_bcount,
+ bp->b_data,
+ /*sense_len*/SSD_FULL_SIZE,
+ /*timeout*/10000);
+
+ start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO;
+
+ /*
+		 * Block out any asynchronous callbacks
+ * while we touch the pending ccb list.
+ */
+ oldspl = splcam();
+ LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h,
+ periph_links.le);
+ splx(oldspl);
+
+ start_ccb->ccb_h.ccb_bp = bp;
+ bp = bufq_first(&softc->buf_queue);
+ splx(s);
+
+ xpt_action(start_ccb);
+
+ if (bp != NULL) {
+ /* Have more work to do, so ensure we stay scheduled */
+ xpt_schedule(periph, /* XXX priority */1);
+ }
+ }
+}
+
+static void
+ptdone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct pt_softc *softc;
+ struct ccb_scsiio *csio;
+
+ softc = (struct pt_softc *)periph->softc;
+ csio = &done_ccb->csio;
+ switch (csio->ccb_h.ccb_state) {
+ case PT_CCB_BUFFER_IO:
+ case PT_CCB_BUFFER_IO_UA:
+ {
+ struct buf *bp;
+ int oldspl;
+
+ bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ int error;
+ int s;
+ int sf;
+
+ if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0)
+ sf = SF_RETRY_UA;
+ else
+ sf = 0;
+
+ if ((error = pterror(done_ccb, 0, sf)) == ERESTART) {
+ /*
+				 * A retry was scheduled, so just return.
+ */
+ return;
+ }
+ if (error != 0) {
+ struct buf *q_bp;
+
+ s = splbio();
+
+ if (error == ENXIO) {
+ /*
+ * Catastrophic error. Mark our device
+ * as invalid.
+ */
+ xpt_print_path(periph->path);
+ printf("Invalidating device\n");
+ softc->flags |= PT_FLAG_DEVICE_INVALID;
+ }
+
+ /*
+ * return all queued I/O with EIO, so that
+ * the client can retry these I/Os in the
+ * proper order should it attempt to recover.
+ */
+ while ((q_bp = bufq_first(&softc->buf_queue))
+ != NULL) {
+ bufq_remove(&softc->buf_queue, q_bp);
+ q_bp->b_resid = q_bp->b_bcount;
+ q_bp->b_error = EIO;
+ q_bp->b_flags |= B_ERROR;
+ biodone(q_bp);
+ }
+ splx(s);
+ bp->b_error = error;
+ bp->b_resid = bp->b_bcount;
+ bp->b_flags |= B_ERROR;
+ } else {
+ bp->b_resid = csio->resid;
+ bp->b_error = 0;
+ if (bp->b_resid != 0) {
+ /* Short transfer ??? */
+ bp->b_flags |= B_ERROR;
+ }
+ }
+ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ } else {
+ bp->b_resid = csio->resid;
+ if (bp->b_resid != 0)
+ bp->b_flags |= B_ERROR;
+ }
+
+ /*
+		 * Block out any asynchronous callbacks
+ * while we touch the pending ccb list.
+ */
+ oldspl = splcam();
+ LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
+ splx(oldspl);
+
+ devstat_end_transaction(&softc->device_stats,
+ bp->b_bcount - bp->b_resid,
+ done_ccb->csio.tag_action & 0xf,
+ (bp->b_flags & B_READ) ? DEVSTAT_READ
+ : DEVSTAT_WRITE);
+
+ biodone(bp);
+ break;
+ }
+ case PT_CCB_WAITING:
+ /* Caller will release the CCB */
+ wakeup(&done_ccb->ccb_h.cbfcnp);
+ return;
+ }
+ xpt_release_ccb(done_ccb);
+}
+
+static int
+pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
+{
+ struct pt_softc *softc;
+ struct cam_periph *periph;
+
+ periph = xpt_path_periph(ccb->ccb_h.path);
+ softc = (struct pt_softc *)periph->softc;
+
+ return(cam_periph_error(ccb, cam_flags, sense_flags,
+ &softc->saved_ccb));
+}
+
+void
+scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int tag_action, int readop, u_int byte2,
+ u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_send_receive *scsi_cmd;
+
+ scsi_cmd = (struct scsi_send_receive *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = readop ? RECEIVE : SEND;
+ scsi_cmd->byte2 = byte2;
+ scsi_ulto3b(xfer_len, scsi_cmd->xfer_len);
+ scsi_cmd->control = 0;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
+ tag_action,
+ data_ptr,
+ xfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
diff --git a/sys/cam/scsi/scsi_pt.h b/sys/cam/scsi/scsi_pt.h
new file mode 100644
index 0000000..d965ee2
--- /dev/null
+++ b/sys/cam/scsi/scsi_pt.h
@@ -0,0 +1,48 @@
+/*
+ * Structure and function declarations for Processor type devices.
+ *
+ * Copyright (c) 1998 Justin T. Gibbs
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _SCSI_SCSI_PT_H
+#define _SCSI_SCSI_PT_H 1
+
+struct scsi_send_receive
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t xfer_len[3];
+ u_int8_t control;
+};
+
+/*
+ * Opcodes
+ */
+#define RECEIVE 0x08
+#define SEND 0x0A
+
+#endif /* _SCSI_SCSI_PT_H */
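For reference, scsi_send_receive() in scsi_pt.c fills this 6-byte CDB with scsi_ulto3b(), which stores the transfer length most-significant byte first; a worked example with an assumed 4096-byte transfer:

	struct scsi_send_receive cdb;

	cdb.opcode = SEND;			/* 0x0A; RECEIVE (0x08) for reads */
	cdb.byte2 = 0;
	scsi_ulto3b(4096, cdb.xfer_len);	/* xfer_len[] == { 0x00, 0x10, 0x00 } */
	cdb.control = 0;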
diff --git a/sys/cam/scsi/scsi_sa.c b/sys/cam/scsi/scsi_sa.c
new file mode 100644
index 0000000..b91b594
--- /dev/null
+++ b/sys/cam/scsi/scsi_sa.c
@@ -0,0 +1,2337 @@
+/*
+ * Implementation of SCSI Sequential Access Peripheral driver for CAM.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#ifdef KERNEL
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#endif
+#include <sys/types.h>
+#include <sys/buf.h>
+#include <sys/malloc.h>
+#include <sys/mtio.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/devicestat.h>
+#include <machine/limits.h>
+
+#ifndef KERNEL
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_extend.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_debug.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/scsi/scsi_sa.h>
+
+#ifdef KERNEL
+
+#define SAUNIT(DEV) ((minor(DEV)&0xF0) >> 4) /* 4 bit unit. */
+#define SASETUNIT(DEV, U) makedev(major(DEV), ((U) << 4))
+
+typedef enum {
+ SA_STATE_NORMAL
+} sa_state;
+
+typedef enum {
+ SA_CCB_BUFFER_IO,
+ SA_CCB_WAITING
+} sa_ccb_types;
+
+#define ccb_type ppriv_field0
+#define ccb_bp ppriv_ptr1
+
+typedef enum {
+ SA_FLAG_OPEN = 0x0001,
+ SA_FLAG_FIXED = 0x0002,
+ SA_FLAG_TAPE_LOCKED = 0x0004,
+ SA_FLAG_TAPE_MOUNTED = 0x0008,
+ SA_FLAG_TAPE_WP = 0x0010,
+ SA_FLAG_TAPE_WRITTEN = 0x0020,
+ SA_FLAG_2FM_AT_EOD = 0x0040,
+ SA_FLAG_EOM_PENDING = 0x0080,
+ SA_FLAG_EIO_PENDING = 0x0100,
+ SA_FLAG_EOF_PENDING = 0x0200,
+ SA_FLAG_ERR_PENDING = (SA_FLAG_EOM_PENDING|SA_FLAG_EIO_PENDING|
+ SA_FLAG_EOF_PENDING),
+ SA_FLAG_INVALID = 0x0400,
+ SA_FLAG_COMP_ENABLED = 0x0800,
+ SA_FLAG_COMP_UNSUPP = 0x1000
+} sa_flags;
+
+typedef enum {
+ SA_MODE_REWIND = 0x00,
+ SA_MODE_NOREWIND = 0x01,
+ SA_MODE_OFFLINE = 0x02
+} sa_mode;
+
+typedef enum {
+ SA_PARAM_NONE = 0x00,
+ SA_PARAM_BLOCKSIZE = 0x01,
+ SA_PARAM_DENSITY = 0x02,
+ SA_PARAM_COMPRESSION = 0x04,
+ SA_PARAM_BUFF_MODE = 0x08,
+ SA_PARAM_NUMBLOCKS = 0x10,
+ SA_PARAM_WP = 0x20,
+ SA_PARAM_SPEED = 0x40,
+ SA_PARAM_ALL = 0x7f
+} sa_params;
+
+typedef enum {
+ SA_QUIRK_NONE = 0x00,
+ SA_QUIRK_NOCOMP = 0x01
+} sa_quirks;
+
+struct sa_softc {
+ sa_state state;
+ sa_flags flags;
+ sa_quirks quirks;
+ struct buf_queue_head buf_queue;
+ struct devstat device_stats;
+ int blk_gran;
+ int blk_mask;
+ int blk_shift;
+ u_int32_t max_blk;
+ u_int32_t min_blk;
+ u_int8_t media_density;
+ u_int32_t media_blksize;
+ u_int32_t media_numblks;
+ u_int32_t comp_algorithm;
+ u_int32_t saved_comp_algorithm;
+ u_int8_t speed;
+ int buffer_mode;
+ int filemarks;
+ union ccb saved_ccb;
+};
+
+struct sa_quirk_entry {
+ struct scsi_inquiry_pattern inq_pat;
+ sa_quirks quirks;
+};
+
+static struct sa_quirk_entry sa_quirk_table[] =
+{
+ {
+ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE",
+ "Python 25601*", "*"}, /*quirks*/SA_QUIRK_NOCOMP
+ }
+};
+
+static d_open_t saopen;
+static d_read_t saread;
+static d_write_t sawrite;
+static d_close_t saclose;
+static d_strategy_t sastrategy;
+static d_ioctl_t saioctl;
+static periph_init_t sainit;
+static periph_ctor_t saregister;
+static periph_dtor_t sacleanup;
+static periph_start_t sastart;
+static void saasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg);
+static void sadone(struct cam_periph *periph,
+ union ccb *start_ccb);
+static int saerror(union ccb *ccb, u_int32_t cam_flags,
+ u_int32_t sense_flags);
+static int sacheckeod(struct cam_periph *periph);
+static int sagetparams(struct cam_periph *periph,
+ sa_params params_to_get,
+ u_int32_t *blocksize, u_int8_t *density,
+ u_int32_t *numblocks, int *buff_mode,
+ u_int8_t *write_protect, u_int8_t *speed,
+ int *comp_supported, int *comp_enabled,
+ u_int32_t *comp_algorithm,
+ struct scsi_data_compression_page *comp_page);
+static int sasetparams(struct cam_periph *periph,
+ sa_params params_to_set,
+ u_int32_t blocksize, u_int8_t density,
+ u_int32_t comp_algorithm);
+static void saprevent(struct cam_periph *periph, int action);
+static int sarewind(struct cam_periph *periph);
+static int saspace(struct cam_periph *periph, int count,
+ scsi_space_code code);
+static int samount(struct cam_periph *periph);
+static int saretension(struct cam_periph *periph);
+static int sareservereleaseunit(struct cam_periph *periph,
+ int reserve);
+static int saloadunload(struct cam_periph *periph, int load);
+static int saerase(struct cam_periph *periph, int longerase);
+static int sawritefilemarks(struct cam_periph *periph,
+ int nmarks, int setmarks);
+
+static struct periph_driver sadriver =
+{
+ sainit, "sa",
+ TAILQ_HEAD_INITIALIZER(sadriver.units), /* generation */ 0
+};
+
+DATA_SET(periphdriver_set, sadriver);
+
+#define SAMODE(z) ((minor(z) & 0x03))
+#define SADENSITY(z) (((minor(z) >> 2) & 0x03))
+
+/* For 2.2-stable support */
+#ifndef D_TAPE
+#define D_TAPE 0
+#endif
+
+#define CTLMODE 3
+#define SA_CDEV_MAJOR 14
+#define SA_BDEV_MAJOR 5
+
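For reference, a worked decode of the minor-number macros above (the minor value is chosen for illustration):

	dev_t dev = makedev(SA_CDEV_MAJOR, 0x16);
	int unit = SAUNIT(dev);		/* (0x16 & 0xF0) >> 4 == 1 */
	int mode = SAMODE(dev);		/*  0x16 & 0x03       == 2, SA_MODE_OFFLINE */
	int density = SADENSITY(dev);	/* (0x16 >> 2) & 0x03 == 1 */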
+static struct cdevsw sa_cdevsw =
+{
+ /*d_open*/ saopen,
+ /*d_close*/ saclose,
+ /*d_read*/ saread,
+ /*d_write*/ sawrite,
+ /*d_ioctl*/ saioctl,
+ /*d_stop*/ nostop,
+ /*d_reset*/ noreset,
+ /*d_devtotty*/ nodevtotty,
+ /*d_poll*/ seltrue,
+ /*d_mmap*/ nommap,
+ /*d_strategy*/ sastrategy,
+ /*d_name*/ "sa",
+ /*d_spare*/ NULL,
+ /*d_maj*/ -1,
+ /*d_dump*/ nodump,
+ /*d_psize*/ nopsize,
+ /*d_flags*/ D_TAPE,
+ /*d_maxio*/ 0,
+ /*b_maj*/ -1
+};
+
+static struct extend_array *saperiphs;
+
+static int
+saopen(dev_t dev, int flags, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct sa_softc *softc;
+ int unit;
+ int mode;
+ int density;
+ int error;
+
+ unit = SAUNIT(dev);
+ mode = SAMODE(dev);
+ density = SADENSITY(dev);
+
+ periph = cam_extend_get(saperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct sa_softc *)periph->softc;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
+	    ("saopen: dev=0x%x (unit %d, mode %d, density %d)\n", dev,
+ unit, mode, density));
+
+ if (softc->flags & SA_FLAG_INVALID)
+ return(ENXIO);
+
+ if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
+ return (error); /* error code from tsleep */
+ }
+
+ if ((softc->flags & SA_FLAG_OPEN) == 0) {
+		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
+			cam_periph_unlock(periph);
+			return(ENXIO);
+		}
+
+ if ((error = sareservereleaseunit(periph, TRUE)) != 0) {
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+ return(error);
+ }
+ }
+
+ if (error == 0) {
+ if ((softc->flags & SA_FLAG_OPEN) != 0) {
+ error = EBUSY;
+ }
+
+ if (error == 0) {
+ error = samount(periph);
+ }
+ /* Perform other checking... */
+ }
+
+ if (error == 0) {
+ saprevent(periph, PR_PREVENT);
+ softc->flags |= SA_FLAG_OPEN;
+ }
+
+ cam_periph_unlock(periph);
+ return (error);
+}
+
+static int
+saclose(dev_t dev, int flag, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct sa_softc *softc;
+ int unit;
+ int mode;
+ int error;
+
+ unit = SAUNIT(dev);
+ mode = SAMODE(dev);
+ periph = cam_extend_get(saperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct sa_softc *)periph->softc;
+
+ if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
+ return (error); /* error code from tsleep */
+ }
+
+ sacheckeod(periph);
+
+ saprevent(periph, PR_ALLOW);
+
+ switch (mode) {
+ case SA_MODE_REWIND:
+ sarewind(periph);
+ break;
+ case SA_MODE_OFFLINE:
+ sarewind(periph);
+ saloadunload(periph, /*load*/FALSE);
+ break;
+ case SA_MODE_NOREWIND:
+ default:
+ break;
+ }
+
+ softc->flags &= ~SA_FLAG_OPEN;
+
+ /* release the device */
+ sareservereleaseunit(periph, FALSE);
+
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+
+ return (0);
+}
+
+static int
+saread(dev_t dev, struct uio *uio, int ioflag)
+{
+ return(physio(sastrategy, NULL, dev, 1, minphys, uio));
+}
+
+static int
+sawrite(dev_t dev, struct uio *uio, int ioflag)
+{
+ return(physio(sastrategy, NULL, dev, 0, minphys, uio));
+}
+
+/*
+ * Actually translate the requested transfer into one the physical driver
+ * can understand. The transfer is described by a buf and will include
+ * only one physical transfer.
+ */
+static void
+sastrategy(struct buf *bp)
+{
+ struct cam_periph *periph;
+ struct sa_softc *softc;
+ u_int unit;
+ int s;
+
+ unit = SAUNIT(bp->b_dev);
+ periph = cam_extend_get(saperiphs, unit);
+ if (periph == NULL) {
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+ softc = (struct sa_softc *)periph->softc;
+
+ /*
+	 * If it's a null transfer, return immediately
+ */
+ if (bp->b_bcount == 0)
+ goto done;
+
+ /* valid request? */
+ if (softc->flags & SA_FLAG_FIXED) {
+ /*
+ * Fixed block device. The byte count must
+ * be a multiple of our block size.
+ */
+ if (((softc->blk_mask != ~0)
+ && ((bp->b_bcount & softc->blk_mask) != 0))
+ || ((softc->blk_mask == ~0)
+ && ((bp->b_bcount % softc->min_blk) != 0))) {
+ xpt_print_path(periph->path);
+ printf("Invalid request. Fixed block device "
+ "requests must be a multiple "
+ "of %d bytes\n", softc->min_blk);
+ bp->b_error = EINVAL;
+ goto bad;
+ }
+ } else if ((bp->b_bcount > softc->max_blk)
+ || (bp->b_bcount < softc->min_blk)
+ || (bp->b_bcount & softc->blk_mask) != 0) {
+
+ xpt_print_path(periph->path);
+ printf("Invalid request. Variable block device "
+ "requests must be ");
+ if (softc->blk_mask != 0) {
+ printf("a multiple of %d ",
+ (0x1 << softc->blk_gran));
+ }
+ printf("between %d and %d bytes\n",
+ softc->min_blk, softc->max_blk);
+ bp->b_error = EINVAL;
+ goto bad;
+ }
+
+ /*
+ * Mask interrupts so that the pack cannot be invalidated until
+ * after we are in the queue. Otherwise, we might not properly
+ * clean up one of the buffers.
+ */
+ s = splbio();
+
+ /*
+ * Place it in the queue of disk activities for this disk
+ */
+ bufq_insert_tail(&softc->buf_queue, bp);
+
+ splx(s);
+
+ /*
+ * Schedule ourselves for performing the work.
+ */
+ xpt_schedule(periph, /* XXX priority */1);
+
+ return;
+bad:
+ bp->b_flags |= B_ERROR;
+done:
+
+ /*
+ * Correctly set the buf to indicate a completed xfer
+ */
+ bp->b_resid = bp->b_bcount;
+ biodone(bp);
+}
+
+static int
+saioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct sa_softc *softc;
+ int unit;
+ int mode;
+ int density;
+ int error;
+
+ unit = SAUNIT(dev);
+ mode = SAMODE(dev);
+ density = SADENSITY(dev);
+
+ periph = cam_extend_get(saperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+
+ softc = (struct sa_softc *)periph->softc;
+
+ /*
+ * Find the device that the user is talking about
+ */
+ switch (cmd) {
+ case MTIOCGET:
+ {
+ struct mtget *g = (struct mtget *)arg;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
+			  ("saioctl: MTIOCGET\n"));
+
+ bzero(g, sizeof(struct mtget));
+ g->mt_type = 0x7; /* Ultrix compat *//*? */
+ g->mt_density = softc->media_density;
+ g->mt_blksiz = softc->media_blksize;
+ if (softc->flags & SA_FLAG_COMP_UNSUPP) {
+ g->mt_comp = MT_COMP_UNSUPP;
+ g->mt_comp0 = MT_COMP_UNSUPP;
+ g->mt_comp1 = MT_COMP_UNSUPP;
+ g->mt_comp2 = MT_COMP_UNSUPP;
+ g->mt_comp3 = MT_COMP_UNSUPP;
+ } else if ((softc->flags & SA_FLAG_COMP_ENABLED) == 0) {
+ g->mt_comp = MT_COMP_DISABLED;
+ g->mt_comp0 = MT_COMP_DISABLED;
+ g->mt_comp1 = MT_COMP_DISABLED;
+ g->mt_comp2 = MT_COMP_DISABLED;
+ g->mt_comp3 = MT_COMP_DISABLED;
+ } else {
+ g->mt_comp = softc->comp_algorithm;
+ g->mt_comp0 = softc->comp_algorithm;
+ g->mt_comp1 = softc->comp_algorithm;
+ g->mt_comp2 = softc->comp_algorithm;
+ g->mt_comp3 = softc->comp_algorithm;
+ }
+ g->mt_density0 = softc->media_density;
+ g->mt_density1 = softc->media_density;
+ g->mt_density2 = softc->media_density;
+ g->mt_density3 = softc->media_density;
+ g->mt_blksiz0 = softc->media_blksize;
+ g->mt_blksiz1 = softc->media_blksize;
+ g->mt_blksiz2 = softc->media_blksize;
+ g->mt_blksiz3 = softc->media_blksize;
+ error = 0;
+ break;
+ }
+ case MTIOCTOP:
+ {
+ struct mtop *mt;
+ int count;
+
+ mt = (struct mtop *)arg;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
+ ("saioctl: op=0x%x count=0x%x\n",
+ mt->mt_op, mt->mt_count));
+
+ count = mt->mt_count;
+ switch (mt->mt_op) {
+ case MTWEOF: /* write an end-of-file record */
+ error = sawritefilemarks(periph, count,
+ /*setmarks*/FALSE);
+ break;
+ case MTBSR: /* backward space record */
+ case MTFSR: /* forward space record */
+ case MTBSF: /* backward space file */
+ case MTFSF: /* forward space file */
+ case MTEOD: /* space to end of recorded medium */
+ {
+ int nmarks;
+ scsi_space_code spaceop;
+
+ nmarks = softc->filemarks;
+ error = sacheckeod(periph);
+ nmarks -= softc->filemarks;
+
+ if ((mt->mt_op == MTBSR) || (mt->mt_op == MTBSF))
+ count = -count;
+
+ if ((mt->mt_op == MTBSF) || (mt->mt_op == MTFSF))
+ spaceop = SS_FILEMARKS;
+ else if ((mt->mt_op == MTBSR) || (mt->mt_op == MTFSR))
+ spaceop = SS_BLOCKS;
+ else {
+ spaceop = SS_EOD;
+ count = 0;
+ nmarks = 0;
+ }
+
+ nmarks = softc->filemarks;
+ error = sacheckeod(periph);
+ nmarks -= softc->filemarks;
+ if (error == 0)
+ error = saspace(periph, count - nmarks,
+ spaceop);
+ break;
+ }
+ case MTREW: /* rewind */
+ error = sarewind(periph);
+ break;
+ case MTERASE: /* erase */
+ error = saerase(periph, count);
+ break;
+ case MTRETENS: /* re-tension tape */
+ error = saretension(periph);
+ break;
+ case MTOFFL: /* rewind and put the drive offline */
+ /*
+ * Be sure to allow media removal before
+ * attempting the eject.
+ */
+ saprevent(periph, PR_ALLOW);
+ error = sarewind(periph);
+
+ if (error == 0)
+ error = saloadunload(periph, /*load*/FALSE);
+ else
+ break;
+
+ /* XXX KDM */
+ softc->flags &= ~SA_FLAG_TAPE_LOCKED;
+ softc->flags &= ~SA_FLAG_TAPE_MOUNTED;
+ break;
+ case MTNOP: /* no operation, sets status only */
+ case MTCACHE: /* enable controller cache */
+ case MTNOCACHE: /* disable controller cache */
+ error = 0;
+ break;
+ case MTSETBSIZ: /* Set block size for device */
+
+ error = sasetparams(periph, SA_PARAM_BLOCKSIZE, count,
+ 0, 0);
+ break;
+ case MTSETDNSTY: /* Set density for device and mode */
+ if (count > UCHAR_MAX) {
+ error = EINVAL;
+ break;
+ } else {
+ error = sasetparams(periph, SA_PARAM_DENSITY,
+ 0, count, 0);
+ }
+ break;
+ case MTCOMP: /* enable compression */
+ /*
+ * Some devices don't support compression, and
+ * don't like it if you ask them for the
+ * compression page.
+ */
+ if ((softc->quirks & SA_QUIRK_NOCOMP)
+ || (softc->flags & SA_FLAG_COMP_UNSUPP)) {
+ error = ENODEV;
+ break;
+ }
+ error = sasetparams(periph, SA_PARAM_COMPRESSION,
+ 0, 0, count);
+ break;
+ default:
+ error = EINVAL;
+ }
+ break;
+ }
+ case MTIOCIEOT:
+ case MTIOCEEOT:
+ error = 0;
+ break;
+ default:
+ error = cam_periph_ioctl(periph, cmd, arg, saerror);
+ break;
+ }
+ return (error);
+}
+
+static void
+sainit(void)
+{
+ cam_status status;
+ struct cam_path *path;
+
+ /*
+ * Create our extend array for storing the devices we attach to.
+ */
+ saperiphs = cam_extend_new();
+ if (saperiphs == NULL) {
+ printf("sa: Failed to alloc extend array!\n");
+ return;
+ }
+
+ /*
+ * Install a global async callback.
+ */
+ status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+
+ if (status == CAM_REQ_CMP) {
+		/* Register the async callbacks of interest */
+ struct ccb_setasync csa; /*
+ * This is an immediate CCB,
+ * so using the stack is OK
+ */
+ xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_FOUND_DEVICE;
+ csa.callback = saasync;
+ csa.callback_arg = NULL;
+ xpt_action((union ccb *)&csa);
+ status = csa.ccb_h.status;
+ xpt_free_path(path);
+ }
+
+ if (status != CAM_REQ_CMP) {
+ printf("sa: Failed to attach master async callback "
+ "due to status 0x%x!\n", status);
+ } else {
+		/* If we were successful, register our devsw */
+ cdevsw_add_generic(SA_BDEV_MAJOR, SA_CDEV_MAJOR, &sa_cdevsw);
+ }
+}
+
+static void
+sacleanup(struct cam_periph *periph)
+{
+ cam_extend_release(saperiphs, periph->unit_number);
+ xpt_print_path(periph->path);
+ printf("removing device entry\n");
+ free(periph->softc, M_DEVBUF);
+}
+
+static void
+saasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg)
+{
+ struct cam_periph *periph;
+
+ periph = (struct cam_periph *)callback_arg;
+ switch (code) {
+ case AC_FOUND_DEVICE:
+ {
+ struct ccb_getdev *cgd;
+ cam_status status;
+
+ cgd = (struct ccb_getdev *)arg;
+
+ if (cgd->pd_type != T_SEQUENTIAL)
+ break;
+
+ /*
+ * Allocate a peripheral instance for
+ * this device and start the probe
+ * process.
+ */
+ status = cam_periph_alloc(saregister, sacleanup, sastart,
+ "sa", CAM_PERIPH_BIO, cgd->ccb_h.path,
+ saasync, AC_FOUND_DEVICE, cgd);
+
+ if (status != CAM_REQ_CMP
+ && status != CAM_REQ_INPROG)
+ printf("saasync: Unable to probe new device "
+ "due to status 0x%x\n", status);
+ break;
+ }
+ case AC_LOST_DEVICE:
+ {
+ int s;
+ struct sa_softc *softc;
+ struct buf *q_bp;
+ struct ccb_setasync csa;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ /*
+		 * Ensure that no other async callbacks that
+ * might affect this peripheral can come through.
+ */
+ s = splcam();
+
+ /*
+ * De-register any async callbacks.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path,
+ /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = 0;
+ csa.callback = saasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ softc->flags |= SA_FLAG_INVALID;
+
+ /*
+ * Return all queued I/O with ENXIO.
+ * XXX Handle any transactions queued to the card
+ * with XPT_ABORT_CCB.
+ */
+ while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
+ bufq_remove(&softc->buf_queue, q_bp);
+ q_bp->b_resid = q_bp->b_bcount;
+ q_bp->b_error = ENXIO;
+ q_bp->b_flags |= B_ERROR;
+ biodone(q_bp);
+ }
+ devstat_remove_entry(&softc->device_stats);
+
+ xpt_print_path(periph->path);
+ printf("lost device\n");
+
+ splx(s);
+
+ cam_periph_invalidate(periph);
+ }
+ case AC_TRANSFER_NEG:
+ case AC_SENT_BDR:
+ case AC_SCSI_AEN:
+ case AC_UNSOL_RESEL:
+ case AC_BUS_RESET:
+ default:
+ break;
+ }
+}
+
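+/*
+ * Called by cam_periph_alloc() to finish attaching to a new sequential
+ * access device: allocate and initialize the softc, apply any quirks,
+ * register a devstat entry, and request AC_LOST_DEVICE notification.
+ */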
+static cam_status
+saregister(struct cam_periph *periph, void *arg)
+{
+ struct sa_softc *softc;
+ struct ccb_setasync csa;
+ struct ccb_getdev *cgd;
+ caddr_t match;
+
+ cgd = (struct ccb_getdev *)arg;
+ if (periph == NULL) {
+ printf("saregister: periph was NULL!!\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ if (cgd == NULL) {
+ printf("saregister: no getdev CCB, can't register device\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ softc = (struct sa_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
+
+ if (softc == NULL) {
+ printf("saregister: Unable to probe new device. "
+ "Unable to allocate softc\n");
+ return(CAM_REQ_CMP_ERR);
+ }
+
+ bzero(softc, sizeof(*softc));
+ softc->state = SA_STATE_NORMAL;
+ bufq_init(&softc->buf_queue);
+ periph->softc = softc;
+ cam_extend_set(saperiphs, periph->unit_number, periph);
+
+ /*
+ * See if this device has any quirks.
+ */
+ match = cam_quirkmatch((caddr_t)&cgd->inq_data,
+ (caddr_t)sa_quirk_table,
+ sizeof(sa_quirk_table)/sizeof(*sa_quirk_table),
+ sizeof(*sa_quirk_table), scsi_inquiry_match);
+
+ if (match != NULL)
+ softc->quirks = ((struct sa_quirk_entry *)match)->quirks;
+ else
+ softc->quirks = SA_QUIRK_NONE;
+
+ /*
+ * The SA driver supports a blocksize, but we don't know the
+ * blocksize until we sense the media. So, set a flag to
+ * indicate that the blocksize is unavailable right now.
+	 * The blocksize is determined later, in samount(), via a mode
+	 * sense rather than a read capacity.
+ */
+ devstat_add_entry(&softc->device_stats, "sa",
+ periph->unit_number, 0,
+ DEVSTAT_BS_UNAVAILABLE,
+ cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);
+
+ /*
+ * Add an async callback so that we get
+ * notified if this device goes away.
+ */
+ xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_LOST_DEVICE;
+ csa.callback = saasync;
+ csa.callback_arg = periph;
+ xpt_action((union ccb *)&csa);
+
+ xpt_announce_periph(periph, NULL);
+
+ return(CAM_REQ_CMP);
+}
+
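+/*
+ * Called by the XPT when a CCB is available for this peripheral.
+ * Either hand the CCB to a waiter (SA_CCB_WAITING), release it if
+ * there is no queued I/O, complete the buffer at the head of the
+ * queue with any pending error, or build a read/write CDB for the
+ * next queued buffer.
+ */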
+static void
+sastart(struct cam_periph *periph, union ccb *start_ccb)
+{
+ struct sa_softc *softc;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ switch (softc->state) {
+ case SA_STATE_NORMAL:
+ {
+ /* Pull a buffer from the queue and get going on it */
+ struct buf *bp;
+ int s;
+
+ /*
+ * See if there is a buf with work for us to do..
+ */
+ s = splbio();
+ bp = bufq_first(&softc->buf_queue);
+ if (periph->immediate_priority <= periph->pinfo.priority) {
+ CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
+ ("queuing for immediate ccb\n"));
+ start_ccb->ccb_h.ccb_type = SA_CCB_WAITING;
+ SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
+ periph_links.sle);
+ periph->immediate_priority = CAM_PRIORITY_NONE;
+ splx(s);
+ wakeup(&periph->ccb_list);
+ } else if (bp == NULL) {
+ splx(s);
+ xpt_release_ccb(start_ccb);
+		} else if ((softc->flags & SA_FLAG_ERR_PENDING) != 0) {
+			struct buf *done_bp;
+
+			bufq_remove(&softc->buf_queue, bp);
+			bp->b_resid = bp->b_bcount;
+			bp->b_flags |= B_ERROR;
+			if ((softc->flags & SA_FLAG_EOM_PENDING) != 0) {
+				if ((bp->b_flags & B_READ) == 0)
+					bp->b_error = ENOSPC;
+			}
+			if ((softc->flags & SA_FLAG_EIO_PENDING) != 0) {
+				bp->b_error = EIO;
+			}
+			softc->flags &= ~SA_FLAG_ERR_PENDING;
+			/*
+			 * Complete the failed buffer, but first advance
+			 * bp to the next queued buffer so the reschedule
+			 * check below still sees remaining work, and
+			 * release the unused start CCB.
+			 */
+			done_bp = bp;
+			bp = bufq_first(&softc->buf_queue);
+			splx(s);
+			xpt_release_ccb(start_ccb);
+			biodone(done_bp);
+ } else {
+ u_int32_t length;
+
+ bufq_remove(&softc->buf_queue, bp);
+
+ if ((softc->flags & SA_FLAG_FIXED) != 0) {
+ if (softc->blk_shift != 0) {
+ length =
+ bp->b_bcount >> softc->blk_shift;
+ } else {
+ length =
+ bp->b_bcount / softc->min_blk;
+ }
+ } else {
+ length = bp->b_bcount;
+ }
+
+ devstat_start_transaction(&softc->device_stats);
+
+ /*
+ * XXX - Perhaps we should...
+ * suppress illegal length indication if we are
+ * running in variable block mode so that we don't
+ * have to request sense every time our requested
+ * block size is larger than the written block.
+ * The residual information from the ccb allows
+ * us to identify this situation anyway. The only
+ * problem with this is that we will not get
+ * information about blocks that are larger than
+ * our read buffer unless we set the block size
+ * in the mode page to something other than 0.
+ */
+ scsi_sa_read_write(&start_ccb->csio,
+ /*retries*/4,
+ sadone,
+ MSG_SIMPLE_Q_TAG,
+ bp->b_flags & B_READ,
+ /*SILI*/FALSE,
+ softc->flags & SA_FLAG_FIXED,
+ length,
+ bp->b_data,
+ bp->b_bcount,
+ SSD_FULL_SIZE,
+ 120 * 60 * 1000); /* 2min */
+ start_ccb->ccb_h.ccb_type = SA_CCB_BUFFER_IO;
+ start_ccb->ccb_h.ccb_bp = bp;
+ bp = bufq_first(&softc->buf_queue);
+ splx(s);
+
+ xpt_action(start_ccb);
+ }
+
+ if (bp != NULL) {
+ /* Have more work to do, so ensure we stay scheduled */
+ xpt_schedule(periph, /* XXX priority */1);
+ }
+ break;
+ }
+ }
+}
+
+
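+/*
+ * Completion handler for buffer I/O started from sastart.  Errors are
+ * run through saerror(); a catastrophic EIO invalidates the mount and
+ * flushes the remaining buffer queue.  SA_CCB_WAITING CCBs are simply
+ * handed back to the sleeping caller.
+ */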
+static void
+sadone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct sa_softc *softc;
+ struct ccb_scsiio *csio;
+
+ softc = (struct sa_softc *)periph->softc;
+ csio = &done_ccb->csio;
+ switch (csio->ccb_h.ccb_type) {
+ case SA_CCB_BUFFER_IO:
+ {
+ struct buf *bp;
+ int error;
+
+ bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
+ error = 0;
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+
+ if ((error = saerror(done_ccb, 0, 0)) == ERESTART) {
+ /*
+			 * A retry was scheduled, so
+ * just return.
+ */
+ return;
+ }
+ }
+
+ if (error == EIO) {
+ int s;
+ struct buf *q_bp;
+
+ /*
+ * Catastrophic error. Mark our pack as invalid,
+ * return all queued I/O with EIO, and unfreeze
+ * our queue so that future transactions that
+ * attempt to fix this problem can get to the
+ * device.
+ *
+ */
+
+ s = splbio();
+ softc->flags &= ~SA_FLAG_TAPE_MOUNTED;
+
+ while ((q_bp = bufq_first(&softc->buf_queue)) != NULL) {
+ bufq_remove(&softc->buf_queue, q_bp);
+ q_bp->b_resid = q_bp->b_bcount;
+ q_bp->b_error = EIO;
+ q_bp->b_flags |= B_ERROR;
+ biodone(q_bp);
+ }
+ splx(s);
+ }
+ if (error != 0) {
+ bp->b_resid = bp->b_bcount;
+ bp->b_error = error;
+ bp->b_flags |= B_ERROR;
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ } else {
+ bp->b_resid = csio->resid;
+ bp->b_error = 0;
+ if (csio->resid != 0) {
+ bp->b_flags |= B_ERROR;
+ }
+ if ((bp->b_flags & B_READ) == 0) {
+ softc->flags |= SA_FLAG_TAPE_WRITTEN;
+ softc->filemarks = 0;
+ }
+ }
+
+ devstat_end_transaction(&softc->device_stats,
+ bp->b_bcount - bp->b_resid,
+ done_ccb->csio.tag_action & 0xf,
+ (bp->b_flags & B_READ) ? DEVSTAT_READ
+ : DEVSTAT_WRITE);
+ biodone(bp);
+ break;
+ }
+ case SA_CCB_WAITING:
+ {
+ /* Caller will release the CCB */
+ wakeup(&done_ccb->ccb_h.cbfcnp);
+ return;
+ }
+ }
+ xpt_release_ccb(done_ccb);
+}
+
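+/*
+ * Mount the media: issue a TEST UNIT READY to eat any pending unit
+ * attentions, then, if the tape is not already mounted, read the block
+ * limits, mode sense the current density/blocksize/compression
+ * settings, and enable buffered mode if the drive is currently
+ * unbuffered.
+ */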
+static int
+samount(struct cam_periph *periph)
+{
+ struct sa_softc *softc;
+ union ccb *ccb;
+ struct ccb_scsiio *csio;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+ ccb = cam_periph_getccb(periph, /* priority */1);
+ csio = &ccb->csio;
+ error = 0;
+
+ /*
+	 * Determine if something has happened since the last
+ * open/mount that would invalidate a mount. This
+ * will also eat any pending UAs.
+ */
+ scsi_test_unit_ready(csio,
+ /*retries*/1,
+ sadone,
+ MSG_SIMPLE_Q_TAG,
+ SSD_FULL_SIZE,
+ /*timeout*/5000);
+
+ cam_periph_runccb(ccb, /*error handler*/NULL, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ softc->flags &= ~SA_FLAG_TAPE_MOUNTED;
+ }
+
+ if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) {
+ struct scsi_read_block_limits_data *rblim;
+ int buff_mode, comp_enabled, comp_supported;
+ u_int8_t write_protect;
+
+ /*
+ * Clear out old state.
+ */
+ softc->flags &= ~(SA_FLAG_TAPE_WP|SA_FLAG_TAPE_WRITTEN|
+ SA_FLAG_ERR_PENDING|SA_FLAG_COMP_ENABLED|
+ SA_FLAG_COMP_UNSUPP);
+ softc->filemarks = 0;
+
+ /*
+ * First off, determine block limits.
+ */
+ rblim = (struct scsi_read_block_limits_data *)
+ malloc(sizeof(*rblim), M_TEMP, M_WAITOK);
+
+ scsi_read_block_limits(csio,
+ /*retries*/1,
+ sadone,
+ MSG_SIMPLE_Q_TAG,
+ rblim,
+ SSD_FULL_SIZE,
+ /*timeout*/5000);
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA,
+ &softc->device_stats);
+
+ xpt_release_ccb(ccb);
+
+ if (error != 0)
+ goto exit;
+
+ softc->blk_gran = RBL_GRAN(rblim);
+ softc->max_blk = scsi_3btoul(rblim->maximum);
+ softc->min_blk = scsi_2btoul(rblim->minimum);
+ if (softc->max_blk == softc->min_blk) {
+ softc->flags |= SA_FLAG_FIXED;
+			if (powerof2(softc->min_blk)) {
+				softc->blk_mask = softc->min_blk - 1;
+				softc->blk_shift = ffs(softc->min_blk) - 1;
+ } else {
+ softc->blk_mask = ~0;
+ softc->blk_shift = 0;
+ }
+ } else {
+ /*
+ * SCSI-III spec allows 0
+ * to mean "unspecified"
+ */
+ if (softc->max_blk == 0) {
+ softc->max_blk = ~0;
+ }
+ softc->blk_shift = 0;
+ if (softc->blk_gran != 0) {
+ softc->blk_mask = softc->blk_gran - 1;
+ } else {
+ softc->blk_mask = 0;
+ }
+ }
+
+ /*
+ * Next, perform a mode sense to determine
+ * current density, blocksize, compression etc.
+ */
+ error = sagetparams(periph, SA_PARAM_ALL,
+ &softc->media_blksize,
+ &softc->media_density,
+ &softc->media_numblks,
+ &softc->buffer_mode, &write_protect,
+ &softc->speed, &comp_supported,
+ &comp_enabled, &softc->comp_algorithm,
+ NULL);
+
+ if (error != 0)
+ goto exit;
+
+ if (write_protect)
+ softc->flags |= SA_FLAG_TAPE_WP;
+
+ if (comp_supported) {
+ if (comp_enabled) {
+ softc->flags |= SA_FLAG_COMP_ENABLED;
+
+ if (softc->saved_comp_algorithm == 0)
+ softc->saved_comp_algorithm =
+ softc->comp_algorithm;
+ }
+ } else
+ softc->flags |= SA_FLAG_COMP_UNSUPP;
+
+ if (softc->buffer_mode != SMH_SA_BUF_MODE_NOBUF)
+ goto exit;
+
+ error = sasetparams(periph, SA_PARAM_BUFF_MODE, 0, 0, 0);
+
+ if (error == 0)
+ softc->buffer_mode = SMH_SA_BUF_MODE_SIBUF;
+exit:
+ if (rblim != NULL)
+ free(rblim, M_TEMP);
+
+		if (error != 0) {
+			/* The CCB was released above, so use the periph path. */
+			cam_release_devq(periph->path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ } else
+ xpt_release_ccb(ccb);
+
+ return (error);
+}
+
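+/*
+ * If the tape has been written to, make sure the proper number of
+ * terminating filemarks (one, or two when SA_FLAG_2FM_AT_EOD is set)
+ * have been written before the tape is repositioned or closed.
+ */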
+static int
+sacheckeod(struct cam_periph *periph)
+{
+ int error;
+ int markswanted;
+ struct sa_softc *softc;
+
+ softc = (struct sa_softc *)periph->softc;
+ markswanted = 0;
+
+ if ((softc->flags & SA_FLAG_TAPE_WRITTEN) != 0) {
+ markswanted++;
+
+ if ((softc->flags & SA_FLAG_2FM_AT_EOD) != 0)
+ markswanted++;
+ }
+
+ if (softc->filemarks < markswanted) {
+ markswanted -= softc->filemarks;
+ error = sawritefilemarks(periph, markswanted,
+ /*setmarks*/FALSE);
+ } else {
+ error = 0;
+ }
+ return (error);
+}
+
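+/*
+ * Tape-aware error handler.  EOM, filemark, and illegal length
+ * indications that arrive with NO SENSE or BLANK CHECK are decoded
+ * here; everything else falls through to cam_periph_error().
+ */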
+static int
+saerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
+{
+ struct cam_periph *periph;
+ struct sa_softc *softc;
+ struct ccb_scsiio *csio;
+ struct scsi_sense_data *sense;
+ int error_code, sense_key, asc, ascq;
+ int error;
+
+ periph = xpt_path_periph(ccb->ccb_h.path);
+ softc = (struct sa_softc *)periph->softc;
+ csio = &ccb->csio;
+ sense = &csio->sense_data;
+ scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq);
+ error = 0;
+
+ if (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR)
+ && ((sense->flags & (SSD_EOM|SSD_FILEMARK|SSD_ILI)) != 0)
+ && ((sense_key == SSD_KEY_NO_SENSE)
+ || (sense_key == SSD_KEY_BLANK_CHECK))) {
+ u_int32_t info;
+ u_int32_t resid;
+ int defer_action;
+
+ /*
+ * Filter out some sense codes of interest.
+ */
+ if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
+ info = scsi_4btoul(sense->info);
+ resid = info;
+ if ((softc->flags & SA_FLAG_FIXED) != 0)
+ resid *= softc->media_blksize;
+ } else {
+ resid = csio->dxfer_len;
+ info = resid;
+ if ((softc->flags & SA_FLAG_FIXED) != 0)
+ info /= softc->media_blksize;
+ }
+ if ((resid > 0 && resid < csio->dxfer_len)
+ && (softc->flags & SA_FLAG_FIXED) != 0)
+ defer_action = TRUE;
+ else
+ defer_action = FALSE;
+
+ if ((sense->flags & SSD_EOM) != 0
+ || (sense_key == 0x8 /* BLANK CHECK*/)) {
+ csio->resid = resid;
+ if (defer_action) {
+ softc->flags |= SA_FLAG_EOM_PENDING;
+ } else {
+ if (csio->cdb_io.cdb_bytes[0] == SA_WRITE)
+ error = ENOSPC;
+ }
+ }
+ if ((sense->flags & SSD_FILEMARK) != 0) {
+ csio->resid = resid;
+ if (defer_action)
+ softc->flags |= SA_FLAG_EOF_PENDING;
+ }
+ if (sense->flags & SSD_ILI) {
+			if ((int32_t)info < 0) {
+ /*
+ * The record was too big.
+ */
+ xpt_print_path(csio->ccb_h.path);
+ printf("%d-byte tape record bigger "
+				       "than supplied read buffer\n",
+ csio->dxfer_len - info);
+ csio->resid = csio->dxfer_len;
+ error = EIO;
+ } else {
+ csio->resid = resid;
+ if ((softc->flags & SA_FLAG_FIXED) != 0) {
+ if (defer_action)
+ softc->flags |=
+ SA_FLAG_EIO_PENDING;
+ else
+ error = EIO;
+ }
+ }
+ }
+ }
+ if (error == 0)
+ error = cam_periph_error(ccb, cam_flags, sense_flags,
+ &softc->saved_ccb);
+
+ return (error);
+}
+
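+/*
+ * Mode sense the drive and return the parameters the caller asked for
+ * (blocksize, density, number of blocks, buffering mode, write protect,
+ * speed, and/or compression).  Output pointers for parameters that were
+ * not requested may be NULL.
+ */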
+static int
+sagetparams(struct cam_periph *periph, sa_params params_to_get,
+ u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks,
+ int *buff_mode, u_int8_t *write_protect, u_int8_t *speed,
+ int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm,
+ struct scsi_data_compression_page *comp_page)
+{
+ union ccb *ccb;
+ void *mode_buffer;
+ struct scsi_mode_header_6 *mode_hdr;
+ struct scsi_mode_blk_desc *mode_blk;
+ struct scsi_data_compression_page *ncomp_page;
+ int mode_buffer_len;
+ struct sa_softc *softc;
+ int error;
+ cam_status status;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+retry:
+ mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk);
+
+ if (params_to_get & SA_PARAM_COMPRESSION) {
+ if (softc->quirks & SA_QUIRK_NOCOMP) {
+ *comp_supported = FALSE;
+ params_to_get &= ~SA_PARAM_COMPRESSION;
+ } else
+ mode_buffer_len +=
+ sizeof(struct scsi_data_compression_page);
+ }
+
+ mode_buffer = malloc(mode_buffer_len, M_TEMP, M_WAITOK);
+
+ bzero(mode_buffer, mode_buffer_len);
+
+ mode_hdr = (struct scsi_mode_header_6 *)mode_buffer;
+ mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1];
+
+ if (params_to_get & SA_PARAM_COMPRESSION)
+ ncomp_page = (struct scsi_data_compression_page *)&mode_blk[1];
+ else
+ ncomp_page = NULL;
+
+ scsi_mode_sense(&ccb->csio,
+ /*retries*/ 1,
+ /*cbfcnp*/ sadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*dbd*/ FALSE,
+ /*page_code*/ SMS_PAGE_CTRL_CURRENT,
+ /*page*/ (params_to_get & SA_PARAM_COMPRESSION) ?
+ SA_DATA_COMPRESSION_PAGE :
+ SMS_VENDOR_SPECIFIC_PAGE,
+ /*param_buf*/ mode_buffer,
+ /*param_len*/ mode_buffer_len,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ 5000);
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0,
+ /*sense_flags*/SF_NO_PRINT,
+ &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /* relsim_flags */0,
+ /* opening_reduction */0,
+ /* timeout */0,
+ /* getcount_only */ FALSE);
+
+ status = ccb->ccb_h.status & CAM_STATUS_MASK;
+
+ if (error == EINVAL
+ && (params_to_get & SA_PARAM_COMPRESSION) != 0) {
+ /*
+ * Most likely doesn't support the compression
+		 * page. Remember this for the future and attempt
+ * the request without asking for compression info.
+ */
+ softc->quirks |= SA_QUIRK_NOCOMP;
+ free(mode_buffer, M_TEMP);
+ goto retry;
+ } else if (error == 0) {
+ struct scsi_data_compression_page *temp_comp_page;
+
+ temp_comp_page = NULL;
+
+ /*
+ * If the user only wants the compression information, and
+ * the device doesn't send back the block descriptor, it's
+ * no big deal. If the user wants more than just
+ * compression, though, and the device doesn't pass back the
+ * block descriptor, we need to send another mode sense to
+ * get the block descriptor.
+ */
+ if ((mode_hdr->blk_desc_len == 0)
+ && (params_to_get & SA_PARAM_COMPRESSION)
+ && ((params_to_get & ~(SA_PARAM_COMPRESSION)) != 0)) {
+
+ /*
+ * Decrease the mode buffer length by the size of
+ * the compression page, to make sure the data
+ * there doesn't get overwritten.
+ */
+ mode_buffer_len -= sizeof(*ncomp_page);
+
+ /*
+ * Now move the compression page that we presumably
+ * got back down the memory chunk a little bit so
+			 * it doesn't get overwritten by the second mode sense.
+ */
+ temp_comp_page =
+ (struct scsi_data_compression_page *)&mode_hdr[1];
+ bcopy(temp_comp_page, ncomp_page, sizeof(*ncomp_page));
+
+ /*
+ * Now, we issue another mode sense and just ask
+ * for the block descriptor, etc.
+ */
+ scsi_mode_sense(&ccb->csio,
+ /*retries*/ 1,
+ /*cbfcnp*/ sadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*dbd*/ FALSE,
+ /*page_code*/ SMS_PAGE_CTRL_CURRENT,
+ /*page*/ SMS_VENDOR_SPECIFIC_PAGE,
+ /*param_buf*/ mode_buffer,
+ /*param_len*/ mode_buffer_len,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ 5000);
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0,
+ /*sense_flags*/ 0,
+ &softc->device_stats);
+
+ if (error != 0)
+ goto sagetparamsexit;
+
+ }
+
+ if (params_to_get & SA_PARAM_BLOCKSIZE)
+ *blocksize = scsi_3btoul(mode_blk->blklen);
+
+ if (params_to_get & SA_PARAM_NUMBLOCKS)
+ *numblocks = scsi_3btoul(mode_blk->nblocks);
+
+ if (params_to_get & SA_PARAM_BUFF_MODE)
+ *buff_mode = mode_hdr->dev_spec & SMH_SA_BUF_MODE_MASK;
+
+ if (params_to_get & SA_PARAM_DENSITY)
+ *density = mode_blk->density;
+
+ if (params_to_get & SA_PARAM_WP)
+ *write_protect = (mode_hdr->dev_spec & SMH_SA_WP) ?
+ TRUE : FALSE;
+ if (params_to_get & SA_PARAM_SPEED)
+ *speed = mode_hdr->dev_spec & SMH_SA_SPEED_MASK;
+
+ if (params_to_get & SA_PARAM_COMPRESSION) {
+ *comp_supported =(ncomp_page->dce_and_dcc & SA_DCP_DCC)?
+ TRUE : FALSE;
+ *comp_enabled = (ncomp_page->dce_and_dcc & SA_DCP_DCE)?
+ TRUE : FALSE;
+ *comp_algorithm =
+ scsi_4btoul(ncomp_page->comp_algorithm);
+ if (comp_page != NULL)
+ bcopy(ncomp_page, comp_page,sizeof(*comp_page));
+ }
+
+ } else if (status == CAM_SCSI_STATUS_ERROR) {
+ /* Tell the user about the fatal error. */
+ scsi_sense_print(&ccb->csio);
+ }
+
+sagetparamsexit:
+
+ xpt_release_ccb(ccb);
+ free(mode_buffer, M_TEMP);
+ return(error);
+}
+
+/*
+ * The purpose of this function is to set one of four different parameters
+ * for a tape drive:
+ * - blocksize
+ * - density
+ * - compression / compression algorithm
+ * - buffering mode
+ *
+ * The assumption is that this will be called from saioctl(), and therefore
+ * from a process context. Thus the waiting malloc calls below. If that
+ * assumption ever changes, the malloc calls should be changed to be
+ * NOWAIT mallocs.
+ *
+ * Any or all of the four parameters may be set when this function is
+ * called. It should handle setting more than one parameter at once.
+ */
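+/*
+ * For example, the MTCOMP ioctl above enables or disables compression
+ * with:
+ *
+ *	error = sasetparams(periph, SA_PARAM_COMPRESSION, 0, 0, count);
+ *
+ * where count is 0 to disable, MT_COMP_ENABLE to turn compression on
+ * using the current or previously saved algorithm, or a specific
+ * compression algorithm number.
+ */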
+static int
+sasetparams(struct cam_periph *periph, sa_params params_to_set,
+ u_int32_t blocksize, u_int8_t density, u_int32_t comp_algorithm)
+{
+ struct sa_softc *softc;
+ u_int32_t current_blocksize;
+ u_int32_t current_comp_algorithm;
+ u_int8_t current_density;
+ u_int8_t current_speed;
+ int comp_enabled, comp_supported;
+ void *mode_buffer;
+ int mode_buffer_len;
+ struct scsi_mode_header_6 *mode_hdr;
+ struct scsi_mode_blk_desc *mode_blk;
+ struct scsi_data_compression_page *comp_page;
+ struct scsi_data_compression_page *current_comp_page;
+ int buff_mode;
+ union ccb *ccb;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ /* silence the compiler */
+ ccb = NULL;
+
+ current_comp_page = malloc(sizeof(*current_comp_page),M_TEMP, M_WAITOK);
+
+ /*
+ * Since it doesn't make sense to set the number of blocks, or
+ * write protection, we won't try to get the current value. We
+ * always want to get the blocksize, so we can set it back to the
+ * proper value.
+ */
+ error = sagetparams(periph, params_to_set | SA_PARAM_BLOCKSIZE |
+ SA_PARAM_SPEED, &current_blocksize,
+ &current_density, NULL, &buff_mode, NULL,
+ &current_speed, &comp_supported, &comp_enabled,
+ &current_comp_algorithm, current_comp_page);
+
+ if (error != 0) {
+ free(current_comp_page, M_TEMP);
+ return(error);
+ }
+
+ mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk);
+ if (params_to_set & SA_PARAM_COMPRESSION)
+ mode_buffer_len += sizeof(struct scsi_data_compression_page);
+
+ mode_buffer = malloc(mode_buffer_len, M_TEMP, M_WAITOK);
+
+ bzero(mode_buffer, mode_buffer_len);
+
+ mode_hdr = (struct scsi_mode_header_6 *)mode_buffer;
+ mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1];
+
+ if (params_to_set & SA_PARAM_COMPRESSION) {
+ comp_page = (struct scsi_data_compression_page *)&mode_blk[1];
+ bcopy(current_comp_page, comp_page, sizeof(*comp_page));
+ } else
+ comp_page = NULL;
+
+ /*
+ * If the caller wants us to set the blocksize, use the one they
+ * pass in. Otherwise, use the blocksize we got back from the
+	 * mode sense above.
+ */
+ if (params_to_set & SA_PARAM_BLOCKSIZE)
+ scsi_ulto3b(blocksize, mode_blk->blklen);
+ else
+ scsi_ulto3b(current_blocksize, mode_blk->blklen);
+
+ /*
+ * 0x7f means "same as before"
+ */
+ if (params_to_set & SA_PARAM_DENSITY)
+ mode_blk->density = density;
+ else
+ mode_blk->density = 0x7f;
+
+ /*
+ * For mode selects, these two fields must be zero.
+ */
+ mode_hdr->data_length = 0;
+ mode_hdr->medium_type = 0;
+
+ /* set the speed to the current value */
+ mode_hdr->dev_spec = current_speed;
+
+ /* set single-initiator buffering mode */
+ mode_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF;
+
+ mode_hdr->blk_desc_len = sizeof(struct scsi_mode_blk_desc);
+
+ /*
+ * First, if the user wants us to set the compression algorithm or
+ * just turn compression on, check to make sure that this drive
+ * supports compression.
+ */
+ if ((params_to_set & SA_PARAM_COMPRESSION)
+ && (current_comp_page->dce_and_dcc & SA_DCP_DCC)) {
+
+ /*
+ * If the compression algorithm is 0, disable compression.
+ * If the compression algorithm is non-zero, enable
+ * compression and set the compression type to the
+ * specified compression algorithm, unless the algorithm is
+ * MT_COMP_ENABLE. In that case, we look at the
+ * compression algorithm that is currently set and if it is
+ * non-zero, we leave it as-is. If it is zero, and we have
+ * saved a compression algorithm from a time when
+ * compression was enabled before, set the compression to
+ * the saved value.
+ */
+ if (comp_algorithm == 0) {
+ /* disable compression */
+ comp_page->dce_and_dcc &= ~SA_DCP_DCE;
+ } else {
+ /* enable compression */
+ comp_page->dce_and_dcc |= SA_DCP_DCE;
+
+ /* enable decompression */
+ comp_page->dde_and_red |= SA_DCP_DDE;
+
+ if (comp_algorithm != MT_COMP_ENABLE) {
+ /* set the compression algorithm */
+ scsi_ulto4b(comp_algorithm,
+ comp_page->comp_algorithm);
+
+ } else if ((scsi_4btoul(comp_page->comp_algorithm) == 0)
+ && (softc->saved_comp_algorithm != 0)) {
+ scsi_ulto4b(softc->saved_comp_algorithm,
+ comp_page->comp_algorithm);
+ }
+ }
+ } else if (params_to_set & SA_PARAM_COMPRESSION) {
+ /*
+ * The drive doesn't support compression, so turn off the
+ * set compression bit.
+ */
+ params_to_set &= ~SA_PARAM_COMPRESSION;
+
+ /*
+ * Should probably do something other than a printf...like
+ * set a flag in the softc saying that this drive doesn't
+ * support compression.
+ */
+ xpt_print_path(periph->path);
+ printf("sasetparams: device does not support compression\n");
+
+ /*
+ * If that was the only thing the user wanted us to set,
+ * clean up allocated resources and return with 'operation
+ * not supported'.
+ */
+		if (params_to_set == SA_PARAM_NONE) {
+			free(mode_buffer, M_TEMP);
+			free(current_comp_page, M_TEMP);
+			return(ENODEV);
+		}
+
+ /*
+ * That wasn't the only thing the user wanted us to set.
+ * So, decrease the stated mode buffer length by the size
+ * of the compression mode page.
+ */
+ mode_buffer_len -= sizeof(*comp_page);
+ }
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ scsi_mode_select(&ccb->csio,
+ /*retries*/1,
+ /*cbfcnp*/ sadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*scsi_page_fmt*/(params_to_set & SA_PARAM_COMPRESSION)?
+ TRUE : FALSE,
+ /*save_pages*/ FALSE,
+ /*param_buf*/ mode_buffer,
+ /*param_len*/ mode_buffer_len,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ 5000);
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0,
+ /*sense_flags*/ 0, &softc->device_stats);
+
+ if (error == 0) {
+ xpt_release_ccb(ccb);
+ } else {
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ /*
+ * If we were setting the blocksize, and that failed, we
+ * want to set it to its original value. If we weren't
+ * setting the blocksize, we don't want to change it.
+ */
+ scsi_ulto3b(current_blocksize, mode_blk->blklen);
+
+ /*
+ * 0x7f means "same as before".
+ */
+ if (params_to_set & SA_PARAM_DENSITY)
+ mode_blk->density = current_density;
+ else
+ mode_blk->density = 0x7f;
+
+ if (params_to_set & SA_PARAM_COMPRESSION)
+ bcopy(current_comp_page, comp_page,
+ sizeof(struct scsi_data_compression_page));
+
+ /*
+ * The retry count is the only CCB field that might have been
+ * changed that we care about, so reset it back to 1.
+ */
+ ccb->ccb_h.retry_count = 1;
+ cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0,
+ /*sense_flags*/ 0, &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ xpt_release_ccb(ccb);
+ }
+
+	/* current_comp_page is always allocated above, so always free it. */
+	free(current_comp_page, M_TEMP);
+
+ free(mode_buffer, M_TEMP);
+ return(error);
+}
+
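+/*
+ * Lock or unlock the media with PREVENT ALLOW MEDIUM REMOVAL, tracking
+ * the result in SA_FLAG_TAPE_LOCKED so that redundant requests are
+ * skipped.
+ */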
+static void
+saprevent(struct cam_periph *periph, int action)
+{
+ struct sa_softc *softc;
+ union ccb *ccb;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ if (((action == PR_ALLOW)
+ && (softc->flags & SA_FLAG_TAPE_LOCKED) == 0)
+ || ((action == PR_PREVENT)
+ && (softc->flags & SA_FLAG_TAPE_LOCKED) != 0)) {
+ return;
+ }
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+
+ scsi_prevent(&ccb->csio,
+ /*retries*/0,
+ /*cbcfp*/sadone,
+ MSG_SIMPLE_Q_TAG,
+ action,
+ SSD_FULL_SIZE,
+ 60000);
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ if (error == 0) {
+ if (action == PR_ALLOW)
+ softc->flags &= ~SA_FLAG_TAPE_LOCKED;
+ else
+ softc->flags |= SA_FLAG_TAPE_LOCKED;
+ }
+
+ xpt_release_ccb(ccb);
+}
+
+static int
+sarewind(struct cam_periph *periph)
+{
+ union ccb *ccb;
+ struct sa_softc *softc;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+
+ /*
+ * Put in a 2 hour timeout to deal with especially slow tape drives.
+ */
+ scsi_rewind(&ccb->csio,
+ /*retries*/1,
+ /*cbcfp*/sadone,
+ MSG_SIMPLE_Q_TAG,
+ /*immediate*/FALSE,
+ SSD_FULL_SIZE,
+ (120 * 60 * 1000)); /* 2 hours */
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ xpt_release_ccb(ccb);
+
+ return (error);
+}
+
+static int
+saspace(struct cam_periph *periph, int count, scsi_space_code code)
+{
+ union ccb *ccb;
+ struct sa_softc *softc;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+
+ scsi_space(&ccb->csio,
+ /*retries*/1,
+ /*cbcfp*/sadone,
+ MSG_SIMPLE_Q_TAG,
+ code, count,
+ SSD_FULL_SIZE,
+ 60 * 60 *1000);
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ xpt_release_ccb(ccb);
+
+ return (error);
+}
+
+static int
+sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks)
+{
+ union ccb *ccb;
+ struct sa_softc *softc;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+
+ scsi_write_filemarks(&ccb->csio,
+ /*retries*/1,
+ /*cbcfp*/sadone,
+ MSG_SIMPLE_Q_TAG,
+ /*immediate*/FALSE,
+ setmarks,
+ nmarks,
+ SSD_FULL_SIZE,
+ 60000);
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ if (error == 0) {
+ struct sa_softc *softc;
+
+ softc = (struct sa_softc *)periph->softc;
+ softc->filemarks += nmarks;
+ }
+
+ xpt_release_ccb(ccb);
+
+ return (error);
+}
+
+static int
+saretension(struct cam_periph *periph)
+{
+ union ccb *ccb;
+ struct sa_softc *softc;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+
+ scsi_load_unload(&ccb->csio,
+ /*retries*/ 1,
+ /*cbfcnp*/ sadone,
+ MSG_SIMPLE_Q_TAG,
+ /*immediate*/ FALSE,
+ /*eot*/ FALSE,
+ /*reten*/ TRUE,
+ /*load*/ TRUE,
+ SSD_FULL_SIZE,
+ 60000);
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ xpt_release_ccb(ccb);
+
+ return(error);
+}
+
+static int
+sareservereleaseunit(struct cam_periph *periph, int reserve)
+{
+ union ccb *ccb;
+ struct sa_softc *softc;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ scsi_reserve_release_unit(&ccb->csio,
+ /*retries*/ 1,
+ /*cbfcnp*/ sadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*third_party*/ FALSE,
+ /*third_party_id*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ 5000,
+ reserve);
+
+ /*
+ * We set SF_RETRY_UA, since this is often the first command run
+ * when a tape device is opened, and there may be a unit attention
+ * condition pending.
+ */
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
+ /*sense_flags*/SF_RETRY_UA,
+ &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ xpt_release_ccb(ccb);
+
+ return (error);
+}
+
+static int
+saloadunload(struct cam_periph *periph, int load)
+{
+ union ccb *ccb;
+ struct sa_softc *softc;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+
+ scsi_load_unload(&ccb->csio,
+ /*retries*/1,
+ /*cbfcnp*/sadone,
+ MSG_SIMPLE_Q_TAG,
+ /*immediate*/FALSE,
+ /*eot*/FALSE,
+ /*reten*/FALSE,
+ load,
+ SSD_FULL_SIZE,
+ 60000);
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ xpt_release_ccb(ccb);
+
+ return (error);
+}
+
+static int
+saerase(struct cam_periph *periph, int longerase)
+{
+
+ union ccb *ccb;
+ struct sa_softc *softc;
+ int error;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ ccb = cam_periph_getccb(periph, /*priority*/ 1);
+
+ scsi_erase(&ccb->csio,
+ /*retries*/ 1,
+ /*cbfcnp*/ sadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*immediate*/ FALSE,
+ /*long_erase*/ longerase,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ 4 * 60 * 60 * 1000); /* 4 hours */
+
+ error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
+ /*sense_flags*/0, &softc->device_stats);
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+
+ xpt_release_ccb(ccb);
+
+ return (error);
+}
+
+#endif /* KERNEL */
+
+/*
+ * Read tape block limits command.
+ */
+void
+scsi_read_block_limits(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action,
+ struct scsi_read_block_limits_data *rlimit_buf,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_read_block_limits *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*data_ptr*/(u_int8_t *)rlimit_buf,
+ /*dxfer_len*/sizeof(*rlimit_buf),
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_read_block_limits *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = READ_BLOCK_LIMITS;
+}
+
+void
+scsi_sa_read_write(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int readop, int sli,
+ int fixed, u_int32_t length, u_int8_t *data_ptr,
+ u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_sa_rw *scsi_cmd;
+
+ scsi_cmd = (struct scsi_sa_rw *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = readop ? SA_READ : SA_WRITE;
+ scsi_cmd->sli_fixed = 0;
+ if (sli && readop)
+ scsi_cmd->sli_fixed |= SAR_SLI;
+ if (fixed)
+ scsi_cmd->sli_fixed |= SARW_FIXED;
+ scsi_ulto3b(length, scsi_cmd->length);
+ scsi_cmd->control = 0;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_load_unload(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int immediate, int eot,
+ int reten, int load, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_load_unload *scsi_cmd;
+
+ scsi_cmd = (struct scsi_load_unload *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = LOAD_UNLOAD;
+ if (immediate)
+ scsi_cmd->immediate = SLU_IMMED;
+ if (eot)
+ scsi_cmd->eot_reten_load |= SLU_EOT;
+ if (reten)
+ scsi_cmd->eot_reten_load |= SLU_RETEN;
+ if (load)
+ scsi_cmd->eot_reten_load |= SLU_LOAD;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_rewind(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int immediate, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_rewind *scsi_cmd;
+
+ scsi_cmd = (struct scsi_rewind *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = REWIND;
+ if (immediate)
+ scsi_cmd->immediate = SREW_IMMED;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_space(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, scsi_space_code code,
+ u_int32_t count, u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_space *scsi_cmd;
+
+ scsi_cmd = (struct scsi_space *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = SPACE;
+ scsi_cmd->code = code;
+ scsi_ulto3b(count, scsi_cmd->count);
+ scsi_cmd->control = 0;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_write_filemarks(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int immediate, int setmark,
+ u_int32_t num_marks, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_write_filemarks *scsi_cmd;
+
+ scsi_cmd = (struct scsi_write_filemarks *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = WRITE_FILEMARKS;
+ if (immediate)
+ scsi_cmd->byte2 |= SWFMRK_IMMED;
+ if (setmark)
+ scsi_cmd->byte2 |= SWFMRK_WSMK;
+
+ scsi_ulto3b(num_marks, scsi_cmd->num_marks);
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+/*
+ * The reserve and release unit commands differ only by their opcodes.
+ */
+void
+scsi_reserve_release_unit(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int third_party,
+ int third_party_id, u_int8_t sense_len,
+ u_int32_t timeout, int reserve)
+{
+ struct scsi_reserve_release_unit *scsi_cmd;
+
+ scsi_cmd = (struct scsi_reserve_release_unit *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ if (reserve)
+ scsi_cmd->opcode = RESERVE_UNIT;
+ else
+ scsi_cmd->opcode = RELEASE_UNIT;
+
+ if (third_party) {
+ scsi_cmd->lun_thirdparty |= SRRU_3RD_PARTY;
+ scsi_cmd->lun_thirdparty |=
+ ((third_party_id << SRRU_3RD_SHAMT) & SRRU_3RD_MASK);
+ }
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_erase(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int immediate, int long_erase,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_erase *scsi_cmd;
+
+ scsi_cmd = (struct scsi_erase *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->opcode = ERASE;
+
+ if (immediate)
+ scsi_cmd->lun_imm_long |= SE_IMMED;
+
+ if (long_erase)
+ scsi_cmd->lun_imm_long |= SE_LONG;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
diff --git a/sys/cam/scsi/scsi_sa.h b/sys/cam/scsi/scsi_sa.h
new file mode 100644
index 0000000..cc5f14d
--- /dev/null
+++ b/sys/cam/scsi/scsi_sa.h
@@ -0,0 +1,254 @@
+/*
+ * Structure and function declarations for the
+ * SCSI Sequential Access Peripheral driver for CAM.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _SCSI_SCSI_SA_H
+#define _SCSI_SCSI_SA_H 1
+
+#include <sys/cdefs.h>
+
+struct scsi_read_block_limits
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[3];
+ u_int8_t control;
+};
+
+struct scsi_read_block_limits_data
+{
+ u_int8_t gran;
+#define RBL_GRAN_MASK 0x1F
+#define RBL_GRAN(rblim) ((rblim)->gran & RBL_GRAN_MASK)
+ u_int8_t maximum[3];
+ u_int8_t minimum[2];
+};
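+/*
+ * The maximum and minimum block length fields above are returned
+ * big-endian; consumers decode them with scsi_3btoul()/scsi_2btoul()
+ * (see samount()).
+ */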
+
+struct scsi_sa_rw
+{
+ u_int8_t opcode;
+ u_int8_t sli_fixed;
+#define SAR_SLI 0x02
+#define SARW_FIXED 0x01
+ u_int8_t length[3];
+ u_int8_t control;
+};
+
+struct scsi_load_unload
+{
+ u_int8_t opcode;
+ u_int8_t immediate;
+#define SLU_IMMED 0x01
+ u_int8_t reserved[2];
+ u_int8_t eot_reten_load;
+#define SLU_EOT 0x04
+#define SLU_RETEN 0x02
+#define SLU_LOAD 0x01
+ u_int8_t control;
+};
+
+struct scsi_rewind
+{
+ u_int8_t opcode;
+ u_int8_t immediate;
+#define SREW_IMMED 0x01
+ u_int8_t reserved[3];
+ u_int8_t control;
+};
+
+typedef enum {
+ SS_BLOCKS,
+ SS_FILEMARKS,
+ SS_SEQFILEMARKS,
+ SS_EOD,
+ SS_SETMARKS,
+ SS_SEQSETMARKS
+} scsi_space_code;
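+/*
+ * These codes go directly into the code field of the SPACE CDB.  The
+ * sa driver uses SS_FILEMARKS for MTFSF/MTBSF, SS_BLOCKS for
+ * MTFSR/MTBSR, and SS_EOD for MTEOD, negating the count for the
+ * backward variants.
+ */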
+
+struct scsi_space
+{
+ u_int8_t opcode;
+ u_int8_t code;
+#define SREW_IMMED 0x01
+ u_int8_t count[3];
+ u_int8_t control;
+};
+
+struct scsi_write_filemarks
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SWFMRK_IMMED 0x01
+#define SWFMRK_WSMK 0x02
+ u_int8_t num_marks[3];
+ u_int8_t control;
+};
+
+/*
+ * Reserve and release unit have exactly the same CDB format, but different
+ * opcodes.
+ */
+struct scsi_reserve_release_unit
+{
+ u_int8_t opcode;
+ u_int8_t lun_thirdparty;
+#define SRRU_LUN_MASK 0xE0
+#define SRRU_3RD_PARTY 0x10
+#define SRRU_3RD_SHAMT 1
+#define SRRU_3RD_MASK 0xE
+ u_int8_t reserved[3];
+ u_int8_t control;
+};
+
+/*
+ * Erase a tape
+ */
+struct scsi_erase
+{
+ u_int8_t opcode;
+ u_int8_t lun_imm_long;
+#define SE_LUN_MASK 0xE0
+#define SE_LONG 0x1
+#define SE_IMMED 0x2
+ u_int8_t reserved[3];
+ u_int8_t control;
+};
+
+/*
+ * Dev specific mode page masks.
+ */
+#define SMH_SA_WP 0x80
+#define SMH_SA_BUF_MODE_MASK 0x70
+#define SMH_SA_BUF_MODE_NOBUF 0x00
+#define SMH_SA_BUF_MODE_SIBUF 0x10 /* Single-Initiator buffering */
+#define SMH_SA_BUF_MODE_MIBUF 0x20 /* Multi-Initiator buffering */
+#define SMH_SA_SPEED_MASK 0x0F
+#define SMH_SA_SPEED_DEFAULT 0x00
+
+/*
+ * Sequential-access specific mode page numbers.
+ */
+#define SA_DATA_COMPRESSION_PAGE 0x0f
+#define SA_DEVICE_CONFIGURATION_PAGE 0x10
+#define SA_MEDIUM_PARTITION_PAGE_1 0x11
+#define SA_MEDIUM_PARTITION_PAGE_2 0x12
+#define SA_MEDIUM_PARTITION_PAGE_3 0x13
+#define SA_MEDIUM_PARTITION_PAGE_4 0x14
+
+/*
+ * Mode page definitions.
+ */
+
+struct scsi_data_compression_page {
+ u_int8_t page_code;
+ u_int8_t page_length;
+#define SA_DCP_DCE 0x80 /* Data compression enable */
+#define SA_DCP_DCC 0x40 /* Data compression capable */
+ u_int8_t dce_and_dcc;
+#define SA_DCP_DDE 0x80 /* Data decompression enable */
+#define SA_DCP_RED_MASK 0x60 /* Report Exception on Decomp. */
+#define SA_DCP_RED_SHAMT 5
+#define SA_DCP_RED_0 0x00
+#define SA_DCP_RED_1 0x20
+#define SA_DCP_RED_2 0x40
+ u_int8_t dde_and_red;
+ u_int8_t comp_algorithm[4];
+ u_int8_t decomp_algorithm[4];
+ u_int8_t reserved[4];
+};
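+/*
+ * DCC reports whether the drive is capable of compression; DCE and DDE
+ * enable compression and decompression respectively.  sagetparams() and
+ * sasetparams() read and update this page when SA_PARAM_COMPRESSION is
+ * requested.
+ */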
+
+/*
+ * Opcodes
+ */
+#define REWIND 0x01
+#define READ_BLOCK_LIMITS 0x05
+#define SA_READ 0x08
+#define SA_WRITE 0x0A
+#define WRITE_FILEMARKS 0x10
+#define SPACE 0x11
+#define RESERVE_UNIT 0x16
+#define RELEASE_UNIT 0x17
+#define ERASE 0x19
+#define LOAD_UNLOAD 0x1B
+
+__BEGIN_DECLS
+void scsi_read_block_limits(struct ccb_scsiio *, u_int32_t,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t, struct scsi_read_block_limits_data *,
+ u_int8_t , u_int32_t);
+
+void scsi_sa_read_write(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int readop, int sli,
+ int fixed, u_int32_t length, u_int8_t *data_ptr,
+ u_int32_t dxfer_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_rewind(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int immediate, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_space(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, scsi_space_code code,
+ u_int32_t count, u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_load_unload(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int immediate, int eot,
+ int reten, int load, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_write_filemarks(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int immediate, int setmark,
+ u_int32_t num_marks, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_reserve_release_unit(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), u_int8_t tag_action,
+ int third_party, int third_party_id,
+ u_int8_t sense_len, u_int32_t timeout,
+ int reserve);
+
+void scsi_erase(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int immediate, int long_erase,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_data_comp_page(struct scsi_data_compression_page *page,
+ u_int8_t dce, u_int8_t dde, u_int8_t red,
+ u_int32_t comp_algorithm,
+ u_int32_t decomp_algorithm);
+__END_DECLS
+
+#endif /* _SCSI_SCSI_SA_H */
diff --git a/sys/cam/scsi/scsi_target.c b/sys/cam/scsi/scsi_target.c
new file mode 100644
index 0000000..5600de5
--- /dev/null
+++ b/sys/cam/scsi/scsi_target.c
@@ -0,0 +1,1459 @@
+/*
+ * Implementation of a simple Target Mode SCSI Processor Target driver for CAM.
+ *
+ * Copyright (c) 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+#include <stddef.h> /* For offsetof */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/buf.h>
+#include <sys/conf.h>
+#include <sys/devicestat.h>
+#include <sys/malloc.h>
+#include <sys/poll.h>
+#include <sys/select.h> /* For struct selinfo. */
+#include <sys/uio.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_extend.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_debug.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_pt.h>
+#include <cam/scsi/scsi_targetio.h>
+#include <cam/scsi/scsi_message.h>
+
+typedef enum {
+ TARG_STATE_NORMAL,
+ TARG_STATE_EXCEPTION,
+ TARG_STATE_TEARDOWN
+} targ_state;
+
+typedef enum {
+ TARG_FLAG_NONE = 0x00,
+ TARG_FLAG_SEND_EOF = 0x01,
+ TARG_FLAG_RECEIVE_EOF = 0x02
+} targ_flags;
+
+typedef enum {
+ TARG_CCB_WORKQ,
+ TARG_CCB_WAITING
+} targ_ccb_types;
+
+#define MAX_ACCEPT 16
+#define MAX_IMMEDIATE 16
+#define MAX_BUF_SIZE 256 /* Max inquiry/sense/mode page transfer */
+#define MAX_INITIATORS 16 /* XXX More for Fibre-Channel */
+
+#define MIN(a, b) (((a) > (b)) ? (b) : (a))
+
+/* Offsets into our private CCB area for storing accept information */
+#define ccb_type ppriv_field0
+#define ccb_descr ppriv_ptr1
+
+/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
+#define ccb_atio ppriv_ptr1
+
+TAILQ_HEAD(ccb_queue, ccb_hdr);
+
+struct targ_softc {
+ struct ccb_queue pending_queue;
+ struct ccb_queue work_queue;
+ struct ccb_queue snd_ccb_queue;
+ struct ccb_queue rcv_ccb_queue;
+ struct ccb_queue unknown_atio_queue;
+ struct buf_queue_head snd_buf_queue;
+ struct buf_queue_head rcv_buf_queue;
+ struct devstat device_stats;
+ struct selinfo snd_select;
+ struct selinfo rcv_select;
+ targ_state state;
+ targ_flags flags;
+ targ_exception exceptions;
+ u_int init_level;
+ u_int inq_data_len;
+ struct scsi_inquiry_data *inq_data;
+ struct initiator_state istate[MAX_INITIATORS];
+};
+
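+/*
+ * Per-command descriptor.  Each accept TIO carries a pointer to one of
+ * these in its ccb_descr field so the driver can track the residual
+ * count, the backing store used for the transfer, and the status to
+ * return to the initiator.
+ */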
+struct targ_cmd_desc {
+ SLIST_ENTRY(targ_cmd_desc) links;
+ u_int data_resid; /* How much left to transfer */
+ u_int data_increment;/* Amount to send before next disconnect */
+ void* data; /* The data. Can be from backing_store or not */
+ void* backing_store;/* Backing store allocated for this descriptor*/
+ struct buf *bp; /* Buffer for this transfer */
+ u_int max_size; /* Size of backing_store */
+ u_int32_t timeout;
+ u_int8_t status; /* Status to return to initiator */
+};
+
+static d_open_t targopen;
+static d_close_t targclose;
+static d_read_t targread;
+static d_write_t targwrite;
+static d_ioctl_t targioctl;
+static d_poll_t targpoll;
+static d_strategy_t targstrategy;
+
+#define TARG_CDEV_MAJOR 65
+static struct cdevsw targ_cdevsw = {
+ /*d_open*/ targopen,
+ /*d_close*/ targclose,
+ /*d_read*/ targread,
+ /*d_write*/ targwrite,
+ /*d_ioctl*/ targioctl,
+ /*d_stop*/ nostop,
+ /*d_reset*/ noreset,
+ /*d_devtotty*/ nodevtotty,
+ /*d_poll*/ targpoll,
+ /*d_mmap*/ nommap,
+ /*d_strategy*/ targstrategy,
+ /*d_name*/ "targ",
+ /*d_spare*/ NULL,
+ /*d_maj*/ -1,
+ /*d_dump*/ nodump,
+ /*d_psize*/ nopsize,
+ /*d_flags*/ 0,
+ /*d_maxio*/ 0,
+ /*b_maj*/ -1
+};
+
+static int targsendccb(struct cam_periph *periph, union ccb *ccb,
+ union ccb *inccb);
+static periph_init_t targinit;
+static void targasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg);
+static periph_ctor_t targctor;
+static periph_dtor_t targdtor;
+static void targrunqueue(struct cam_periph *periph,
+ struct targ_softc *softc);
+static periph_start_t targstart;
+static void targdone(struct cam_periph *periph,
+ union ccb *done_ccb);
+static void targfireexception(struct cam_periph *periph,
+ struct targ_softc *softc);
+static int targerror(union ccb *ccb, u_int32_t cam_flags,
+ u_int32_t sense_flags);
+static struct targ_cmd_desc* allocdescr(void);
+static void freedescr(struct targ_cmd_desc *buf);
+static void fill_sense(struct scsi_sense_data *sense,
+ u_int error_code, u_int sense_key,
+ u_int asc, u_int ascq);
+
+static struct periph_driver targdriver =
+{
+ targinit, "targ",
+ TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
+};
+
+DATA_SET(periphdriver_set, targdriver);
+
+static struct extend_array *targperiphs;
+
+static void
+targinit(void)
+{
+ cam_status status;
+ struct cam_path *path;
+
+ /*
+ * Create our extend array for storing the devices we attach to.
+ */
+ targperiphs = cam_extend_new();
+ if (targperiphs == NULL) {
+ printf("targ: Failed to alloc extend array!\n");
+ return;
+ }
+
+ /*
+ * Install a global async callback. This callback will
+ * receive async callbacks like "new path registered".
+ */
+ status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+
+ if (status == CAM_REQ_CMP) {
+ struct ccb_setasync csa;
+
+ xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_PATH_REGISTERED;
+ csa.callback = targasync;
+ csa.callback_arg = NULL;
+ xpt_action((union ccb *)&csa);
+ status = csa.ccb_h.status;
+ xpt_free_path(path);
+ }
+
+ if (status != CAM_REQ_CMP) {
+ printf("targ: Failed to attach master async callback "
+ "due to status 0x%x!\n", status);
+ } else {
+		/* If we were successful, register our devsw */
+ dev_t dev;
+
+ dev = makedev(TARG_CDEV_MAJOR, 0);
+ cdevsw_add(&dev,&targ_cdevsw, NULL);
+ }
+}
+
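+/*
+ * Async callback.  When a new path that advertises processor target
+ * mode support (PIT_PROCESSOR) is registered, attach a "targ" instance
+ * to the controller's own initiator ID at LUN 0.
+ */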
+static void
+targasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg)
+{
+ struct cam_periph *periph;
+
+ periph = (struct cam_periph *)callback_arg;
+ switch (code) {
+ case AC_PATH_REGISTERED:
+ {
+ struct ccb_pathinq *cpi;
+ struct cam_path *new_path;
+ cam_status status;
+
+ cpi = (struct ccb_pathinq *)arg;
+
+ /* Only attach to controllers that support target mode */
+ if ((cpi->target_sprt & PIT_PROCESSOR) == 0)
+ break;
+
+ /*
+ * Allocate a peripheral instance for
+ * this target instance.
+ */
+ status = xpt_create_path(&new_path, NULL,
+ xpt_path_path_id(path),
+ cpi->initiator_id, /*lun*/0);
+ if (status != CAM_REQ_CMP) {
+ printf("targasync: Unable to create path "
+ "due to status 0x%x\n", status);
+ break;
+ }
+ status = cam_periph_alloc(targctor, targdtor, targstart,
+ "targ", CAM_PERIPH_BIO,
+ new_path, targasync,
+ AC_PATH_REGISTERED,
+ cpi);
+ xpt_free_path(new_path);
+ if (status != CAM_REQ_CMP
+ && status != CAM_REQ_INPROG)
+ printf("targasync: Unable to attach to new device "
+ "due to status 0x%x\n", status);
+ break;
+ }
+ case AC_PATH_DEREGISTERED:
+ {
+ /* XXX Implement */
+ break;
+ }
+ case AC_BUS_RESET:
+ {
+ /* Flush transaction queue */
+ }
+ default:
+ break;
+ }
+}
+
+static cam_status
+targctor(struct cam_periph *periph, void *arg)
+{
+ union ccb immed_ccb;
+ struct targ_softc *softc;
+ cam_status status;
+ int i;
+
+ /* Allocate our per-instance private storage */
+ softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
+ if (softc == NULL) {
+ printf("targctor: unable to malloc softc\n");
+ return (CAM_REQ_CMP_ERR);
+ }
+
+	bzero(softc, sizeof(*softc));
+ TAILQ_INIT(&softc->pending_queue);
+ TAILQ_INIT(&softc->work_queue);
+ TAILQ_INIT(&softc->snd_ccb_queue);
+ TAILQ_INIT(&softc->rcv_ccb_queue);
+ TAILQ_INIT(&softc->unknown_atio_queue);
+ bufq_init(&softc->snd_buf_queue);
+ bufq_init(&softc->rcv_buf_queue);
+ softc->state = TARG_STATE_NORMAL;
+ periph->softc = softc;
+ softc->init_level++;
+
+ cam_extend_set(targperiphs, periph->unit_number, periph);
+
+ /*
+ * We start out life with a UA to indicate power-on/reset.
+ */
+ for (i = 0; i < MAX_INITIATORS; i++)
+ softc->istate[i].pending_ua = UA_POWER_ON;
+
+ /*
+ * Allocate an initial inquiry data buffer. We might allow the
+ * user to override this later via an ioctl.
+ */
+ softc->inq_data_len = sizeof(*softc->inq_data);
+ softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
+	if (softc->inq_data == NULL) {
+		printf("targctor - Unable to malloc inquiry data\n");
+		targdtor(periph);
+		return (CAM_RESRC_UNAVAIL);
+	}
+ bzero(softc->inq_data, softc->inq_data_len);
+ softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
+ softc->inq_data->version = 2;
+ softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
+ softc->inq_data->additional_length = softc->inq_data_len - 4;
+ strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
+ strncpy(softc->inq_data->product, "TM-PT ", SID_PRODUCT_SIZE);
+ strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
+ softc->init_level++;
+
+ /* Attempt to enable the lun of interest */
+ xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
+ immed_ccb.ccb_h.func_code = XPT_EN_LUN;
+
+ /* Don't need support for any vendor specific commands */
+ immed_ccb.cel.grp6_len = 0;
+ immed_ccb.cel.grp7_len = 0;
+ immed_ccb.cel.enable = 1;
+ xpt_action(&immed_ccb);
+ status = immed_ccb.ccb_h.status;
+
+ if (status != CAM_REQ_CMP) {
+ xpt_print_path(periph->path);
+ printf("targctor - Enable Lun Rejected for status 0x%x\n",
+ status);
+ targdtor(periph);
+ return (status);
+ }
+
+ softc->init_level++;
+
+ /*
+ * Build up a buffer of accept target I/O
+ * operations for incoming selections.
+ */
+ for (i = 0; i < MAX_ACCEPT; i++) {
+ struct ccb_accept_tio *atio;
+
+ atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
+ M_NOWAIT);
+ if (atio == NULL) {
+ status = CAM_RESRC_UNAVAIL;
+ break;
+ }
+
+ atio->ccb_h.ccb_descr = allocdescr();
+
+ if (atio->ccb_h.ccb_descr == NULL) {
+ free(atio, M_DEVBUF);
+ status = CAM_RESRC_UNAVAIL;
+ break;
+ }
+
+ xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
+ atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
+ atio->ccb_h.cbfcnp = targdone;
+ xpt_action((union ccb *)atio);
+ status = atio->ccb_h.status;
+ if (status != CAM_REQ_INPROG) {
+ free(atio, M_DEVBUF);
+ break;
+ }
+ }
+
+ if (i == 0) {
+ xpt_print_path(periph->path);
+ printf("targctor - Could not allocate accept tio CCBs: "
+ "status = 0x%x\n", status);
+ targdtor(periph);
+ return (CAM_REQ_CMP_ERR);
+ }
+
+ /*
+ * Build up a buffer of immediate notify CCBs
+ * so the SIM can tell us of asynchronous target mode events.
+ */
+ for (i = 0; i < MAX_ACCEPT; i++) {
+ struct ccb_immed_notify *inot;
+
+ inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
+ M_NOWAIT);
+
+ if (inot == NULL) {
+ status = CAM_RESRC_UNAVAIL;
+ break;
+ }
+
+ xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
+ inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
+ inot->ccb_h.cbfcnp = targdone;
+ xpt_action((union ccb *)inot);
+ status = inot->ccb_h.status;
+ if (status != CAM_REQ_INPROG) {
+ free(inot, M_DEVBUF);
+ break;
+ }
+ }
+
+ if (i == 0) {
+ xpt_print_path(periph->path);
+ printf("targctor - Could not allocate immediate notify CCBs: "
+ "status = 0x%x\n", status);
+ targdtor(periph);
+ return (CAM_REQ_CMP_ERR);
+ }
+
+ return (CAM_REQ_CMP);
+}
+
+static void
+targdtor(struct cam_periph *periph)
+{
+ struct targ_softc *softc;
+
+ softc = (struct targ_softc *)periph->softc;
+
+ softc->state = TARG_STATE_TEARDOWN;
+
+ switch (softc->init_level) {
+ default:
+ /* FALLTHROUGH */
+ case 3:
+ {
+ struct ccb_en_lun cel;
+ /*
+ * XXX Spec requires abort of all ACCEPT and
+ * IMMEDIATE CCBs first. Act accordingly.
+ */
+ /*
+ * Disable this lun.
+ */
+ xpt_setup_ccb(&cel.ccb_h, periph->path, /*priority*/1);
+ cel.ccb_h.func_code = XPT_EN_LUN;
+ cel.enable = 0;
+ xpt_action((union ccb *)&cel);
+ /* FALLTHROUGH */
+ }
+ case 2:
+ free(softc->inq_data, M_DEVBUF);
+ /* FALLTHROUGH */
+ case 1:
+ free(softc, M_DEVBUF);
+ break;
+ case 0:
+ panic("targdtor - impossible init level");;
+ }
+}
+
+static int
+targopen(dev_t dev, int flags, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct targ_softc *softc;
+ u_int unit;
+ int s;
+
+ unit = minor(dev);
+ periph = cam_extend_get(targperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+ softc = (struct targ_softc *)periph->softc;
+
+ return (0);
+}
+
+static int
+targclose(dev_t dev, int flag, int fmt, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct targ_softc *softc;
+ u_int unit;
+ int s;
+
+ unit = minor(dev);
+ periph = cam_extend_get(targperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+ softc = (struct targ_softc *)periph->softc;
+
+ return (0);
+}
+
+static int
+targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct targ_softc *softc;
+ u_int unit;
+ int error;
+ int s;
+
+ unit = minor(dev);
+ periph = cam_extend_get(targperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+ softc = (struct targ_softc *)periph->softc;
+ error = 0;
+ switch (cmd) {
+ case TARGIOCFETCHEXCEPTION:
+ *((targ_exception *)addr) = softc->exceptions;
+ break;
+ case TARGIOCCLEAREXCEPTION:
+ {
+ targ_exception clear_mask;
+
+ clear_mask = *((targ_exception *)addr);
+ if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
+ struct ccb_hdr *ccbh;
+
+ ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
+ if (ccbh != NULL) {
+ TAILQ_REMOVE(&softc->unknown_atio_queue,
+ ccbh, periph_links.tqe);
+ ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
+ }
+ if (ccbh != NULL)
+ clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
+ }
+ softc->exceptions &= ~clear_mask;
+ if (softc->exceptions == TARG_EXCEPT_NONE
+ && softc->state == TARG_STATE_EXCEPTION) {
+ softc->state = TARG_STATE_NORMAL;
+ targrunqueue(periph, softc);
+ }
+ break;
+ }
+ case TARGIOCFETCHATIO:
+ {
+ struct ccb_hdr *ccbh;
+
+ ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
+ if (ccbh != NULL) {
+ bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
+ } else {
+ error = ENOENT;
+ }
+ break;
+ }
+ case TARGIOCCOMMAND:
+ {
+ union ccb *inccb;
+ union ccb *ccb;
+
+ /*
+ * XXX JGibbs
+ * This code is lifted directly from the pass-thru driver.
+ * Perhaps this should be moved to a library????
+ */
+ inccb = (union ccb *)addr;
+ ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
+
+ error = targsendccb(periph, ccb, inccb);
+
+ xpt_release_ccb(ccb);
+
+ break;
+ }
+ case TARGIOCGETISTATE:
+ case TARGIOCSETISTATE:
+ {
+ struct ioc_initiator_state *ioc_istate;
+
+ ioc_istate = (struct ioc_initiator_state *)addr;
+ if (ioc_istate->initiator_id >= MAX_INITIATORS) {
+ error = EINVAL;
+ break;
+ }
+ xpt_print_path(periph->path);
+ printf("GET/SETISTATE for %d\n", ioc_istate->initiator_id);
+ if (cmd == TARGIOCGETISTATE) {
+ bcopy(&softc->istate[ioc_istate->initiator_id],
+ &ioc_istate->istate, sizeof(ioc_istate->istate));
+ } else {
+ bcopy(&ioc_istate->istate,
+ &softc->istate[ioc_istate->initiator_id],
+ sizeof(ioc_istate->istate));
+ xpt_print_path(periph->path);
+ printf("pending_ca now %x\n",
+ softc->istate[ioc_istate->initiator_id].pending_ca);
+ }
+ break;
+ }
+ default:
+ error = ENOTTY;
+ break;
+ }
+ return (error);
+}
+
+/*
+ * XXX JGibbs lifted from pass-thru driver.
+ * Generally, "ccb" should be the CCB supplied by the kernel. "inccb"
+ * should be the CCB that is copied in from the user.
+ */
+static int
+targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
+{
+ struct buf *bp[2];
+ struct targ_softc *softc;
+ struct cam_periph_map_info mapinfo;
+ int error, need_unmap;
+
+ softc = (struct targ_softc *)periph->softc;
+
+ need_unmap = 0;
+
+ /*
+ * There are some fields in the CCB header that need to be
+ * preserved, the rest we get from the user.
+ */
+ xpt_merge_ccb(ccb, inccb);
+
+ /*
+ * There's no way for the user to have a completion
+ * function, so we put our own completion function in here.
+ */
+ ccb->ccb_h.cbfcnp = targdone;
+
+ /*
+ * We only attempt to map the user memory into kernel space
+ * if they haven't passed in a physical memory pointer,
+ * and if there is actually an I/O operation to perform.
+ * Right now cam_periph_mapmem() only supports SCSI and device
+ * match CCBs. For the SCSI CCBs, we only pass the CCB in if
+ * there's actually data to map. cam_periph_mapmem() will do the
+ * right thing, even if there isn't data to map, but since CCBs
+ * without data are a reasonably common occurrence (e.g. test unit
+ * ready), it will save a few cycles if we check for it here.
+ */
+ if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
+ && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
+ && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
+ || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
+
+ bzero(&mapinfo, sizeof(mapinfo));
+
+ error = cam_periph_mapmem(ccb, &mapinfo);
+
+ /*
+ * cam_periph_mapmem returned an error, we can't continue.
+ * Return the error to the user.
+ */
+ if (error)
+ return(error);
+
+ /*
+ * We successfully mapped the memory in, so we need to
+ * unmap it when the transaction is done.
+ */
+ need_unmap = 1;
+ }
+
+ /*
+ * If the user wants us to perform any error recovery, then honor
+ * that request. Otherwise, it's up to the user to perform any
+ * error recovery.
+ */
+ error = cam_periph_runccb(ccb,
+ (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
+ targerror : NULL,
+ /* cam_flags */ 0,
+ /* sense_flags */SF_RETRY_UA,
+ &softc->device_stats);
+
+ if (need_unmap != 0)
+ cam_periph_unmapmem(ccb, &mapinfo);
+
+ ccb->ccb_h.cbfcnp = NULL;
+ ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
+ bcopy(ccb, inccb, sizeof(union ccb));
+
+ return(error);
+}
+
+
+static int
+targpoll(dev_t dev, int poll_events, struct proc *p)
+{
+ struct cam_periph *periph;
+ struct targ_softc *softc;
+ u_int unit;
+ int revents;
+ int s;
+
+ unit = minor(dev);
+ periph = cam_extend_get(targperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+ softc = (struct targ_softc *)periph->softc;
+
+ revents = 0;
+ s = splcam();
+ if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
+ if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
+ && bufq_first(&softc->rcv_buf_queue) == NULL)
+ revents |= poll_events & (POLLOUT | POLLWRNORM);
+ }
+ if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
+ if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
+ && bufq_first(&softc->snd_buf_queue) == NULL)
+ revents |= poll_events & (POLLIN | POLLRDNORM);
+ }
+
+ if (softc->state != TARG_STATE_NORMAL)
+ revents |= POLLERR;
+
+ if (revents == 0) {
+ if (poll_events & (POLLOUT | POLLWRNORM))
+ selrecord(p, &softc->rcv_select);
+ if (poll_events & (POLLIN | POLLRDNORM))
+ selrecord(p, &softc->snd_select);
+ }
+ splx(s);
+ return (revents);
+}
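For illustration only, and not part of this change: a minimal userland sketch of waiting on the targ device under the poll semantics implemented above. The helper name wait_for_work() and the lack of error handling are assumptions of the sketch.

#include <poll.h>
#include <stdio.h>

int
wait_for_work(int targ_fd)
{
	struct pollfd pfd;

	pfd.fd = targ_fd;
	pfd.events = POLLIN | POLLOUT;
	if (poll(&pfd, 1, INFTIM) < 0)
		return (-1);
	if (pfd.revents & POLLERR)
		printf("exception pending; see TARGIOCFETCHEXCEPTION\n");
	if (pfd.revents & POLLIN)
		printf("a SEND is queued; read(2) supplies a buffer for its data\n");
	if (pfd.revents & POLLOUT)
		printf("a RECEIVE is queued; write(2) supplies the data to return\n");
	return (0);
}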
+
+static int
+targread(dev_t dev, struct uio *uio, int ioflag)
+{
+ if (uio->uio_iovcnt == 0
+ || uio->uio_iov->iov_len == 0) {
+ /* EOF */
+ struct cam_periph *periph;
+ struct targ_softc *softc;
+ u_int unit;
+ int s;
+
+ s = splcam();
+ unit = minor(dev);
+ periph = cam_extend_get(targperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+ softc = (struct targ_softc *)periph->softc;
+ softc->flags |= TARG_FLAG_SEND_EOF;
+ splx(s);
+ targrunqueue(periph, softc);
+ return (0);
+ }
+ return(physio(targstrategy, NULL, dev, 1, minphys, uio));
+}
+
+static int
+targwrite(dev_t dev, struct uio *uio, int ioflag)
+{
+ if (uio->uio_iovcnt == 0
+ || uio->uio_iov->iov_len == 0) {
+ /* EOF */
+ struct cam_periph *periph;
+ struct targ_softc *softc;
+ u_int unit;
+ int s;
+
+ s = splcam();
+ unit = minor(dev);
+ periph = cam_extend_get(targperiphs, unit);
+ if (periph == NULL)
+ return (ENXIO);
+ softc = (struct targ_softc *)periph->softc;
+ softc->flags |= TARG_FLAG_RECEIVE_EOF;
+ splx(s);
+ targrunqueue(periph, softc);
+ return (0);
+ }
+ return(physio(targstrategy, NULL, dev, 0, minphys, uio));
+}
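Read as a userland convention, the zero-length checks in targread() and targwrite() above mean that an empty read(2) or write(2) acts as an end-of-data marker rather than a transfer. A rough sketch of that convention, where consume() and fill() are hypothetical application callbacks and error handling is omitted:

#include <sys/types.h>
#include <unistd.h>

extern void	consume(const char *buf, size_t len);	/* hypothetical */
extern size_t	fill(char *buf, size_t len);		/* hypothetical */

void
transfer_and_eof(int targ_fd)
{
	char buf[8192];
	ssize_t n;

	/* Drain the data delivered by the initiator's SEND commands. */
	while ((n = read(targ_fd, buf, sizeof(buf))) > 0)
		consume(buf, (size_t)n);
	(void)read(targ_fd, buf, 0);	/* zero length: sets TARG_FLAG_SEND_EOF */

	/* Supply data for RECEIVE commands, then signal end of data. */
	(void)write(targ_fd, buf, fill(buf, sizeof(buf)));
	(void)write(targ_fd, buf, 0);	/* zero length: sets TARG_FLAG_RECEIVE_EOF */
}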
+
+/*
+ * Actually translate the requested transfer into one the physical driver
+ * can understand. The transfer is described by a buf and will include
+ * only one physical transfer.
+ */
+static void
+targstrategy(struct buf *bp)
+{
+ struct cam_periph *periph;
+ struct targ_softc *softc;
+ u_int unit;
+ int s;
+
+ unit = minor(bp->b_dev);
+ periph = cam_extend_get(targperiphs, unit);
+ if (periph == NULL) {
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+ softc = (struct targ_softc *)periph->softc;
+
+ /*
+ * Mask interrupts so that the device cannot be invalidated until
+ * after we are in the queue. Otherwise, we might not properly
+ * clean up one of the buffers.
+ */
+ s = splbio();
+
+ /*
+ * If there is an exception pending, error out
+ */
+ if (softc->state != TARG_STATE_NORMAL) {
+ splx(s);
+ if (softc->state == TARG_STATE_EXCEPTION
+ && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
+ bp->b_error = EBUSY;
+ else
+ bp->b_error = ENXIO;
+ goto bad;
+ }
+
+ /*
+ * Place it in the queue of buffers available for either
+ * SEND or RECEIVE commands.
+ *
+ */
+ bp->b_resid = bp->b_bcount;
+ if ((bp->b_flags & B_READ) != 0) {
+ xpt_print_path(periph->path);
+ printf("Queued a SEND buffer\n");
+ bufq_insert_tail(&softc->snd_buf_queue, bp);
+ } else {
+ xpt_print_path(periph->path);
+ printf("Queued a RECEIVE buffer\n");
+ bufq_insert_tail(&softc->rcv_buf_queue, bp);
+ }
+
+ splx(s);
+
+ /*
+ * Attempt to use the new buffer to service any pending
+ * target commands.
+ */
+ targrunqueue(periph, softc);
+
+ return;
+bad:
+ bp->b_flags |= B_ERROR;
+
+ /*
+ * Correctly set the buf to indicate a completed xfer
+ */
+ bp->b_resid = bp->b_bcount;
+ biodone(bp);
+}
+
+static void
+targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
+{
+ struct ccb_queue *pending_queue;
+ struct ccb_accept_tio *atio;
+ struct buf_queue_head *bufq;
+ struct buf *bp;
+ struct targ_cmd_desc *desc;
+ struct ccb_hdr *ccbh;
+ int added;
+ int s;
+
+ s = splbio();
+ pending_queue = NULL;
+ bufq = NULL;
+ ccbh = NULL;
+ /* Only run one request at a time to maintain data ordering. */
+ if (softc->state != TARG_STATE_NORMAL
+ || TAILQ_FIRST(&softc->work_queue) != NULL
+ || TAILQ_FIRST(&softc->pending_queue) != NULL) {
+ splx(s);
+ return;
+ }
+
+ if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL
+ || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
+ && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {
+
+ if (bp == NULL)
+ softc->flags &= ~TARG_FLAG_SEND_EOF;
+ else {
+ xpt_print_path(periph->path);
+ printf("De-Queued a SEND buffer %d\n",
+ bp->b_bcount);
+ }
+ bufq = &softc->snd_buf_queue;
+ pending_queue = &softc->snd_ccb_queue;
+ } else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL
+ || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
+ && (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {
+
+ if (bp == NULL)
+ softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
+ else {
+ xpt_print_path(periph->path);
+ printf("De-Queued a RECEIVE buffer %d\n", bp->b_bcount);
+ }
+ bufq = &softc->rcv_buf_queue;
+ pending_queue = &softc->rcv_ccb_queue;
+ }
+
+ if (pending_queue != NULL) {
+ /* Process a request */
+ atio = (struct ccb_accept_tio *)ccbh;
+ TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
+ desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
+ desc->bp = bp;
+ if (bp == NULL) {
+ /* EOF */
+ desc->data = NULL;
+ desc->data_increment = 0;
+ desc->data_resid = 0;
+ atio->ccb_h.flags &= ~CAM_DIR_MASK;
+ atio->ccb_h.flags |= CAM_DIR_NONE;
+ } else {
+ bufq_remove(bufq, bp);
+ desc->data = &bp->b_data[bp->b_bcount - bp->b_resid];
+ desc->data_increment =
+ MIN(desc->data_resid, bp->b_resid);
+ }
+ xpt_print_path(periph->path);
+ printf("Buffer command: data %x: datacnt %d\n", desc->data,
+ desc->data_increment);
+ TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
+ periph_links.tqe);
+ }
+ if (TAILQ_FIRST(&softc->work_queue) != NULL) {
+ splx(s);
+ xpt_schedule(periph, /*XXX priority*/1);
+ } else
+ splx(s);
+}
+
+static void
+targstart(struct cam_periph *periph, union ccb *start_ccb)
+{
+ struct targ_softc *softc;
+ struct ccb_hdr *ccbh;
+ struct ccb_accept_tio *atio;
+ struct targ_cmd_desc *desc;
+ struct ccb_scsiio *csio;
+ ccb_flags flags;
+ int s;
+
+ softc = (struct targ_softc *)periph->softc;
+
+ s = splbio();
+ ccbh = TAILQ_FIRST(&softc->work_queue);
+ if (periph->immediate_priority <= periph->pinfo.priority) {
+ start_ccb->ccb_h.ccb_type = TARG_CCB_WAITING;
+ SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
+ periph_links.sle);
+ periph->immediate_priority = CAM_PRIORITY_NONE;
+ splx(s);
+ wakeup(&periph->ccb_list);
+ } else if (ccbh == NULL) {
+ splx(s);
+ xpt_release_ccb(start_ccb);
+ } else {
+ TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
+ TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
+ periph_links.tqe);
+ splx(s);
+ atio = (struct ccb_accept_tio*)ccbh;
+ desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
+
+ /* Is this a tagged request? */
+ flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
+
+ /*
+ * If we are done with the transaction, tell the
+ * controller to send status and perform a CMD_CMPLT.
+ */
+ if (desc->data_resid == desc->data_increment)
+ flags |= CAM_SEND_STATUS;
+
+ csio = &start_ccb->csio;
+ cam_fill_ctio(csio,
+ /*retries*/2,
+ targdone,
+ flags,
+ /*tag_action*/MSG_SIMPLE_Q_TAG,
+ atio->tag_id,
+ atio->init_id,
+ desc->status,
+ /*data_ptr*/desc->data_increment == 0
+ ? NULL : desc->data,
+ /*dxfer_len*/desc->data_increment,
+ /*timeout*/desc->timeout);
+
+ start_ccb->ccb_h.ccb_type = TARG_CCB_WORKQ;
+ start_ccb->ccb_h.ccb_atio = atio;
+ xpt_print_path(periph->path);
+ printf("Sending a CTIO\n");
+ xpt_action(start_ccb);
+ s = splbio();
+ ccbh = TAILQ_FIRST(&softc->work_queue);
+ splx(s);
+ }
+ if (ccbh != NULL)
+ targrunqueue(periph, softc);
+}
+
+static void
+targdone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct targ_softc *softc;
+
+ softc = (struct targ_softc *)periph->softc;
+
+ if (done_ccb->ccb_h.ccb_type == TARG_CCB_WAITING) {
+ /* Caller will release the CCB */
+ wakeup(&done_ccb->ccb_h.cbfcnp);
+ return;
+ }
+
+ switch (done_ccb->ccb_h.func_code) {
+ case XPT_ACCEPT_TARGET_IO:
+ {
+ struct ccb_accept_tio *atio;
+ struct targ_cmd_desc *descr;
+ struct initiator_state *istate;
+ u_int8_t *cdb;
+
+ atio = &done_ccb->atio;
+ descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
+ istate = &softc->istate[atio->init_id];
+ cdb = atio->cdb_io.cdb_bytes;
+ if (softc->state == TARG_STATE_TEARDOWN) {
+ freedescr(descr);
+ free(done_ccb, M_DEVBUF);
+ return;
+ }
+
+ if (istate->pending_ca == 0
+ && istate->pending_ua != 0
+ && cdb[0] != INQUIRY) {
+ /* Pending UA, tell initiator */
+ /* Direction is always relative to the initiator */
+ istate->pending_ca = CA_UNIT_ATTN;
+ atio->ccb_h.flags &= ~CAM_DIR_MASK;
+ atio->ccb_h.flags |= CAM_DIR_NONE;
+ descr->data_resid = 0;
+ descr->data_increment = 0;
+ descr->timeout = 5 * 1000;
+ descr->status = SCSI_STATUS_CHECK_COND;
+ } else {
+ /*
+ * Save the current CA and UA status so
+ * they can be used by this command.
+ */
+ ua_types pending_ua;
+ ca_types pending_ca;
+
+ pending_ua = istate->pending_ua;
+ pending_ca = istate->pending_ca;
+
+ /*
+ * As per the SCSI-2 spec, any command that occurs
+ * after a CA is reported clears the CA. If the
+ * command is not an INQUIRY, we are also supposed
+ * to clear the UA condition, if any, that caused
+ * the CA to occur, assuming the UA is not a
+ * persistent state.
+ */
+ istate->pending_ca = CA_NONE;
+ if ((pending_ca
+ & (CA_CMD_SENSE|CA_UNIT_ATTN)) == CA_UNIT_ATTN
+ && cdb[0] != INQUIRY)
+ istate->pending_ua = UA_NONE;
+
+ /*
+ * Determine the type of incoming command and
+ * setup our buffer for a response.
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ {
+ struct scsi_inquiry *inq;
+ struct scsi_sense_data *sense;
+
+ inq = (struct scsi_inquiry *)cdb;
+ sense = &istate->sense_data;
+ xpt_print_path(periph->path);
+ printf("Saw an inquiry!\n");
+ /*
+ * Validate the command. We don't
+ * support any VPD pages, so complain
+ * if EVPD is set.
+ */
+ if ((inq->byte2 & SI_EVPD) != 0
+ || inq->page_code != 0) {
+ istate->pending_ca = CA_CMD_SENSE;
+ atio->ccb_h.flags &= ~CAM_DIR_MASK;
+ atio->ccb_h.flags |= CAM_DIR_NONE;
+ descr->data_resid = 0;
+ descr->data_increment = 0;
+ descr->status = SCSI_STATUS_CHECK_COND;
+ fill_sense(sense,
+ SSD_CURRENT_ERROR,
+ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/0x24, /*ascq*/0x00);
+ sense->extra_len =
+ offsetof(struct scsi_sense_data,
+ extra_bytes)
+ - offsetof(struct scsi_sense_data,
+ extra_len);
+ }
+
+ if ((inq->byte2 & SI_EVPD) != 0) {
+ sense->sense_key_spec[0] =
+ SSD_SCS_VALID|SSD_FIELDPTR_CMD
+ |SSD_BITPTR_VALID| /*bit value*/1;
+ sense->sense_key_spec[1] = 0;
+ sense->sense_key_spec[2] =
+ offsetof(struct scsi_inquiry,
+ byte2);
+ break;
+ } else if (inq->page_code != 0) {
+ sense->sense_key_spec[0] =
+ SSD_SCS_VALID|SSD_FIELDPTR_CMD;
+ sense->sense_key_spec[1] = 0;
+ sense->sense_key_spec[2] =
+ offsetof(struct scsi_inquiry,
+ page_code);
+ break;
+ }
+ /*
+ * Direction is always relative
+ * to the initiator.
+ */
+ atio->ccb_h.flags &= ~CAM_DIR_MASK;
+ atio->ccb_h.flags |= CAM_DIR_IN;
+ descr->data = softc->inq_data;
+ descr->data_resid = MIN(softc->inq_data_len,
+ inq->length);
+ descr->data_increment = descr->data_resid;
+ descr->timeout = 5 * 1000;
+ descr->status = SCSI_STATUS_OK;
+ break;
+ }
+ case TEST_UNIT_READY:
+ atio->ccb_h.flags &= ~CAM_DIR_MASK;
+ atio->ccb_h.flags |= CAM_DIR_NONE;
+ descr->data_resid = 0;
+ descr->data_increment = 0;
+ descr->status = SCSI_STATUS_OK;
+ break;
+ case REQUEST_SENSE:
+ {
+ struct scsi_request_sense *rsense;
+ struct scsi_sense_data *sense;
+
+ rsense = (struct scsi_request_sense *)cdb;
+ sense = &istate->sense_data;
+ if (pending_ca == 0) {
+ fill_sense(sense, SSD_CURRENT_ERROR,
+ SSD_KEY_NO_SENSE, 0x00,
+ 0x00);
+ xpt_print_path(periph->path);
+ printf("No pending CA!\n");
+ } else if (pending_ca == CA_UNIT_ATTN) {
+ u_int ascq;
+
+ if (pending_ua == UA_POWER_ON)
+ ascq = 0x1;
+ else
+ ascq = 0x2;
+ fill_sense(sense, SSD_CURRENT_ERROR,
+ SSD_KEY_UNIT_ATTENTION,
+ 0x29, ascq);
+ xpt_print_path(periph->path);
+ printf("Pending UA!\n");
+ }
+ /*
+ * Direction is always relative
+ * to the initiator.
+ */
+ atio->ccb_h.flags &= ~CAM_DIR_MASK;
+ atio->ccb_h.flags |= CAM_DIR_IN;
+ descr->data = sense;
+ descr->data_resid =
+ offsetof(struct scsi_sense_data,
+ extra_len)
+ + sense->extra_len;
+ descr->data_resid = MIN(descr->data_resid,
+ rsense->length);
+ descr->data_increment = descr->data_resid;
+ descr->timeout = 5 * 1000;
+ descr->status = SCSI_STATUS_OK;
+ break;
+ }
+ case RECEIVE:
+ case SEND:
+ {
+ struct scsi_send_receive *sr;
+
+ sr = (struct scsi_send_receive *)cdb;
+
+ /*
+ * Direction is always relative
+ * to the initiator.
+ */
+ atio->ccb_h.flags &= ~CAM_DIR_MASK;
+ if (cdb[0] == SEND) {
+ atio->ccb_h.flags |= CAM_DIR_OUT;
+ xpt_print_path(periph->path);
+ printf("Saw a SEND!\n");
+ TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
+ &atio->ccb_h,
+ periph_links.tqe);
+ selwakeup(&softc->snd_select);
+ } else {
+ atio->ccb_h.flags |= CAM_DIR_IN;
+ xpt_print_path(periph->path);
+ printf("Saw a RECEIVE!\n");
+ TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
+ &atio->ccb_h,
+ periph_links.tqe);
+ selwakeup(&softc->rcv_select);
+ }
+ descr->data_resid = scsi_3btoul(sr->xfer_len);
+ descr->timeout = 5 * 1000;
+ descr->status = SCSI_STATUS_OK;
+ /*
+ * Attempt to satisfy this request with
+ * a user buffer.
+ */
+ targrunqueue(periph, softc);
+ return;
+ }
+ default:
+ /*
+ * Queue for consumption by our userland
+ * counterpart and transition to the exception
+ * state.
+ */
+ TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
+ &atio->ccb_h,
+ periph_links.tqe);
+ softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
+ targfireexception(periph, softc);
+ return;
+ }
+ }
+
+ /* Queue us up to receive a Continue Target I/O ccb. */
+ TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
+ periph_links.tqe);
+ xpt_schedule(periph, /*priority*/1);
+ break;
+ }
+ case XPT_CONT_TARGET_IO:
+ {
+ struct ccb_accept_tio *atio;
+ struct targ_cmd_desc *desc;
+ struct buf *bp;
+
+ xpt_print_path(done_ccb->ccb_h.path);
+ printf("Received completed CTIO\n");
+ atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
+ desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
+
+ TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
+ periph_links.tqe);
+
+ /* XXX Check for errors */
+ desc->data_resid -= desc->data_increment;
+ if ((bp = desc->bp) != NULL) {
+
+ bp->b_resid -= desc->data_increment;
+ bp->b_error = 0;
+
+ xpt_print_path(done_ccb->ccb_h.path);
+ printf("Buffer I/O Completed - Resid %d:%d\n",
+ bp->b_resid, desc->data_resid);
+ /*
+ * Send the buffer back to the client if
+ * either the command has completed or all
+ * buffer space has been consumed.
+ */
+ if (desc->data_resid == 0
+ || bp->b_resid == 0) {
+ if (bp->b_resid != 0)
+ /* Short transfer */
+ bp->b_flags |= B_ERROR;
+
+ xpt_print_path(done_ccb->ccb_h.path);
+ printf("Completing a buffer\n");
+ biodone(bp);
+ desc->bp = NULL;
+ }
+ }
+
+ xpt_release_ccb(done_ccb);
+ if (softc->state != TARG_STATE_TEARDOWN) {
+
+ if (desc->data_resid == 0) {
+ /*
+ * Send the original accept TIO back to the
+ * controller to handle more work.
+ */
+ xpt_print_path(atio->ccb_h.path);
+ printf("Returning ATIO to target\n");
+ xpt_action((union ccb *)atio);
+ break;
+ }
+
+ if (desc->bp != NULL)
+ panic("targ%d: desc->bp should be NULL",
+ periph->unit_number);
+
+ /* Queue us up for another buffer */
+ if (atio->cdb_io.cdb_bytes[0] == SEND) {
+ TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
+ &atio->ccb_h,
+ periph_links.tqe);
+ } else {
+ TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
+ &atio->ccb_h,
+ periph_links.tqe);
+ }
+ desc->bp = NULL;
+ targrunqueue(periph, softc);
+ } else {
+ if (desc->bp != NULL) {
+ bp->b_flags |= B_ERROR;
+ bp->b_error = ENXIO;
+ biodone(bp);
+ }
+ freedescr(desc);
+ free(atio, M_DEVBUF);
+ }
+ break;
+ }
+ case XPT_IMMED_NOTIFY:
+ {
+ if (softc->state == TARG_STATE_TEARDOWN) {
+ free(done_ccb, M_DEVBUF);
+ }
+ break;
+ }
+ }
+}
+
+/*
+ * Transition to the exception state and notify our symbiotic
+ * userland process of the change.
+ */
+static void
+targfireexception(struct cam_periph *periph, struct targ_softc *softc)
+{
+ /*
+ * return all pending buffers with short read/write status so our
+ * process unblocks, and do a selwakeup on any process queued
+ * waiting for reads or writes. When the selwakeup is performed,
+ * the waking process will wakeup, call our poll routine again,
+ * and pick up the exception.
+ */
+ struct buf *bp;
+
+ if (softc->state != TARG_STATE_NORMAL)
+ /* Already either tearing down or in exception state */
+ return;
+
+ softc->state = TARG_STATE_EXCEPTION;
+
+ while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
+ bufq_remove(&softc->snd_buf_queue, bp);
+ bp->b_flags |= B_ERROR;
+ biodone(bp);
+ }
+
+ while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
+ bufq_remove(&softc->rcv_buf_queue, bp);
+ bp->b_flags |= B_ERROR;
+ biodone(bp);
+ }
+
+ selwakeup(&softc->snd_select);
+ selwakeup(&softc->rcv_select);
+}
+
+static int
+targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
+{
+ return 0;
+}
+
+static struct targ_cmd_desc*
+allocdescr()
+{
+ struct targ_cmd_desc* descr;
+
+ /* Allocate the targ_descr structure */
+ descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
+ M_DEVBUF, M_NOWAIT);
+ if (descr == NULL)
+ return (NULL);
+
+ bzero(descr, sizeof(*descr));
+
+ /* Allocate buffer backing store */
+ descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
+ if (descr->backing_store == NULL) {
+ free(descr, M_DEVBUF);
+ return (NULL);
+ }
+ descr->max_size = MAX_BUF_SIZE;
+ return (descr);
+}
+
+static void
+freedescr(struct targ_cmd_desc *descr)
+{
+ free(descr->backing_store, M_DEVBUF);
+ free(descr, M_DEVBUF);
+}
+
+static void
+fill_sense(struct scsi_sense_data *sense, u_int error_code, u_int sense_key,
+ u_int asc, u_int ascq)
+{
+ bzero(sense, sizeof(*sense));
+ sense->error_code = error_code;
+ sense->flags = sense_key;
+ sense->add_sense_code = asc;
+ sense->add_sense_code_qual = ascq;
+
+ sense->extra_len = offsetof(struct scsi_sense_data, fru)
+ - offsetof(struct scsi_sense_data, extra_len);
+}
diff --git a/sys/cam/scsi/scsi_targetio.h b/sys/cam/scsi/scsi_targetio.h
new file mode 100644
index 0000000..59826c7
--- /dev/null
+++ b/sys/cam/scsi/scsi_targetio.h
@@ -0,0 +1,103 @@
+/*
+ * Ioctl definitions for the Target Mode SCSI Processor Target driver for CAM.
+ *
+ * Copyright (c) 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _CAM_SCSI_SCSI_TARGETIO_H_
+#define _CAM_SCSI_SCSI_TARGETIO_H_
+#ifndef KERNEL
+#include <sys/types.h>
+#endif
+#include <sys/ioccom.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+
+/* Determine and clear exception state in the driver */
+typedef enum {
+ TARG_EXCEPT_NONE = 0x00,
+ TARG_EXCEPT_DEVICE_INVALID = 0x01,
+ TARG_EXCEPT_BDR_RECEIVED = 0x02,
+ TARG_EXCEPT_BUS_RESET_SEEN = 0x04,
+ TARG_EXCEPT_UNKNOWN_ATIO = 0x08,
+} targ_exception;
+
+#define TARGIOCFETCHEXCEPTION _IOR('C', 1, targ_exception)
+#define TARGIOCCLEAREXCEPTION _IOW('C', 2, targ_exception)
+
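For illustration, and not part of this header: a sketch of how a userland process might fetch and acknowledge exception state. handle_unknown_atio() is a hypothetical helper and error handling is omitted.

#include <sys/ioctl.h>

extern void	handle_unknown_atio(int targ_fd);	/* hypothetical */

void
service_exceptions(int targ_fd)
{
	targ_exception excpt;

	if (ioctl(targ_fd, TARGIOCFETCHEXCEPTION, &excpt) != 0
	 || excpt == TARG_EXCEPT_NONE)
		return;
	if (excpt & TARG_EXCEPT_UNKNOWN_ATIO)
		handle_unknown_atio(targ_fd);
	/* Acknowledge only what was actually handled. */
	ioctl(targ_fd, TARGIOCCLEAREXCEPTION, &excpt);
}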
+/*
+ * Retrieve an Accept Target I/O CCB for a command that is not handled
+ * directly by the kernel target driver.
+ */
+#define TARGIOCFETCHATIO _IOR('C', 3, struct ccb_accept_tio)
+
+/*
+ * Used for responding to incoming ATIO requests. XPT_CONT_TARGET_IO
+ * operations are the norm, but ccb types for manipulating the device
+ * queue, etc. can also be used if error handling is to be performed by the
+ * user land process.
+ */
+#define TARGIOCCOMMAND _IOWR('C', 4, union ccb)
+
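A compressed sketch of the fetch/respond cycle these two ioctls support: pull an unhandled ATIO out of the kernel queue, then hand back an XPT_CONT_TARGET_IO CCB that completes the command. The cam_fill_ctio() arguments mirror the kernel driver's own usage, but the choice of status, the omitted sense handling, and any CCB header fields a real caller would still have to fill in (e.g. priority) are assumptions of this sketch.

#include <sys/ioctl.h>
#include <strings.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

void
reject_unknown_command(int targ_fd)
{
	struct ccb_accept_tio atio;
	union ccb ccb;

	if (ioctl(targ_fd, TARGIOCFETCHATIO, &atio) != 0)
		return;				/* nothing queued */

	bzero(&ccb, sizeof(ccb));
	cam_fill_ctio(&ccb.csio,
		      /*retries*/1,
		      /*cbfcnp*/NULL,
		      /*flags*/CAM_DIR_NONE | CAM_SEND_STATUS,
		      /*tag_action*/MSG_SIMPLE_Q_TAG,
		      atio.tag_id,
		      atio.init_id,
		      SCSI_STATUS_CHECK_COND,	/* example: report a check condition */
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      /*timeout*/5 * 1000);
	ioctl(targ_fd, TARGIOCCOMMAND, &ccb);
}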
+
+typedef enum {
+ UA_NONE = 0x00,
+ UA_POWER_ON = 0x01,
+ UA_BUS_RESET = 0x02
+} ua_types;
+
+typedef enum {
+ CA_NONE = 0x00,
+ CA_UNIT_ATTN = 0x01,
+ CA_CMD_SENSE = 0x02
+} ca_types;
+
+struct initiator_state {
+ ua_types pending_ua;
+ ca_types pending_ca;
+ struct scsi_sense_data sense_data;
+};
+
+struct ioc_initiator_state {
+ u_int initiator_id;
+ struct initiator_state istate;
+};
+
+/*
+ * Get and Set Contingent Allegiance and Unit Attention state
+ * presented by the target driver. This is usually used to
+ * properly report an error condition in response to an incoming
+ * ATIO request handled by the userland process.
+ *
+ * The initiator_id must be properly initialized in the ioc_initiator_state
+ * structure before calling TARGIOCGETISTATE.
+ */
+#define TARGIOCGETISTATE _IOWR('C', 6, struct ioc_initiator_state)
+#define TARGIOCSETISTATE _IOW('C', 5, struct ioc_initiator_state)
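A short sketch of querying an initiator's pending state; initiator ID 0 is an arbitrary example and error handling is omitted.

#include <sys/ioctl.h>
#include <stdio.h>

void
show_initiator_state(int targ_fd)
{
	struct ioc_initiator_state ioc;

	ioc.initiator_id = 0;		/* must be set before the ioctl */
	if (ioctl(targ_fd, TARGIOCGETISTATE, &ioc) == 0
	 && ioc.istate.pending_ua != UA_NONE)
		printf("initiator %u has a pending unit attention\n",
		       ioc.initiator_id);
}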
+
+#endif /* _CAM_SCSI_SCSI_TARGETIO_H_ */