summaryrefslogtreecommitdiffstats
path: root/sys/cam/ata
diff options
context:
space:
mode:
authorscottl <scottl@FreeBSD.org>2009-07-10 08:18:08 +0000
committerscottl <scottl@FreeBSD.org>2009-07-10 08:18:08 +0000
commite33e5dce327927280ca13509bde4fac5f9d39fe1 (patch)
tree65347229e3752769c4a701bd5f5308b2c8b4bf03 /sys/cam/ata
parentee1bfac31aec467d00137d267df6bbe2744c596a (diff)
downloadFreeBSD-src-e33e5dce327927280ca13509bde4fac5f9d39fe1.zip
FreeBSD-src-e33e5dce327927280ca13509bde4fac5f9d39fe1.tar.gz
Separate the parallel scsi knowledge out of the core of the XPT, and
modularize it so that new transports can be created. Add a transport for SATA Add a periph+protocol layer for ATA Add a driver for AHCI-compliant hardware. Add a maxio field to CAM so that drivers can advertise their max I/O capability. Modify various drivers so that they are insulated from the value of MAXPHYS. The new ATA/SATA code supports AHCI-compliant hardware, and will override the classic ATA driver if it is loaded as a module at boot time or compiled into the kernel. The stack now support NCQ (tagged queueing) for increased performance on modern SATA drives. It also supports port multipliers. ATA drives are accessed via 'ada' device nodes. ATAPI drives are accessed via 'cd' device nodes. They can all be enumerated and manipulated via camcontrol, just like SCSI drives. SCSI commands are not translated to their ATA equivalents; ATA native commands are used throughout the entire stack, including camcontrol. See the camcontrol manpage for further details. Testing this code may require that you update your fstab, and possibly modify your BIOS to enable AHCI functionality, if available. This code is very experimental at the moment. The userland ABI/API has changed, so applications will need to be recompiled. It may change further in the near future. The 'ada' device name may also change as more infrastructure is completed in this project. The goal is to eventually put all CAM busses and devices until newbus, allowing for interesting topology and management options. Few functional changes will be seen with existing SCSI/SAS/FC drivers, though the userland ABI has still changed. In the future, transports specific modules for SAS and FC may appear in order to better support the topologies and capabilities of these technologies. The modularization of CAM and the addition of the ATA/SATA modules is meant to break CAM out of the mold of being specific to SCSI, letting it grow to be a framework for arbitrary transports and protocols. 
It also allows drivers to be written to support discrete hardware without jeopardizing the stability of non-related hardware. While only an AHCI driver is provided now, a Silicon Image driver is also in the works. Drivers for ICH1-4, ICH5-6, PIIX, classic IDE, and any other hardware is possible and encouraged. Help with new transports is also encouraged. Submitted by: scottl, mav Approved by: re
Diffstat (limited to 'sys/cam/ata')
-rw-r--r--sys/cam/ata/ata_all.c304
-rw-r--r--sys/cam/ata/ata_all.h105
-rw-r--r--sys/cam/ata/ata_da.c1144
-rw-r--r--sys/cam/ata/ata_xpt.c1895
4 files changed, 3448 insertions, 0 deletions
diff --git a/sys/cam/ata/ata_all.c b/sys/cam/ata/ata_all.c
new file mode 100644
index 0000000..1e6eece
--- /dev/null
+++ b/sys/cam/ata/ata_all.c
@@ -0,0 +1,304 @@
+/*-
+ * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+
+#ifdef _KERNEL
+#include <opt_scsi.h>
+
+#include <sys/systm.h>
+#include <sys/libkern.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#else
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef min
+#define min(a,b) (((a)<(b))?(a):(b))
+#endif
+#endif
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_queue.h>
+#include <cam/cam_xpt.h>
+#include <sys/ata.h>
+#include <cam/ata/ata_all.h>
+#include <sys/sbuf.h>
+#include <sys/endian.h>
+
/*
 * Decode the highest supported ATA/ATAPI major version from the
 * IDENTIFY "major version" word: each supported revision is reported
 * as a set bit.  The values 0x0000 and 0xffff both mean "version not
 * reported" and yield 0.
 */
int
ata_version(int ver)
{
	int pos;

	if (ver == 0xffff)
		return 0;
	for (pos = 15; pos >= 0; pos--) {
		if ((ver >> pos) & 1)
			return pos;
	}
	return 0;
}
+
/*
 * Print a one-line identification banner for a device, e.g.
 * "<MODEL REV> ATA/ATAPI-7 SATA 2.x device", from its IDENTIFY data.
 * cam_strvis() sanitizes the fixed-width, unterminated IDENTIFY
 * string fields into printable NUL-terminated strings.
 */
void
ata_print_ident(struct ata_params *ident_data)
{
	char product[48], revision[16];

	cam_strvis(product, ident_data->model, sizeof(ident_data->model),
		   sizeof(product));
	cam_strvis(revision, ident_data->revision, sizeof(ident_data->revision),
		   sizeof(revision));
	printf("<%s %s> ATA/ATAPI-%d",
	       product, revision, ata_version(ident_data->version_major));
	/* 0x0000 and 0xffff both mean the SATA capabilities word is absent. */
	if (ident_data->satacapabilities && ident_data->satacapabilities != 0xffff) {
		if (ident_data->satacapabilities & ATA_SATA_GEN2)
			printf(" SATA 2.x");
		else if (ident_data->satacapabilities & ATA_SATA_GEN1)
			printf(" SATA 1.x");
		else
			printf(" SATA");
	}
	printf(" device\n");
}
+
+void
+ata_36bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint8_t features,
+ uint32_t lba, uint8_t sector_count)
+{
+ bzero(&ataio->cmd, sizeof(ataio->cmd));
+ ataio->cmd.flags = 0;
+ ataio->cmd.command = cmd;
+ ataio->cmd.features = features;
+ ataio->cmd.lba_low = lba;
+ ataio->cmd.lba_mid = lba >> 8;
+ ataio->cmd.lba_high = lba >> 16;
+ ataio->cmd.device = 0x40 | ((lba >> 24) & 0x0f);
+ ataio->cmd.sector_count = sector_count;
+}
+
+void
+ata_48bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint16_t features,
+ uint64_t lba, uint16_t sector_count)
+{
+ bzero(&ataio->cmd, sizeof(ataio->cmd));
+ ataio->cmd.flags = CAM_ATAIO_48BIT;
+ ataio->cmd.command = cmd;
+ ataio->cmd.features = features;
+ ataio->cmd.lba_low = lba;
+ ataio->cmd.lba_mid = lba >> 8;
+ ataio->cmd.lba_high = lba >> 16;
+ ataio->cmd.device = 0x40;
+ ataio->cmd.lba_low_exp = lba >> 24;
+ ataio->cmd.lba_mid_exp = lba >> 32;
+ ataio->cmd.lba_high_exp = lba >> 40;
+ ataio->cmd.features_exp = features >> 8;
+ ataio->cmd.sector_count = sector_count;
+ ataio->cmd.sector_count_exp = sector_count >> 8;
+}
+
/*
 * Initialize ataio->cmd as an NCQ (First-Party DMA) queued command.
 * For FPDMA commands the transfer length is carried in the
 * features/features_exp register pair rather than in sector_count.
 * sector_count is left zero here -- presumably the SIM fills in the
 * queue tag there when issuing the command; verify against the
 * controller driver.
 */
void
ata_ncq_cmd(struct ccb_ataio *ataio, uint8_t cmd,
	    uint64_t lba, uint16_t sector_count)
{
	bzero(&ataio->cmd, sizeof(ataio->cmd));
	ataio->cmd.flags = CAM_ATAIO_48BIT | CAM_ATAIO_FPDMA;
	ataio->cmd.command = cmd;
	/* NCQ: length goes in the features register pair. */
	ataio->cmd.features = sector_count;
	ataio->cmd.lba_low = lba;
	ataio->cmd.lba_mid = lba >> 8;
	ataio->cmd.lba_high = lba >> 16;
	ataio->cmd.device = 0x40;
	ataio->cmd.lba_low_exp = lba >> 24;
	ataio->cmd.lba_mid_exp = lba >> 32;
	ataio->cmd.lba_high_exp = lba >> 40;
	ataio->cmd.features_exp = sector_count >> 8;
}
+
/*
 * Build a control (non-command) taskfile requesting a software reset:
 * 0x04 is the SRST bit of the device control register.  NEEDRESULT is
 * set so the result taskfile is captured after the reset.
 */
void
ata_reset_cmd(struct ccb_ataio *ataio)
{
	bzero(&ataio->cmd, sizeof(ataio->cmd));
	ataio->cmd.flags = CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT;
	ataio->cmd.control = 0x04;
}
+
/*
 * Build a READ PORT MULTIPLIER command: the 16-bit register number is
 * split across features/features_exp, and the target PM port goes in
 * the low nibble of the device register.  NEEDRESULT is set because
 * the register value comes back in the result taskfile.
 */
void
ata_pm_read_cmd(struct ccb_ataio *ataio, int reg, int port)
{
	bzero(&ataio->cmd, sizeof(ataio->cmd));
	ataio->cmd.flags = CAM_ATAIO_48BIT | CAM_ATAIO_NEEDRESULT;
	ataio->cmd.command = ATA_READ_PM;
	ataio->cmd.features = reg;
	ataio->cmd.features_exp = reg >> 8;
	ataio->cmd.device = port & 0x0f;
}
+
/*
 * Build a WRITE PORT MULTIPLIER command.  The 16-bit register number
 * is split across features/features_exp and the PM port goes in the
 * low nibble of the device register.  The 64-bit value is scattered
 * over the count/LBA registers: byte 0 in sector_count, bytes 1-3 in
 * lba_low/mid/high, byte 4 in sector_count_exp, and bytes 5-7 in the
 * lba_*_exp registers.
 */
void
ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint64_t val)
{
	bzero(&ataio->cmd, sizeof(ataio->cmd));
	ataio->cmd.flags = CAM_ATAIO_48BIT | CAM_ATAIO_NEEDRESULT;
	ataio->cmd.command = ATA_WRITE_PM;
	ataio->cmd.features = reg;
	ataio->cmd.lba_low = val >> 8;
	ataio->cmd.lba_mid = val >> 16;
	ataio->cmd.lba_high = val >> 24;
	ataio->cmd.device = port & 0x0f;
	ataio->cmd.lba_low_exp = val >> 40;
	ataio->cmd.lba_mid_exp = val >> 48;
	ataio->cmd.lba_high_exp = val >> 56;
	ataio->cmd.features_exp = reg >> 8;
	ataio->cmd.sector_count = val;
	ataio->cmd.sector_count_exp = val >> 32;
}
+
/*
 * In-place conversion of each 16-bit word of buf from big-endian (the
 * byte order of ATA IDENTIFY string fields) to host order.  len is in
 * bytes and is assumed to be even -- TODO confirm all callers pass
 * even lengths.
 */
void
ata_bswap(int8_t *buf, int len)
{
	u_int16_t *ptr = (u_int16_t*)(buf + len);

	while (--ptr >= (u_int16_t*)buf)
		*ptr = be16toh(*ptr);
}
+
/*
 * Clean up a fixed-width ATA string field in place: replace NUL bytes
 * and underscores with spaces, then NUL-terminate by erasing the run
 * of trailing spaces at the end of the field.
 */
void
ata_btrim(int8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (buf[i] == '\0' || buf[i] == '_')
			buf[i] = ' ';
	}
	for (i = len - 1; i >= 0 && buf[i] == ' '; i--)
		buf[i] = '\0';
}
+
/*
 * Pack an ATA string: drop the leading run of blanks, collapse each
 * interior run of blanks to a single space, and zero-fill the rest of
 * dst.  src and dst may alias (packing is commonly done in place;
 * dst never runs ahead of src).
 */
void
ata_bpack(int8_t *src, int8_t *dst, int len)
{
	int i, j, blank;

	for (i = 0, j = 0, blank = 0; i < len; i++) {
		int8_t c = src[i];

		if (c == ' ') {
			if (blank)
				continue;	/* squeeze the run */
			blank = 1;
			if (i == 0)
				continue;	/* drop leading blank */
		} else {
			blank = 0;
		}
		dst[j++] = c;
	}
	/* Zero-fill the tail so the result is NUL-terminated. */
	while (j < len)
		dst[j++] = 0x00;
}
+
+int
+ata_max_pmode(struct ata_params *ap)
+{
+ if (ap->atavalid & ATA_FLAG_64_70) {
+ if (ap->apiomodes & 0x02)
+ return ATA_PIO4;
+ if (ap->apiomodes & 0x01)
+ return ATA_PIO3;
+ }
+ if (ap->mwdmamodes & 0x04)
+ return ATA_PIO4;
+ if (ap->mwdmamodes & 0x02)
+ return ATA_PIO3;
+ if (ap->mwdmamodes & 0x01)
+ return ATA_PIO2;
+ if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
+ return ATA_PIO2;
+ if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
+ return ATA_PIO1;
+ if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
+ return ATA_PIO0;
+ return ATA_PIO0;
+}
+
+int
+ata_max_wmode(struct ata_params *ap)
+{
+ if (ap->mwdmamodes & 0x04)
+ return ATA_WDMA2;
+ if (ap->mwdmamodes & 0x02)
+ return ATA_WDMA1;
+ if (ap->mwdmamodes & 0x01)
+ return ATA_WDMA0;
+ return -1;
+}
+
+int
+ata_max_umode(struct ata_params *ap)
+{
+ if (ap->atavalid & ATA_FLAG_88) {
+ if (ap->udmamodes & 0x40)
+ return ATA_UDMA6;
+ if (ap->udmamodes & 0x20)
+ return ATA_UDMA5;
+ if (ap->udmamodes & 0x10)
+ return ATA_UDMA4;
+ if (ap->udmamodes & 0x08)
+ return ATA_UDMA3;
+ if (ap->udmamodes & 0x04)
+ return ATA_UDMA2;
+ if (ap->udmamodes & 0x02)
+ return ATA_UDMA1;
+ if (ap->udmamodes & 0x01)
+ return ATA_UDMA0;
+ }
+ return -1;
+}
+
/*
 * Clamp a requested transfer mode to what the drive reports it can
 * do.  maxmode, when non-zero, is an additional controller-imposed
 * ceiling.  A request in the UDMA range falls back through MWDMA to
 * PIO when the drive lacks the faster class.
 */
int
ata_max_mode(struct ata_params *ap, int mode, int maxmode)
{

	if (maxmode && mode > maxmode)
		mode = maxmode;

	if (mode >= ATA_UDMA0 && ata_max_umode(ap) > 0)
		return (min(mode, ata_max_umode(ap)));

	if (mode >= ATA_WDMA0 && ata_max_wmode(ap) > 0)
		return (min(mode, ata_max_wmode(ap)));

	/* Anything else degenerates to the best supported PIO mode. */
	if (mode > ata_max_pmode(ap))
		return (min(mode, ata_max_pmode(ap)));

	return (mode);
}
+
diff --git a/sys/cam/ata/ata_all.h b/sys/cam/ata/ata_all.h
new file mode 100644
index 0000000..6012995
--- /dev/null
+++ b/sys/cam/ata/ata_all.h
@@ -0,0 +1,105 @@
+/*-
+ * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef CAM_ATA_ALL_H
+#define CAM_ATA_ALL_H 1
+
+#include <sys/ata.h>
+
+struct ccb_ataio;
+struct cam_periph;
+union ccb;
+
/*
 * ATA taskfile image used to issue a command: one byte per shadow
 * register, plus the *_exp ("previous") registers used by 48-bit
 * commands, and a flags byte telling the SIM how to submit it.
 * (Register-to-bit mappings below follow the ata_*_cmd() builders
 * in ata_all.c.)
 */
struct ata_cmd {
	u_int8_t flags;		/* ATA command flags */
#define CAM_ATAIO_48BIT		0x01	/* Command has 48-bit format */
#define CAM_ATAIO_FPDMA		0x02	/* FPDMA command */
#define CAM_ATAIO_CONTROL	0x04	/* Control, not a command */
#define CAM_ATAIO_NEEDRESULT	0x08	/* Request requires result. */

	u_int8_t command;	/* command register */
	u_int8_t features;	/* features register (low byte) */

	u_int8_t lba_low;	/* LBA bits 0-7 */
	u_int8_t lba_mid;	/* LBA bits 8-15 */
	u_int8_t lba_high;	/* LBA bits 16-23 */
	u_int8_t device;	/* device register; 0x40 selects LBA mode */

	u_int8_t lba_low_exp;	/* LBA bits 24-31 (48-bit commands) */
	u_int8_t lba_mid_exp;	/* LBA bits 32-39 (48-bit commands) */
	u_int8_t lba_high_exp;	/* LBA bits 40-47 (48-bit commands) */
	u_int8_t features_exp;	/* features bits 8-15 (48-bit commands) */

	u_int8_t sector_count;	/* sector count bits 0-7 */
	u_int8_t sector_count_exp; /* sector count bits 8-15 (48-bit) */
	u_int8_t control;	/* device control register (control requests) */
};
+
/*
 * Result taskfile returned for requests flagged CAM_ATAIO_NEEDRESULT.
 * Note: CAM_ATAIO_48BIT is re-#defined here with the same value as in
 * struct ata_cmd above -- a benign identical redefinition.
 */
struct ata_res {
	u_int8_t flags;		/* ATA command flags */
#define CAM_ATAIO_48BIT		0x01	/* Command has 48-bit format */

	u_int8_t status;	/* status register */
	u_int8_t error;		/* error register */

	u_int8_t lba_low;	/* LBA bits 0-7 */
	u_int8_t lba_mid;	/* LBA bits 8-15 */
	u_int8_t lba_high;	/* LBA bits 16-23 */
	u_int8_t device;	/* device register */

	u_int8_t lba_low_exp;	/* LBA bits 24-31 (48-bit) */
	u_int8_t lba_mid_exp;	/* LBA bits 32-39 (48-bit) */
	u_int8_t lba_high_exp;	/* LBA bits 40-47 (48-bit) */

	u_int8_t sector_count;	/* sector count bits 0-7 */
	u_int8_t sector_count_exp; /* sector count bits 8-15 (48-bit) */
};
+
+int ata_version(int ver);
+void ata_print_ident(struct ata_params *ident_data);
+
+void ata_36bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint8_t features,
+ uint32_t lba, uint8_t sector_count);
+void ata_48bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint16_t features,
+ uint64_t lba, uint16_t sector_count);
+void ata_ncq_cmd(struct ccb_ataio *ataio, uint8_t cmd,
+ uint64_t lba, uint16_t sector_count);
+void ata_reset_cmd(struct ccb_ataio *ataio);
+void ata_pm_read_cmd(struct ccb_ataio *ataio, int reg, int port);
+void ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint64_t val);
+
+void ata_bswap(int8_t *buf, int len);
+void ata_btrim(int8_t *buf, int len);
+void ata_bpack(int8_t *src, int8_t *dst, int len);
+
+int ata_max_pmode(struct ata_params *ap);
+int ata_max_wmode(struct ata_params *ap);
+int ata_max_umode(struct ata_params *ap);
+int ata_max_mode(struct ata_params *ap, int mode, int maxmode);
+
+#endif
diff --git a/sys/cam/ata/ata_da.c b/sys/cam/ata/ata_da.c
new file mode 100644
index 0000000..b72c316
--- /dev/null
+++ b/sys/cam/ata/ata_da.c
@@ -0,0 +1,1144 @@
+/*-
+ * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+
+#ifdef _KERNEL
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/conf.h>
+#include <sys/devicestat.h>
+#include <sys/eventhandler.h>
+#include <sys/malloc.h>
+#include <sys/cons.h>
+#include <geom/geom_disk.h>
+#endif /* _KERNEL */
+
+#ifndef _KERNEL
+#include <stdio.h>
+#include <string.h>
+#endif /* _KERNEL */
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_sim.h>
+
+#include <cam/ata/ata_all.h>
+
+#ifdef _KERNEL
+
#define ATA_MAX_28BIT_LBA 268435455UL	/* 2^28 - 1: highest 28-bit LBA */

/* Overall periph state; only the steady state exists so far. */
typedef enum {
	ADA_STATE_NORMAL
} ada_state;

/* Per-device flag bits kept in ada_softc.flags. */
typedef enum {
	ADA_FLAG_PACK_INVALID = 0x001,	/* device/media has gone away */
	ADA_FLAG_CAN_48BIT = 0x002,	/* drive handles 48-bit LBA commands */
	ADA_FLAG_CAN_FLUSHCACHE = 0x004, /* drive supports FLUSHCACHE */
	ADA_FLAG_CAN_NCQ = 0x008,	/* drive and SIM both support NCQ */
	ADA_FLAG_TAGGED_QUEUING = 0x010, /* NOTE(review): never set here yet */
	ADA_FLAG_NEED_OTAG = 0x020,	/* next I/O should carry an ordered tag */
	ADA_FLAG_WENT_IDLE = 0x040,
	ADA_FLAG_RETRY_UA = 0x080,	/* retry I/O after bus reset / BDR */
	ADA_FLAG_OPEN = 0x100,		/* disk is currently open */
	ADA_FLAG_SCTX_INIT = 0x200	/* sysctl context was initialized */
} ada_flags;

/* Device quirk bits (no quirk table entries are active yet). */
typedef enum {
	ADA_Q_NONE = 0x00,
	ADA_Q_NO_SYNC_CACHE = 0x01,	/* do not issue cache flushes */
	ADA_Q_NO_6_BYTE = 0x02,
	ADA_Q_NO_PREVENT = 0x04
} ada_quirks;

/* CCB classification stored in ccb_h.ccb_state (see defines below). */
typedef enum {
	ADA_CCB_PROBE = 0x01,
	ADA_CCB_PROBE2 = 0x02,
	ADA_CCB_BUFFER_IO = 0x03,	/* normal I/O from the bio queue */
	ADA_CCB_WAITING = 0x04,
	ADA_CCB_DUMP = 0x05,		/* polled dump/flush I/O */
	ADA_CCB_TYPE_MASK = 0x0F,
	ADA_CCB_RETRY_UA = 0x10		/* retry after unit-attention-like event */
} ada_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state ppriv_field0
#define ccb_bp ppriv_ptr1

/* Geometry reported to GEOM. */
struct disk_params {
	u_int8_t heads;
	u_int32_t cylinders;
	u_int8_t secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
};

/* Per-device instance state, hung off cam_periph.softc. */
struct ada_softc {
	struct bio_queue_head bio_queue;	/* pending bios, disksorted */
	SLIST_ENTRY(ada_softc) links;
	LIST_HEAD(, ccb_hdr) pending_ccbs;	/* CCBs issued, not yet done */
	ada_state state;
	ada_flags flags;
	ada_quirks quirks;
	int ordered_tag_count;
	int outstanding_cmds;
	struct disk_params params;
	struct disk *disk;		/* associated GEOM disk */
	union ccb saved_ccb;
	struct task sysctl_task;	/* deferred sysctl-tree creation */
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
	struct callout sendordered_c;	/* periodic ordered-tag callout */
};

/* Quirk table entry: inquiry pattern -> quirk bits. */
struct ada_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	ada_quirks quirks;
};
+
+//static struct ada_quirk_entry ada_quirk_table[] =
+//{
+//};
+
+static disk_strategy_t adastrategy;
+static dumper_t adadump;
+static periph_init_t adainit;
+static void adaasync(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg);
+static void adasysctlinit(void *context, int pending);
+static periph_ctor_t adaregister;
+static periph_dtor_t adacleanup;
+static periph_start_t adastart;
+static periph_oninv_t adaoninvalidate;
+static void adadone(struct cam_periph *periph,
+ union ccb *done_ccb);
+static int adaerror(union ccb *ccb, u_int32_t cam_flags,
+ u_int32_t sense_flags);
+static void adasetgeom(struct cam_periph *periph,
+ struct ccb_getdev *cgd);
+static timeout_t adasendorderedtag;
+static void adashutdown(void *arg, int howto);
+
+#ifndef ADA_DEFAULT_TIMEOUT
+#define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */
+#endif
+
+#ifndef ADA_DEFAULT_RETRY
+#define ADA_DEFAULT_RETRY 4
+#endif
+
+#ifndef ADA_DEFAULT_SEND_ORDERED
+#define ADA_DEFAULT_SEND_ORDERED 1
+#endif
+
+
+static int ada_retry_count = ADA_DEFAULT_RETRY;
+static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
+static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
+
+SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
+ "CAM Direct Access Disk driver");
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
+ &ada_retry_count, 0, "Normal I/O retry count");
+TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
+ &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
+TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
+ &ada_send_ordered, 0, "Send Ordered Tags");
+TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);
+
+/*
+ * ADA_ORDEREDTAG_INTERVAL determines how often, relative
+ * to the default timeout, we check to see whether an ordered
+ * tagged transaction is appropriate to prevent simple tag
+ * starvation. Since we'd like to ensure that there is at least
+ * 1/2 of the timeout length left for a starved transaction to
+ * complete after we've sent an ordered tag, we must poll at least
+ * four times in every timeout period. This takes care of the worst
+ * case where a starved transaction starts during an interval that
+ * meets the requirement "don't send an ordered tag" test so it takes
+ * us two intervals to determine that a tag must be sent.
+ */
+#ifndef ADA_ORDEREDTAG_INTERVAL
+#define ADA_ORDEREDTAG_INTERVAL 4
+#endif
+
+static struct periph_driver adadriver =
+{
+ adainit, "ada",
+ TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
+};
+
+PERIPHDRIVER_DECLARE(ada, adadriver);
+
+MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
+
/*
 * GEOM disk open handler: take a reference on the periph and mark the
 * device open.  Fails with ENXIO once the periph has gone away.
 */
static int
adaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	int unit;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		return (ENXIO);
	}

	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	unit = periph->unit_number;
	softc = (struct ada_softc *)periph->softc;
	softc->flags |= ADA_FLAG_OPEN;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     unit));

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		/*
		 * Clear stale pack-invalid state on a fresh open.
		 * (The original comment said "invalidate", but this
		 * actually revalidates the pack.)
		 */
		softc->flags &= ~ADA_FLAG_PACK_INVALID;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}
+
+static int
+adaclose(struct disk *dp)
+{
+ struct cam_periph *periph;
+ struct ada_softc *softc;
+ union ccb *ccb;
+ int error;
+
+ periph = (struct cam_periph *)dp->d_drv1;
+ if (periph == NULL)
+ return (ENXIO);
+
+ cam_periph_lock(periph);
+ if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+ return (error);
+ }
+
+ softc = (struct ada_softc *)periph->softc;
+ /* We only sync the cache if the drive is capable of it. */
+ if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
+
+ ccb = cam_periph_getccb(periph, /*priority*/1);
+ ccb->ccb_h.ccb_state = ADA_CCB_DUMP;
+ cam_fill_ataio(&ccb->ataio,
+ 1,
+ adadone,
+ CAM_DIR_NONE,
+ 0,
+ NULL,
+ 0,
+ ada_default_timeout*1000);
+
+ if (softc->flags & ADA_FLAG_CAN_48BIT)
+ ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
+ else
+ ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
+ xpt_polled_action(ccb);
+
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
+ xpt_print(periph->path, "Synchronize cache failed\n");
+
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ xpt_release_ccb(ccb);
+ }
+
+ softc->flags &= ~ADA_FLAG_OPEN;
+ cam_periph_unhold(periph);
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+ return (0);
+}
+
+/*
+ * Actually translate the requested transfer into one the physical driver
+ * can understand. The transfer is described by a buf and will include
+ * only one physical transfer.
+ */
+static void
+adastrategy(struct bio *bp)
+{
+ struct cam_periph *periph;
+ struct ada_softc *softc;
+
+ periph = (struct cam_periph *)bp->bio_disk->d_drv1;
+ if (periph == NULL) {
+ biofinish(bp, NULL, ENXIO);
+ return;
+ }
+ softc = (struct ada_softc *)periph->softc;
+
+ cam_periph_lock(periph);
+
+#if 0
+ /*
+ * check it's not too big a transfer for our adapter
+ */
+ scsi_minphys(bp,&sd_switch);
+#endif
+
+ /*
+ * Mask interrupts so that the pack cannot be invalidated until
+ * after we are in the queue. Otherwise, we might not properly
+ * clean up one of the buffers.
+ */
+
+ /*
+ * If the device has been made invalid, error out
+ */
+ if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
+ cam_periph_unlock(periph);
+ biofinish(bp, NULL, ENXIO);
+ return;
+ }
+
+ /*
+ * Place it in the queue of disk activities for this disk
+ */
+ bioq_disksort(&softc->bio_queue, bp);
+
+ /*
+ * Schedule ourselves for performing the work.
+ */
+ xpt_schedule(periph, /* XXX priority */1);
+ cam_periph_unlock(periph);
+
+ return;
+}
+
+static int
+adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
+{
+ struct cam_periph *periph;
+ struct ada_softc *softc;
+ u_int secsize;
+ union ccb ccb;
+ struct disk *dp;
+ uint64_t lba;
+ uint16_t count;
+
+ dp = arg;
+ periph = dp->d_drv1;
+ if (periph == NULL)
+ return (ENXIO);
+ softc = (struct ada_softc *)periph->softc;
+ cam_periph_lock(periph);
+ secsize = softc->params.secsize;
+ lba = offset / secsize;
+ count = length / secsize;
+
+ if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
+ cam_periph_unlock(periph);
+ return (ENXIO);
+ }
+
+ if (length > 0) {
+ periph->flags |= CAM_PERIPH_POLLED;
+ xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
+ ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
+ cam_fill_ataio(&ccb.ataio,
+ 0,
+ adadone,
+ CAM_DIR_OUT,
+ 0,
+ (u_int8_t *) virtual,
+ length,
+ ada_default_timeout*1000);
+ if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
+ (lba + count >= ATA_MAX_28BIT_LBA ||
+ count >= 256)) {
+ ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
+ 0, lba, count);
+ } else {
+ ata_36bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
+ 0, lba, count);
+ }
+ xpt_polled_action(&ccb);
+
+ if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ printf("Aborting dump due to I/O error.\n");
+ cam_periph_unlock(periph);
+ return(EIO);
+ }
+ cam_periph_unlock(periph);
+ return(0);
+ }
+
+ if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
+ xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
+
+ ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
+ cam_fill_ataio(&ccb.ataio,
+ 1,
+ adadone,
+ CAM_DIR_NONE,
+ 0,
+ NULL,
+ 0,
+ ada_default_timeout*1000);
+
+ if (softc->flags & ADA_FLAG_CAN_48BIT)
+ ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
+ else
+ ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
+ xpt_polled_action(&ccb);
+
+ if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
+ xpt_print(periph->path, "Synchronize cache failed\n");
+
+ if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
+ cam_release_devq(ccb.ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ periph->flags &= ~CAM_PERIPH_POLLED;
+ cam_periph_unlock(periph);
+ return (0);
+}
+
/*
 * Periph driver initialization: register for "new device found" async
 * events and hook system shutdown so caches get flushed.
 * NOTE(review): the shutdown handler is only registered when
 * ada_send_ordered is nonzero -- confirm that coupling is intended.
 */
static void
adainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback. This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("ada: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (ada_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("adainit: shutdown event registration failed!\n");
	}
}
+
/*
 * Called when the underlying device disappears: deregister our async
 * callbacks (code 0 == no events), mark the pack invalid, fail all
 * queued bios with ENXIO, and detach the GEOM disk.
 */
static void
adaoninvalidate(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, adaasync, periph, periph->path);

	softc->flags |= ADA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 * with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);

	disk_gone(softc->disk);
	xpt_print(periph->path, "lost device\n");
}
+
/*
 * Final teardown once all references to the periph are gone: free the
 * sysctl tree, the GEOM disk, the callout and the softc.  The periph
 * lock is dropped around the teardown since parts of it may sleep.
 */
static void
adacleanup(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	xpt_print(periph->path, "removing device entry\n");
	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}
+
/*
 * Async event callback.  AC_FOUND_DEVICE allocates an ada periph for
 * each newly discovered ATA-protocol device.  AC_SENT_BDR and
 * AC_BUS_RESET mark every outstanding CCB for unit-attention retry
 * and then deliberately fall through to the generic periph handling.
 */
static void
adaasync(void *callback_arg, u_int32_t code,
	 struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only attach to devices speaking the ATA protocol. */
		if (cgd->protocol != PROTO_ATA)
			break;

//		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
//		    && SID_TYPE(&cgd->inq_data) != T_RBC
//		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
//			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(adaregister, adaoninvalidate,
					  adacleanup, adastart,
					  "ada", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, adaasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("adaasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct ada_softc *softc;
		struct ccb_hdr *ccbh;

		softc = (struct ada_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= ADA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= ADA_CCB_RETRY_UA;
		/* FALLTHROUGH*/
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}
+
/*
 * Deferred (taskqueue) creation of the kern.cam.ada.<unit> sysctl
 * tree.  Runs from a task so it may sleep.  Only the unit node itself
 * is created here; no leaf variables are attached yet.
 */
static void
adasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;
	/* Bail if the periph went away before the task ran. */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return;

	softc = (struct ada_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= ADA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("adasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	cam_periph_release(periph);
}
+
+/*
+ * Periph registration callback for the "ada" driver.  Allocates the
+ * softc, decodes capability bits from the ATA IDENTIFY data carried in
+ * the getdev CCB, creates and announces the disk(9) device, and
+ * registers async callbacks for reset/lost-device events.  Returns
+ * CAM_REQ_CMP on success, CAM_REQ_CMP_ERR on any failure.
+ */
+static cam_status
+adaregister(struct cam_periph *periph, void *arg)
+{
+	struct ada_softc *softc;
+	struct ccb_pathinq cpi;
+	struct ccb_getdev *cgd;
+	char announce_buf[80];
+	struct disk_params *dp;
+	caddr_t match;
+	u_int maxio;
+
+	cgd = (struct ccb_getdev *)arg;
+	if (periph == NULL) {
+		printf("adaregister: periph was NULL!!\n");
+		return(CAM_REQ_CMP_ERR);
+	}
+
+	if (cgd == NULL) {
+		printf("adaregister: no getdev CCB, can't register device\n");
+		return(CAM_REQ_CMP_ERR);
+	}
+
+	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
+	    M_NOWAIT|M_ZERO);
+
+	if (softc == NULL) {
+		printf("adaregister: Unable to probe new device. "
+		    "Unable to allocate softc\n");
+		return(CAM_REQ_CMP_ERR);
+	}
+
+	LIST_INIT(&softc->pending_ccbs);
+	softc->state = ADA_STATE_NORMAL;
+	bioq_init(&softc->bio_queue);
+
+	/* Capability flags come straight from the IDENTIFY data words. */
+	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
+		softc->flags |= ADA_FLAG_CAN_48BIT;
+	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
+		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
+	/*
+	 * NCQ requires both the capability bit and a reported queue depth
+	 * of at least 31 (i.e. 32 tags).
+	 */
+	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
+	    cgd->ident_data.queue >= 31)
+		softc->flags |= ADA_FLAG_CAN_NCQ;
+//	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
+//		softc->flags |= ADA_FLAG_TAGGED_QUEUING;
+
+	periph->softc = softc;
+
+	/*
+	 * See if this device has any quirks.
+	 * (Quirk matching is not wired up for ATA yet; 'match' stays NULL.)
+	 */
+//	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
+//			       (caddr_t)ada_quirk_table,
+//			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
+//			       sizeof(*ada_quirk_table), scsi_inquiry_match);
+	match = NULL;
+
+	if (match != NULL)
+		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
+	else
+		softc->quirks = ADA_Q_NONE;
+
+	/* Check if the SIM does not want queued commands */
+	bzero(&cpi, sizeof(cpi));
+	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
+	cpi.ccb_h.func_code = XPT_PATH_INQ;
+	xpt_action((union ccb *)&cpi);
+	if (cpi.ccb_h.status != CAM_REQ_CMP ||
+	    (cpi.hba_inquiry & PI_TAG_ABLE) == 0)
+		softc->flags &= ~ADA_FLAG_CAN_NCQ;
+
+	/* Defer sysctl tree creation to a taskqueue (see adasysctlinit()). */
+	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
+
+	/*
+	 * Register this media as a disk.  The SIM mutex is dropped across
+	 * disk_alloc()/disk_create() and re-taken afterwards.
+	 */
+	mtx_unlock(periph->sim->mtx);
+	softc->disk = disk_alloc();
+	softc->disk->d_open = adaopen;
+	softc->disk->d_close = adaclose;
+	softc->disk->d_strategy = adastrategy;
+	softc->disk->d_dump = adadump;
+	softc->disk->d_name = "ada";
+	softc->disk->d_drv1 = periph;
+	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
+	if (maxio == 0)
+		maxio = DFLTPHYS;	/* traditional default */
+	else if (maxio > MAXPHYS)
+		maxio = MAXPHYS;	/* for safety */
+	/* Further clamp to what a single ATA transfer can address. */
+	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
+		maxio = min(maxio, 65535 * 512);
+	else					/* 28bit ATA command limit */
+		maxio = min(maxio, 255 * 512);
+	softc->disk->d_maxsize = maxio;
+	softc->disk->d_unit = periph->unit_number;
+	softc->disk->d_flags = 0;
+	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
+		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
+
+	adasetgeom(periph, cgd);
+	softc->disk->d_sectorsize = softc->params.secsize;
+	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
+	/* XXX: these are not actually "firmware" values, so they may be wrong */
+	softc->disk->d_fwsectors = softc->params.secs_per_track;
+	softc->disk->d_fwheads = softc->params.heads;
+//	softc->disk->d_devstat->block_size = softc->params.secsize;
+//	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
+
+	disk_create(softc->disk, DISK_VERSION);
+	mtx_lock(periph->sim->mtx);
+
+	dp = &softc->params;
+	snprintf(announce_buf, sizeof(announce_buf),
+		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
+		(uintmax_t)(((uintmax_t)dp->secsize *
+		dp->sectors) / (1024*1024)),
+		(uintmax_t)dp->sectors,
+		dp->secsize, dp->heads,
+		dp->secs_per_track, dp->cylinders);
+	xpt_announce_periph(periph, announce_buf);
+	if (softc->flags & ADA_FLAG_CAN_NCQ) {
+		printf("%s%d: Native Command Queueing enabled\n",
+		       periph->periph_name, periph->unit_number);
+	}
+
+	/*
+	 * Add async callbacks for bus reset and
+	 * bus device reset calls.  I don't bother
+	 * checking if this fails as, in most cases,
+	 * the system will function just fine without
+	 * them and the only alternative would be to
+	 * not attach the device on failure.
+	 */
+	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
+			   adaasync, periph, periph->path);
+
+	/*
+	 * Take an exclusive refcount on the periph while adastart is called
+	 * to finish the probe.  The reference will be dropped in adadone at
+	 * the end of probe.
+	 */
+//	(void)cam_periph_hold(periph, PRIBIO);
+//	xpt_schedule(periph, /*priority*/5);
+
+	/*
+	 * Schedule a periodic event to occasionally send an
+	 * ordered tag to a device.
+	 */
+	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
+	callout_reset(&softc->sendordered_c,
+	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
+	    adasendorderedtag, softc);
+
+	return(CAM_REQ_CMP);
+}
+
+/*
+ * XPT action routine: called with a freshly allocated CCB for this
+ * periph.  Immediate (waiting) CCB requests take precedence; otherwise
+ * the next queued bio is translated into an ATA read/write/flush
+ * command and dispatched via xpt_action().
+ */
+static void
+adastart(struct cam_periph *periph, union ccb *start_ccb)
+{
+	struct ada_softc *softc;
+
+	softc = (struct ada_softc *)periph->softc;
+
+	switch (softc->state) {
+	case ADA_STATE_NORMAL:
+	{
+		/* Pull a buffer from the queue and get going on it */
+		struct bio *bp;
+
+		/*
+		 * See if there is a buf with work for us to do..
+		 */
+		bp = bioq_first(&softc->bio_queue);
+		if (periph->immediate_priority <= periph->pinfo.priority) {
+			/* Hand the CCB to whoever is sleeping on ccb_list. */
+			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
+					("queuing for immediate ccb\n"));
+			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
+			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
+					  periph_links.sle);
+			periph->immediate_priority = CAM_PRIORITY_NONE;
+			wakeup(&periph->ccb_list);
+		} else if (bp == NULL) {
+			/* Nothing queued; return the CCB unused. */
+			xpt_release_ccb(start_ccb);
+		} else {
+			struct ccb_ataio *ataio = &start_ccb->ataio;
+			u_int8_t tag_code;
+
+			bioq_remove(&softc->bio_queue, bp);
+
+			/* Ordered-tag bookkeeping; tag codes unused for ATA. */
+			if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
+				softc->flags &= ~ADA_FLAG_NEED_OTAG;
+				softc->ordered_tag_count++;
+				tag_code = 0;//MSG_ORDERED_Q_TAG;
+			} else {
+				tag_code = 0;//MSG_SIMPLE_Q_TAG;
+			}
+			switch (bp->bio_cmd) {
+			case BIO_READ:
+			case BIO_WRITE:
+			{
+				uint64_t lba = bp->bio_pblkno;
+				uint16_t count = bp->bio_bcount / softc->params.secsize;
+
+				cam_fill_ataio(ataio,
+				    ada_retry_count,
+				    adadone,
+				    bp->bio_cmd == BIO_READ ?
+					CAM_DIR_IN : CAM_DIR_OUT,
+				    tag_code,
+				    bp->bio_data,
+				    bp->bio_bcount,
+				    ada_default_timeout*1000);
+
+				/*
+				 * Command selection: NCQ if available, else
+				 * 48-bit DMA when the LBA range or count
+				 * exceeds 28-bit limits, else 28-bit DMA.
+				 */
+				if (softc->flags & ADA_FLAG_CAN_NCQ) {
+					if (bp->bio_cmd == BIO_READ) {
+						ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
+						    lba, count);
+					} else {
+						ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
+						    lba, count);
+					}
+				} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
+				    (lba + count >= ATA_MAX_28BIT_LBA ||
+				    count >= 256)) {
+					if (bp->bio_cmd == BIO_READ) {
+						ata_48bit_cmd(ataio, ATA_READ_DMA48,
+						    0, lba, count);
+					} else {
+						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
+						    0, lba, count);
+					}
+				} else {
+					if (bp->bio_cmd == BIO_READ) {
+						ata_36bit_cmd(ataio, ATA_READ_DMA,
+						    0, lba, count);
+					} else {
+						ata_36bit_cmd(ataio, ATA_WRITE_DMA,
+						    0, lba, count);
+					}
+				}
+			}
+				break;
+			case BIO_FLUSH:
+				cam_fill_ataio(ataio,
+				    1,
+				    adadone,
+				    CAM_DIR_NONE,
+				    tag_code,
+				    NULL,
+				    0,
+				    ada_default_timeout*1000);
+
+				if (softc->flags & ADA_FLAG_CAN_48BIT)
+					ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
+				else
+					ata_48bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
+				break;
+			}
+			start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
+
+			/*
+			 * Block out any asynchronous callbacks
+			 * while we touch the pending ccb list.
+			 */
+			LIST_INSERT_HEAD(&softc->pending_ccbs,
+					 &start_ccb->ccb_h, periph_links.le);
+			softc->outstanding_cmds++;
+
+			/* We expect a unit attention from this device */
+			if ((softc->flags & ADA_FLAG_RETRY_UA) != 0) {
+				start_ccb->ccb_h.ccb_state |= ADA_CCB_RETRY_UA;
+				softc->flags &= ~ADA_FLAG_RETRY_UA;
+			}
+
+			start_ccb->ccb_h.ccb_bp = bp;
+			/* Re-peek the queue to decide whether to reschedule. */
+			bp = bioq_first(&softc->bio_queue);
+
+			xpt_action(start_ccb);
+		}
+
+		if (bp != NULL) {
+			/* Have more work to do, so ensure we stay scheduled */
+			xpt_schedule(periph, /* XXX priority */1);
+		}
+		break;
+	}
+	}
+}
+
+/*
+ * Completion callback for ada CCBs.  Buffer I/O completions run error
+ * recovery through adaerror() and finish the bio; waiting CCBs just
+ * wake their sleeper; dump CCBs are polled and left alone.
+ */
+static void
+adadone(struct cam_periph *periph, union ccb *done_ccb)
+{
+	struct ada_softc *softc;
+	struct ccb_ataio *ataio;
+
+	softc = (struct ada_softc *)periph->softc;
+	ataio = &done_ccb->ataio;
+	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
+	case ADA_CCB_BUFFER_IO:
+	{
+		struct bio *bp;
+
+		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
+		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+			int error;
+
+			error = adaerror(done_ccb, CAM_RETRY_SELTO, 0);
+			if (error == ERESTART) {
+				/*
+				 * A retry was scheduled, so
+				 * just return.
+				 */
+				return;
+			}
+			if (error != 0) {
+
+				if (error == ENXIO) {
+					/*
+					 * Catastrophic error. Mark our pack as
+					 * invalid.
+					 */
+					/*
+					 * XXX See if this is really a media
+					 * XXX change first?
+					 */
+					xpt_print(periph->path,
+					    "Invalidating pack\n");
+					softc->flags |= ADA_FLAG_PACK_INVALID;
+				}
+
+				/*
+				 * return all queued I/O with EIO, so that
+				 * the client can retry these I/Os in the
+				 * proper order should it attempt to recover.
+				 */
+				bioq_flush(&softc->bio_queue, NULL, EIO);
+				bp->bio_error = error;
+				bp->bio_resid = bp->bio_bcount;
+				bp->bio_flags |= BIO_ERROR;
+			} else {
+				bp->bio_resid = ataio->resid;
+				bp->bio_error = 0;
+				if (bp->bio_resid != 0)
+					bp->bio_flags |= BIO_ERROR;
+			}
+			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+				cam_release_devq(done_ccb->ccb_h.path,
+						 /*relsim_flags*/0,
+						 /*reduction*/0,
+						 /*timeout*/0,
+						 /*getcount_only*/0);
+		} else {
+			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
+				panic("REQ_CMP with QFRZN");
+			bp->bio_resid = ataio->resid;
+			if (ataio->resid > 0)
+				bp->bio_flags |= BIO_ERROR;
+		}
+
+		/*
+		 * Block out any asynchronous callbacks
+		 * while we touch the pending ccb list.
+		 */
+		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
+		softc->outstanding_cmds--;
+		if (softc->outstanding_cmds == 0)
+			softc->flags |= ADA_FLAG_WENT_IDLE;
+
+		biodone(bp);
+		break;
+	}
+	case ADA_CCB_WAITING:
+	{
+		/* Caller will release the CCB */
+		wakeup(&done_ccb->ccb_h.cbfcnp);
+		return;
+	}
+	case ADA_CCB_DUMP:
+		/* No-op.  We're polling */
+		return;
+	default:
+		break;
+	}
+	xpt_release_ccb(done_ccb);
+}
+
+/*
+ * Common error-recovery entry point for ada CCBs: delegate to the
+ * generic cam_periph_error(), using softc->saved_ccb as scratch space.
+ * Returns 0, ERESTART, or an errno, per cam_periph_error() semantics.
+ */
+static int
+adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
+{
+	struct ada_softc *softc;
+	struct cam_periph *periph;
+
+	periph = xpt_path_periph(ccb->ccb_h.path);
+	softc = (struct ada_softc *)periph->softc;
+
+	return(cam_periph_error(ccb, cam_flags, sense_flags,
+				&softc->saved_ccb));
+}
+
+/*
+ * Fill in softc->params (geometry and capacity) from the IDENTIFY
+ * data: prefer the "current" CHS values when valid, then override the
+ * sector count with the 28-bit LBA size, and finally with the 48-bit
+ * LBA size when the device supports 48-bit addressing.
+ */
+static void
+adasetgeom(struct cam_periph *periph, struct ccb_getdev *cgd)
+{
+	struct ada_softc *softc = (struct ada_softc *)periph->softc;
+	struct disk_params *dp = &softc->params;
+	u_int64_t lbasize48;
+	u_int32_t lbasize;
+
+	dp->secsize = 512;
+	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
+	    cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
+		dp->heads = cgd->ident_data.current_heads;
+		dp->secs_per_track = cgd->ident_data.current_sectors;
+		dp->cylinders = cgd->ident_data.cylinders;
+		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
+			      ((u_int32_t)cgd->ident_data.current_size_2 << 16);
+	} else {
+		dp->heads = cgd->ident_data.heads;
+		dp->secs_per_track = cgd->ident_data.sectors;
+		dp->cylinders = cgd->ident_data.cylinders;
+		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
+	}
+	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
+		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
+
+	/* does this device need oldstyle CHS addressing */
+//	if (!ad_version(cgd->ident_data.version_major) || !lbasize)
+//		atadev->flags |= ATA_D_USE_CHS;
+
+	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
+	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
+		dp->sectors = lbasize;
+
+	/* use the 48bit LBA size if valid */
+	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
+		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
+		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
+		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
+	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
+	    lbasize48 > ATA_MAX_28BIT_LBA)
+		dp->sectors = lbasize48;
+}
+
+/*
+ * Periodic callout (armed in adaregister()): if ordered tags are
+ * enabled and the device has been neither idle nor issued an ordered
+ * tag since the last tick, request one via ADA_FLAG_NEED_OTAG, then
+ * re-arm itself.
+ */
+static void
+adasendorderedtag(void *arg)
+{
+	struct ada_softc *softc = arg;
+
+	if (ada_send_ordered) {
+		if ((softc->ordered_tag_count == 0)
+		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
+			softc->flags |= ADA_FLAG_NEED_OTAG;
+		}
+		if (softc->outstanding_cmds > 0)
+			softc->flags &= ~ADA_FLAG_WENT_IDLE;
+
+		softc->ordered_tag_count = 0;
+	}
+	/* Queue us up again */
+	callout_reset(&softc->sendordered_c,
+	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
+	    adasendorderedtag, softc);
+}
+
+/*
+ * Step through all ADA peripheral drivers, and if the device is still open,
+ * sync the disk cache to physical media.  Runs at shutdown time, so the
+ * flush is issued via xpt_polled_action() rather than interrupts (the
+ * ADA_CCB_DUMP state makes adadone() treat it as a polled no-op).
+ */
+static void
+adashutdown(void * arg, int howto)
+{
+	struct cam_periph *periph;
+	struct ada_softc *softc;
+
+	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
+		union ccb ccb;
+
+		cam_periph_lock(periph);
+		softc = (struct ada_softc *)periph->softc;
+		/*
+		 * We only sync the cache if the drive is still open, and
+		 * if the drive is capable of it..
+		 */
+		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
+		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
+			cam_periph_unlock(periph);
+			continue;
+		}
+
+		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
+
+		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
+		cam_fill_ataio(&ccb.ataio,
+				    1,
+				    adadone,
+				    CAM_DIR_NONE,
+				    0,
+				    NULL,
+				    0,
+				    ada_default_timeout*1000);
+
+		if (softc->flags & ADA_FLAG_CAN_48BIT)
+			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
+		else
+			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
+		xpt_polled_action(&ccb);
+
+		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
+			xpt_print(periph->path, "Synchronize cache failed\n");
+
+		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
+			cam_release_devq(ccb.ccb_h.path,
+					 /*relsim_flags*/0,
+					 /*reduction*/0,
+					 /*timeout*/0,
+					 /*getcount_only*/0);
+		cam_periph_unlock(periph);
+	}
+}
+
+#endif /* _KERNEL */
diff --git a/sys/cam/ata/ata_xpt.c b/sys/cam/ata/ata_xpt.c
new file mode 100644
index 0000000..7f8daa2
--- /dev/null
+++ b/sys/cam/ata/ata_xpt.c
@@ -0,0 +1,1895 @@
+/*-
+ * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/time.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/md5.h>
+#include <sys/interrupt.h>
+#include <sys/sbuf.h>
+
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+
+#ifdef PC98
+#include <pc98/pc98/pc98_machdep.h> /* geometry translation */
+#endif
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_queue.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_xpt_internal.h>
+#include <cam/cam_debug.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/scsi/scsi_pass.h>
+#include <cam/ata/ata_all.h>
+#include <machine/stdarg.h> /* for xpt_print below */
+#include "opt_cam.h"
+
+/*
+ * Quirk table entry: matches a device by INQUIRY pattern and supplies
+ * quirk flags plus tagged-queueing depth limits.  Retrieved from a
+ * device via the SCSI_QUIRK() accessor below.
+ */
+struct scsi_quirk_entry {
+	struct scsi_inquiry_pattern inq_pat;	/* INQUIRY match pattern */
+	u_int8_t quirks;			/* bitmask of CAM_QUIRK_* */
+#define	CAM_QUIRK_NOLUNS	0x01
+#define	CAM_QUIRK_NOSERIAL	0x02
+#define	CAM_QUIRK_HILUNS	0x04
+#define	CAM_QUIRK_NOHILUNS	0x08
+	u_int mintags;				/* minimum tag depth */
+	u_int maxtags;				/* maximum tag depth */
+};
+#define	SCSI_QUIRK(dev)	((struct scsi_quirk_entry *)((dev)->quirk))
+
+static periph_init_t probe_periph_init;
+
+/* Registration of the transient "probe" periph used during bus scans. */
+static struct periph_driver probe_driver =
+{
+	probe_periph_init, "probe",
+	TAILQ_HEAD_INITIALIZER(probe_driver.units)
+};
+
+PERIPHDRIVER_DECLARE(probe, probe_driver);
+
+/*
+ * Probe state machine states.  PROBE_PM_* states handle SATA port
+ * multiplier enumeration; the rest handle device identification.
+ */
+typedef enum {
+	PROBE_RESET,
+	PROBE_IDENTIFY,
+	PROBE_SETMODE,
+	PROBE_INQUIRY,
+	PROBE_FULL_INQUIRY,
+	PROBE_PM_PID,
+	PROBE_PM_PRV,
+	PROBE_PM_PORTS,
+	PROBE_PM_RESET,
+	PROBE_PM_CONNECT,
+	PROBE_PM_CHECK,
+	PROBE_PM_CLEAR,
+	PROBE_INVALID
+} probe_action;
+
+/* Debug names; MUST stay in sync with the probe_action enum above. */
+static char *probe_action_text[] = {
+	"PROBE_RESET",
+	"PROBE_IDENTIFY",
+	"PROBE_SETMODE",
+	"PROBE_INQUIRY",
+	"PROBE_FULL_INQUIRY",
+	"PROBE_PM_PID",
+	"PROBE_PM_PRV",
+	"PROBE_PM_PORTS",
+	"PROBE_PM_RESET",
+	"PROBE_PM_CONNECT",
+	"PROBE_PM_CHECK",
+	"PROBE_PM_CLEAR",
+	"PROBE_INVALID"
+};
+
+/*
+ * Transition the probe state machine, logging the old -> new state via
+ * CAM_DEBUG.  Indexes probe_action_text[] by state, so the enum and
+ * the text array must remain in sync.
+ */
+#define PROBE_SET_ACTION(softc, newaction)	\
+do {									\
+	char **text;							\
+	text = probe_action_text;					\
+	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_INFO,		\
+	    ("Probe %s to %s\n", text[(softc)->action],			\
+	    text[(newaction)]));					\
+	(softc)->action = (newaction);					\
+} while(0)
+
+typedef enum {
+	PROBE_NO_ANNOUNCE	= 0x04	/* suppress announcement on rescan */
+} probe_flags;
+
+/* Per-probe-periph state. */
+typedef struct {
+	TAILQ_HEAD(, ccb_hdr) request_ccbs;	/* queued scan request CCBs */
+	probe_action	action;		/* current state machine state */
+	union ccb	saved_ccb;	/* scratch CCB for error recovery */
+	probe_flags	flags;
+	u_int8_t	digest[16];	/* MD5 of model/revision/serial, used
+					   to detect device replacement */
+	uint32_t	pm_pid;		/* port multiplier product id (GSCR) */
+	uint32_t	pm_prv;		/* port multiplier revision (GSCR) */
+	int		pm_ports;	/* number of PM ports to enumerate */
+	int		pm_step;	/* current PM port being worked on */
+	int		pm_try;
+	struct cam_periph *periph;	/* back pointer to owning periph */
+} probe_softc;
+
+/* Quirk table; currently holds only the catch-all default entry. */
+static struct scsi_quirk_entry scsi_quirk_table[] =
+{
+	{
+		/* Default tagged queuing parameters for all devices */
+		{
+		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
+		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
+		},
+		/*quirks*/0, /*mintags*/2, /*maxtags*/32
+	},
+};
+
+static const int scsi_quirk_table_size =
+	sizeof(scsi_quirk_table) / sizeof(*scsi_quirk_table);
+
+static cam_status	proberegister(struct cam_periph *periph,
+				      void *arg);
+static void	 probeschedule(struct cam_periph *probe_periph);
+static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
+//static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
+//static int       proberequestbackoff(struct cam_periph *periph,
+//				     struct cam_ed *device);
+static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
+static void	 probecleanup(struct cam_periph *periph);
+static void	 scsi_find_quirk(struct cam_ed *device);
+static void	 ata_scan_bus(struct cam_periph *periph, union ccb *ccb);
+static void	 ata_scan_lun(struct cam_periph *periph,
+			      struct cam_path *path, cam_flags flags,
+			      union ccb *ccb);
+static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
+static struct cam_ed *
+		 ata_alloc_device(struct cam_eb *bus, struct cam_et *target,
+				  lun_id_t lun_id);
+static void	 ata_device_transport(struct cam_path *path);
+static void	 scsi_set_transfer_settings(struct ccb_trans_settings *cts,
+					    struct cam_ed *device,
+					    int async_update);
+static void	 scsi_toggle_tags(struct cam_path *path);
+static void	 ata_dev_async(u_int32_t async_code,
+			       struct cam_eb *bus,
+			       struct cam_et *target,
+			       struct cam_ed *device,
+			       void *async_arg);
+static void	 ata_action(union ccb *start_ccb);
+
+/* Transport ops the core XPT uses for ATA/SATA busses. */
+static struct xpt_xport ata_xport = {
+	.alloc_device = ata_alloc_device,
+	.action = ata_action,
+	.async = ata_dev_async,
+};
+
+/* Accessor used by the XPT core to obtain the ATA transport vector. */
+struct xpt_xport *
+ata_get_xport(void)
+{
+	return (&ata_xport);
+}
+
+/*
+ * Periph driver init hook; nothing to do for the probe driver.
+ * NOTE(review): K&R-style empty parens -- "(void)" would be preferred.
+ */
+static void
+probe_periph_init()
+{
+}
+
+/*
+ * Registration callback for the probe periph: allocate the probe
+ * softc, queue the scan request CCB, start the state machine in
+ * PROBE_INVALID, and schedule the first probe step after the bus
+ * settle delay.  'arg' is the CCB representing the probe request.
+ */
+static cam_status
+proberegister(struct cam_periph *periph, void *arg)
+{
+	union ccb *request_ccb;	/* CCB representing the probe request */
+	cam_status status;
+	probe_softc *softc;
+
+	request_ccb = (union ccb *)arg;
+	if (periph == NULL) {
+		printf("proberegister: periph was NULL!!\n");
+		return(CAM_REQ_CMP_ERR);
+	}
+
+	if (request_ccb == NULL) {
+		printf("proberegister: no probe CCB, "
+		       "can't register device\n");
+		return(CAM_REQ_CMP_ERR);
+	}
+
+	softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);
+
+	if (softc == NULL) {
+		printf("proberegister: Unable to probe new device. "
+		       "Unable to allocate softc\n");
+		return(CAM_REQ_CMP_ERR);
+	}
+	TAILQ_INIT(&softc->request_ccbs);
+	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
+			  periph_links.tqe);
+	softc->flags = 0;
+	periph->softc = softc;
+	softc->periph = periph;
+	softc->action = PROBE_INVALID;
+	status = cam_periph_acquire(periph);
+	if (status != CAM_REQ_CMP) {
+		return (status);
+	}
+
+
+	/*
+	 * Ensure we've waited at least a bus settle
+	 * delay before attempting to probe the device.
+	 * For HBAs that don't do bus resets, this won't make a difference.
+	 */
+	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
+				      scsi_delay);
+	probeschedule(periph);
+	return(CAM_REQ_CMP);
+}
+
+/*
+ * Choose the initial probe state based on the device's current status
+ * (unconfigured -> RESET, port multiplier -> PM_PID, else IDENTIFY)
+ * and schedule the periph at the request CCB's priority.
+ */
+static void
+probeschedule(struct cam_periph *periph)
+{
+	struct ccb_pathinq cpi;
+	union ccb *ccb;
+	probe_softc *softc;
+
+	softc = (probe_softc *)periph->softc;
+	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
+
+	/* NOTE(review): the cpi result is not used below. */
+	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
+	cpi.ccb_h.func_code = XPT_PATH_INQ;
+	xpt_action((union ccb *)&cpi);
+
+	if (periph->path->device->flags & CAM_DEV_UNCONFIGURED)
+		PROBE_SET_ACTION(softc, PROBE_RESET);
+	else if (periph->path->device->protocol == PROTO_SATAPM)
+		PROBE_SET_ACTION(softc, PROBE_PM_PID);
+	else
+		PROBE_SET_ACTION(softc, PROBE_IDENTIFY);
+
+	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
+		softc->flags |= PROBE_NO_ANNOUNCE;
+	else
+		softc->flags &= ~PROBE_NO_ANNOUNCE;
+
+	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
+}
+
+/*
+ * Probe state machine action routine: build the CCB for the current
+ * probe step (reset, IDENTIFY, transfer-mode setup, ATAPI INQUIRY, or
+ * one of the port multiplier register accesses) and dispatch it.
+ * Completion is handled in probedone().
+ */
+static void
+probestart(struct cam_periph *periph, union ccb *start_ccb)
+{
+	/* Probe the device that our peripheral driver points to */
+	struct ccb_ataio *ataio;
+	struct ccb_scsiio *csio;
+	struct ccb_trans_settings cts;
+	probe_softc *softc;
+
+	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
+
+	softc = (probe_softc *)periph->softc;
+	ataio = &start_ccb->ataio;
+	csio = &start_ccb->csio;
+
+	switch (softc->action) {
+	case PROBE_RESET:
+		/* Target id 15 is the port multiplier slot on this bus. */
+		if (start_ccb->ccb_h.target_id == 15) {
+			/* Report SIM that we have no knowledge about PM presence. */
+			bzero(&cts, sizeof(cts));
+			xpt_setup_ccb(&cts.ccb_h, start_ccb->ccb_h.path, 1);
+			cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
+			cts.type = CTS_TYPE_CURRENT_SETTINGS;
+			cts.xport_specific.sata.pm_present = 0;
+			cts.xport_specific.sata.valid = CTS_SATA_VALID_PM;
+			xpt_action((union ccb *)&cts);
+		}
+		cam_fill_ataio(ataio,
+		      0,
+		      probedone,
+		      /*flags*/CAM_DIR_NONE,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      (start_ccb->ccb_h.target_id == 15 ? 3 : 15) * 1000);
+		ata_reset_cmd(ataio);
+		break;
+	case PROBE_IDENTIFY:
+	{
+		struct ata_params *ident_buf =
+		    &periph->path->device->ident_data;
+
+		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
+			/* Prepare check that it is the same device. */
+			MD5_CTX context;
+
+			MD5Init(&context);
+			MD5Update(&context,
+			    (unsigned char *)ident_buf->model,
+			    sizeof(ident_buf->model));
+			MD5Update(&context,
+			    (unsigned char *)ident_buf->revision,
+			    sizeof(ident_buf->revision));
+			MD5Update(&context,
+			    (unsigned char *)ident_buf->serial,
+			    sizeof(ident_buf->serial));
+			MD5Final(softc->digest, &context);
+		}
+		cam_fill_ataio(ataio,
+		      1,
+		      probedone,
+		      /*flags*/CAM_DIR_IN,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/(u_int8_t *)ident_buf,
+		      /*dxfer_len*/sizeof(struct ata_params),
+		      30 * 1000);
+		/* ATA devices get IDENTIFY DEVICE; ATAPI gets IDENTIFY PACKET. */
+		if (periph->path->device->protocol == PROTO_ATA)
+			ata_36bit_cmd(ataio, ATA_ATA_IDENTIFY, 0, 0, 0);
+		else
+			ata_36bit_cmd(ataio, ATA_ATAPI_IDENTIFY, 0, 0, 0);
+		break;
+	}
+	case PROBE_SETMODE:
+	{
+		struct ata_params *ident_buf =
+		    &periph->path->device->ident_data;
+
+		cam_fill_ataio(ataio,
+		      1,
+		      probedone,
+		      /*flags*/CAM_DIR_IN,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/(u_int8_t *)ident_buf,
+		      /*dxfer_len*/sizeof(struct ata_params),
+		      30 * 1000);
+		/* Select the best UDMA mode the IDENTIFY data advertises. */
+		ata_36bit_cmd(ataio, ATA_SETFEATURES, ATA_SF_SETXFER, 0,
+		    ata_max_mode(ident_buf, ATA_UDMA6, ATA_UDMA6));
+		break;
+	}
+	case PROBE_INQUIRY:
+	case PROBE_FULL_INQUIRY:
+	{
+		u_int inquiry_len;
+		struct scsi_inquiry_data *inq_buf =
+		    &periph->path->device->inq_data;
+
+		if (softc->action == PROBE_INQUIRY)
+			inquiry_len = SHORT_INQUIRY_LENGTH;
+		else
+			inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
+		/*
+		 * Some parallel SCSI devices fail to send an
+		 * ignore wide residue message when dealing with
+		 * odd length inquiry requests.  Round up to be
+		 * safe.
+		 */
+		inquiry_len = roundup2(inquiry_len, 2);
+		scsi_inquiry(csio,
+			     /*retries*/1,
+			     probedone,
+			     MSG_SIMPLE_Q_TAG,
+			     (u_int8_t *)inq_buf,
+			     inquiry_len,
+			     /*evpd*/FALSE,
+			     /*page_code*/0,
+			     SSD_MIN_SIZE,
+			     /*timeout*/60 * 1000);
+		break;
+	}
+	/* Port multiplier GSCR register reads: 0 = PID, 1 = PRV, 2 = ports. */
+	case PROBE_PM_PID:
+		cam_fill_ataio(ataio,
+		      1,
+		      probedone,
+		      /*flags*/CAM_DIR_NONE,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      10 * 1000);
+		ata_pm_read_cmd(ataio, 0, 15);
+		break;
+	case PROBE_PM_PRV:
+		cam_fill_ataio(ataio,
+		      1,
+		      probedone,
+		      /*flags*/CAM_DIR_NONE,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      10 * 1000);
+		ata_pm_read_cmd(ataio, 1, 15);
+		break;
+	case PROBE_PM_PORTS:
+		cam_fill_ataio(ataio,
+		      1,
+		      probedone,
+		      /*flags*/CAM_DIR_NONE,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      10 * 1000);
+		ata_pm_read_cmd(ataio, 2, 15);
+		break;
+	case PROBE_PM_RESET:
+	{
+		struct ata_params *ident_buf =
+		    &periph->path->device->ident_data;
+		cam_fill_ataio(ataio,
+		      1,
+		      probedone,
+		      /*flags*/CAM_DIR_NONE,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      10 * 1000);
+		ata_pm_write_cmd(ataio, 2, softc->pm_step,
+		    (ident_buf->cylinders & (1 << softc->pm_step)) ? 0 : 1);
+/* XXX leftover debug output */
+printf("PM RESET %d %04x %d\n", softc->pm_step, ident_buf->cylinders,
+	    (ident_buf->cylinders & (1 << softc->pm_step)) ? 0 : 1);
+		break;
+	}
+	case PROBE_PM_CONNECT:
+		cam_fill_ataio(ataio,
+		      1,
+		      probedone,
+		      /*flags*/CAM_DIR_NONE,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      10 * 1000);
+		ata_pm_write_cmd(ataio, 2, softc->pm_step, 0);
+		break;
+	case PROBE_PM_CHECK:
+		cam_fill_ataio(ataio,
+		      1,
+		      probedone,
+		      /*flags*/CAM_DIR_NONE,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      10 * 1000);
+		ata_pm_read_cmd(ataio, 0, softc->pm_step);
+		break;
+	case PROBE_PM_CLEAR:
+		cam_fill_ataio(ataio,
+		      1,
+		      probedone,
+		      /*flags*/CAM_DIR_NONE,
+		      MSG_SIMPLE_Q_TAG,
+		      /*data_ptr*/NULL,
+		      /*dxfer_len*/0,
+		      10 * 1000);
+		ata_pm_write_cmd(ataio, 1, softc->pm_step, 0xFFFFFFFF);
+		break;
+	case PROBE_INVALID:
+		CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO,
+		    ("probestart: invalid action state\n"));
+	default:
+		break;
+	}
+	xpt_action(start_ccb);
+}
+/*
+ * Disabled: SPI-specific negotiation helpers carried over from the
+ * parallel SCSI probe code; not used by the ATA transport.
+ */
+#if 0
+static void
+proberequestdefaultnegotiation(struct cam_periph *periph)
+{
+	struct ccb_trans_settings cts;
+
+	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
+	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
+	cts.type = CTS_TYPE_USER_SETTINGS;
+	xpt_action((union ccb *)&cts);
+	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		return;
+	}
+	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
+	cts.type = CTS_TYPE_CURRENT_SETTINGS;
+	xpt_action((union ccb *)&cts);
+}
+
+/*
+ * Backoff Negotiation Code- only pertinent for SPI devices.
+ */
+static int
+proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
+{
+	struct ccb_trans_settings cts;
+	struct ccb_trans_settings_spi *spi;
+
+	memset(&cts, 0, sizeof (cts));
+	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
+	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
+	cts.type = CTS_TYPE_CURRENT_SETTINGS;
+	xpt_action((union ccb *)&cts);
+	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		if (bootverbose) {
+			xpt_print(periph->path,
+			    "failed to get current device settings\n");
+		}
+		return (0);
+	}
+	if (cts.transport != XPORT_SPI) {
+		if (bootverbose) {
+			xpt_print(periph->path, "not SPI transport\n");
+		}
+		return (0);
+	}
+	spi = &cts.xport_specific.spi;
+
+	/*
+	 * We cannot renegotiate sync rate if we don't have one.
+	 */
+	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
+		if (bootverbose) {
+			xpt_print(periph->path, "no sync rate known\n");
+		}
+		return (0);
+	}
+
+	/*
+	 * We'll assert that we don't have to touch PPR options- the
+	 * SIM will see what we do with period and offset and adjust
+	 * the PPR options as appropriate.
+	 */
+
+	/*
+	 * A sync rate with unknown or zero offset is nonsensical.
+	 * A sync period of zero means Async.
+	 */
+	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
+	 || spi->sync_offset == 0 || spi->sync_period == 0) {
+		if (bootverbose) {
+			xpt_print(periph->path, "no sync rate available\n");
+		}
+		return (0);
+	}
+
+	if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
+		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
+		    ("hit async: giving up on DV\n"));
+		return (0);
+	}
+
+
+	/*
+	 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
+	 * We don't try to remember 'last' settings to see if the SIM actually
+	 * gets into the speed we want to set. We check on the SIM telling
+	 * us that a requested speed is bad, but otherwise don't try and
+	 * check the speed due to the asynchronous and handshake nature
+	 * of speed setting.
+	 */
+	spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
+	for (;;) {
+		spi->sync_period++;
+		if (spi->sync_period >= 0xf) {
+			spi->sync_period = 0;
+			spi->sync_offset = 0;
+			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
+			    ("setting to async for DV\n"));
+			/*
+			 * Once we hit async, we don't want to try
+			 * any more settings.
+			 */
+			device->flags |= CAM_DEV_DV_HIT_BOTTOM;
+		} else if (bootverbose) {
+			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
+			    ("DV: period 0x%x\n", spi->sync_period));
+			printf("setting period to 0x%x\n", spi->sync_period);
+		}
+		cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
+		cts.type = CTS_TYPE_CURRENT_SETTINGS;
+		xpt_action((union ccb *)&cts);
+		if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+			break;
+		}
+		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
+		    ("DV: failed to set period 0x%x\n", spi->sync_period));
+		if (spi->sync_period == 0) {
+			return (0);
+		}
+	}
+	return (1);
+}
+#endif
+/*
+ * Completion handler for every CCB issued by the probe state machine.
+ * Decodes the result of the current PROBE_* step, advances
+ * softc->action to the next step and reschedules the periph, or
+ * terminates the probe.  Any error that cam_periph_error() does not
+ * ask us to retry funnels into device_fail, which announces device
+ * loss if the device had been configured.  On termination the head
+ * request CCB is completed with the probe result in ppriv_field1.
+ */
+static void
+probedone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct ata_params *ident_buf;
+ probe_softc *softc;
+ struct cam_path *path;
+ u_int32_t priority;
+ int found = 0;
+
+ CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
+
+ softc = (probe_softc *)periph->softc;
+ path = done_ccb->ccb_h.path;
+ priority = done_ccb->ccb_h.pinfo.priority;
+ ident_buf = &path->device->ident_data;
+
+ switch (softc->action) {
+ case PROBE_RESET:
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ /*
+ * Classify the device by its post-reset signature:
+ * 0x0000 = ATA disk, 0x9669 = port multiplier
+ * (target 15 only), 0xeb14 = ATAPI (goes through
+ * SCSI INQUIRY).
+ */
+ int sign = (done_ccb->ataio.res.lba_high << 8) +
+ done_ccb->ataio.res.lba_mid;
+ xpt_print(path, "SIGNATURE: %04x\n", sign);
+ if (sign == 0x0000 &&
+ done_ccb->ccb_h.target_id != 15) {
+ path->device->protocol = PROTO_ATA;
+ PROBE_SET_ACTION(softc, PROBE_IDENTIFY);
+ } else if (sign == 0x9669 &&
+ done_ccb->ccb_h.target_id == 15) {
+ struct ccb_trans_settings cts;
+
+ /* Report SIM that PM is present. */
+ bzero(&cts, sizeof(cts));
+ xpt_setup_ccb(&cts.ccb_h, path, 1);
+ cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
+ cts.type = CTS_TYPE_CURRENT_SETTINGS;
+ cts.xport_specific.sata.pm_present = 1;
+ cts.xport_specific.sata.valid = CTS_SATA_VALID_PM;
+ xpt_action((union ccb *)&cts);
+ path->device->protocol = PROTO_SATAPM;
+ PROBE_SET_ACTION(softc, PROBE_PM_PID);
+ } else if (sign == 0xeb14 &&
+ done_ccb->ccb_h.target_id != 15) {
+ path->device->protocol = PROTO_SCSI;
+ PROBE_SET_ACTION(softc, PROBE_IDENTIFY);
+ } else {
+ if (done_ccb->ccb_h.target_id != 15) {
+ xpt_print(path,
+ "Unexpected signature 0x%04x\n", sign);
+ }
+ xpt_release_ccb(done_ccb);
+ break;
+ }
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ case PROBE_IDENTIFY:
+ {
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ int16_t *ptr;
+
+ /* IDENTIFY data is little-endian 16-bit words. */
+ for (ptr = (int16_t *)ident_buf;
+ ptr < (int16_t *)ident_buf + sizeof(struct ata_params)/2; ptr++) {
+ *ptr = le16toh(*ptr);
+ }
+ /*
+ * ATA strings are byte-swapped within each word;
+ * a few vendors (FX/NEC/Pioneer/SHARP prefixes)
+ * already report them in host order, so skip those.
+ */
+ if (strncmp(ident_buf->model, "FX", 2) &&
+ strncmp(ident_buf->model, "NEC", 3) &&
+ strncmp(ident_buf->model, "Pioneer", 7) &&
+ strncmp(ident_buf->model, "SHARP", 5)) {
+ ata_bswap(ident_buf->model, sizeof(ident_buf->model));
+ ata_bswap(ident_buf->revision, sizeof(ident_buf->revision));
+ ata_bswap(ident_buf->serial, sizeof(ident_buf->serial));
+ }
+ ata_btrim(ident_buf->model, sizeof(ident_buf->model));
+ ata_bpack(ident_buf->model, ident_buf->model, sizeof(ident_buf->model));
+ ata_btrim(ident_buf->revision, sizeof(ident_buf->revision));
+ ata_bpack(ident_buf->revision, ident_buf->revision, sizeof(ident_buf->revision));
+ ata_btrim(ident_buf->serial, sizeof(ident_buf->serial));
+ ata_bpack(ident_buf->serial, ident_buf->serial, sizeof(ident_buf->serial));
+
+ if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
+ /* Check that it is the same device. */
+ MD5_CTX context;
+ u_int8_t digest[16];
+
+ MD5Init(&context);
+ MD5Update(&context,
+ (unsigned char *)ident_buf->model,
+ sizeof(ident_buf->model));
+ MD5Update(&context,
+ (unsigned char *)ident_buf->revision,
+ sizeof(ident_buf->revision));
+ MD5Update(&context,
+ (unsigned char *)ident_buf->serial,
+ sizeof(ident_buf->serial));
+ MD5Final(digest, &context);
+ if (bcmp(digest, softc->digest, sizeof(digest))) {
+ /* Device changed. */
+ xpt_async(AC_LOST_DEVICE, path, NULL);
+ }
+ xpt_release_ccb(done_ccb);
+ break;
+ }
+
+ /* Clean up from previous instance of this device */
+ if (path->device->serial_num != NULL) {
+ free(path->device->serial_num, M_CAMXPT);
+ path->device->serial_num = NULL;
+ path->device->serial_num_len = 0;
+ }
+ path->device->serial_num =
+ (u_int8_t *)malloc((sizeof(ident_buf->serial) + 1),
+ M_CAMXPT, M_NOWAIT);
+ if (path->device->serial_num != NULL) {
+ bcopy(ident_buf->serial,
+ path->device->serial_num,
+ sizeof(ident_buf->serial));
+ path->device->serial_num[sizeof(ident_buf->serial)]
+ = '\0';
+ path->device->serial_num_len =
+ strlen(path->device->serial_num);
+ }
+
+ path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
+
+ scsi_find_quirk(path->device);
+ ata_device_transport(path);
+
+ PROBE_SET_ACTION(softc, PROBE_SETMODE);
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+device_fail:
+ /*
+ * If we get to this point, we got an error status back
+ * from the inquiry and the error status doesn't require
+ * automatically retrying the command. Therefore, the
+ * inquiry failed. If we had inquiry information before
+ * for this device, but this latest inquiry command failed,
+ * the device has probably gone away. If this device isn't
+ * already marked unconfigured, notify the peripheral
+ * drivers that this device is no more.
+ */
+ if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
+ /* Send the async notification. */
+ xpt_async(AC_LOST_DEVICE, path, NULL);
+
+ xpt_release_ccb(done_ccb);
+ break;
+ }
+ case PROBE_SETMODE:
+ {
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ /* ATA disks finish here; ATAPI continues to INQUIRY. */
+ if (path->device->protocol == PROTO_ATA) {
+ path->device->flags &= ~CAM_DEV_UNCONFIGURED;
+ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
+ xpt_action(done_ccb);
+ xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
+ done_ccb);
+ xpt_release_ccb(done_ccb);
+ break;
+ } else {
+ PROBE_SET_ACTION(softc, PROBE_INQUIRY);
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ }
+ case PROBE_INQUIRY:
+ case PROBE_FULL_INQUIRY:
+ {
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ struct scsi_inquiry_data *inq_buf;
+ u_int8_t periph_qual;
+
+ path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
+ inq_buf = &path->device->inq_data;
+
+ periph_qual = SID_QUAL(inq_buf);
+
+ if (periph_qual == SID_QUAL_LU_CONNECTED) {
+ u_int8_t len;
+
+ /*
+ * We conservatively request only
+ * SHORT_INQUIRY_LEN bytes of inquiry
+ * information during our first try
+ * at sending an INQUIRY. If the device
+ * has more information to give,
+ * perform a second request specifying
+ * the amount of information the device
+ * is willing to give.
+ */
+ len = inq_buf->additional_length
+ + offsetof(struct scsi_inquiry_data,
+ additional_length) + 1;
+ if (softc->action == PROBE_INQUIRY
+ && len > SHORT_INQUIRY_LENGTH) {
+ PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY);
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+
+ scsi_find_quirk(path->device);
+
+// scsi_devise_transport(path);
+ path->device->flags &= ~CAM_DEV_UNCONFIGURED;
+ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
+ xpt_action(done_ccb);
+ xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
+ done_ccb);
+ xpt_release_ccb(done_ccb);
+ break;
+ }
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ }
+ case PROBE_PM_PID:
+ /* Read the port multiplier product ID from the result regs. */
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0)
+ bzero(ident_buf, sizeof(*ident_buf));
+ softc->pm_pid = (done_ccb->ataio.res.lba_high << 24) +
+ (done_ccb->ataio.res.lba_mid << 16) +
+ (done_ccb->ataio.res.lba_low << 8) +
+ done_ccb->ataio.res.sector_count;
+ printf("PM Product ID: %08x\n", softc->pm_pid);
+ snprintf(ident_buf->model, sizeof(ident_buf->model),
+ "Port Multiplier %08x", softc->pm_pid);
+ PROBE_SET_ACTION(softc, PROBE_PM_PRV);
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ case PROBE_PM_PRV:
+ /* Read the port multiplier revision. */
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ softc->pm_prv = (done_ccb->ataio.res.lba_high << 24) +
+ (done_ccb->ataio.res.lba_mid << 16) +
+ (done_ccb->ataio.res.lba_low << 8) +
+ done_ccb->ataio.res.sector_count;
+ printf("PM Revision: %08x\n", softc->pm_prv);
+ snprintf(ident_buf->revision, sizeof(ident_buf->revision),
+ "%04x", softc->pm_prv);
+ PROBE_SET_ACTION(softc, PROBE_PM_PORTS);
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ case PROBE_PM_PORTS:
+ /* Read the port count; quirk known-bogus PM port counts. */
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ softc->pm_ports = (done_ccb->ataio.res.lba_high << 24) +
+ (done_ccb->ataio.res.lba_mid << 16) +
+ (done_ccb->ataio.res.lba_low << 8) +
+ done_ccb->ataio.res.sector_count;
+ /* This PM declares 6 ports, while only 5 of them are real.
+ * Port 5 is enclosure management bridge port, which has implementation
+ * problems, causing probe faults. Hide it for now. */
+ if (softc->pm_pid == 0x37261095 && softc->pm_ports == 6)
+ softc->pm_ports = 5;
+ /* This PM declares 7 ports, while only 5 of them are real.
+ * Port 5 is some fake "Config Disk" with 640 sectors size,
+ * port 6 is enclosure management bridge port.
+ * Both fake ports has implementation problems, causing
+ * probe faults. Hide them for now. */
+ if (softc->pm_pid == 0x47261095 && softc->pm_ports == 7)
+ softc->pm_ports = 5;
+ printf("PM ports: %d\n", softc->pm_ports);
+ ident_buf->config = softc->pm_ports;
+ path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
+ softc->pm_step = 0;
+ PROBE_SET_ACTION(softc, PROBE_PM_RESET);
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ case PROBE_PM_RESET:
+ /* Iterate pm_step over all PM ports, resetting each. */
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ softc->pm_step++;
+ if (softc->pm_step < softc->pm_ports) {
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else {
+ softc->pm_step = 0;
+ DELAY(5000);
+ printf("PM reset done\n");
+ PROBE_SET_ACTION(softc, PROBE_PM_CONNECT);
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ case PROBE_PM_CONNECT:
+ /* Iterate over all PM ports, initiating connection. */
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ softc->pm_step++;
+ if (softc->pm_step < softc->pm_ports) {
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else {
+ softc->pm_step = 0;
+ softc->pm_try = 0;
+ printf("PM connect done\n");
+ PROBE_SET_ACTION(softc, PROBE_PM_CHECK);
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ case PROBE_PM_CHECK:
+ /*
+ * Poll each port's SStatus; record attached devices as a
+ * bitmask in ident_buf->cylinders.  Ports that do not come
+ * up are retried up to 100 times (~1s) before giving up.
+ */
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ int res = (done_ccb->ataio.res.lba_high << 24) +
+ (done_ccb->ataio.res.lba_mid << 16) +
+ (done_ccb->ataio.res.lba_low << 8) +
+ done_ccb->ataio.res.sector_count;
+ if ((res & 0xf0f) == 0x103 && (res & 0x0f0) != 0) {
+ printf("PM status: %d - %08x\n", softc->pm_step, res);
+ ident_buf->cylinders |= (1 << softc->pm_step);
+ softc->pm_step++;
+ } else {
+ if (softc->pm_try < 100) {
+ DELAY(10000);
+ softc->pm_try++;
+ } else {
+ printf("PM status: %d - %08x\n", softc->pm_step, res);
+ ident_buf->cylinders &= ~(1 << softc->pm_step);
+ softc->pm_step++;
+ }
+ }
+ if (softc->pm_step < softc->pm_ports) {
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else {
+ softc->pm_step = 0;
+ PROBE_SET_ACTION(softc, PROBE_PM_CLEAR);
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ case PROBE_PM_CLEAR:
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ softc->pm_step++;
+ if (softc->pm_step < softc->pm_ports) {
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ /* Result: attached-port bitmap, 0x8000 = PM found. */
+ found = ident_buf->cylinders | 0x8000;
+ if (path->device->flags & CAM_DEV_UNCONFIGURED) {
+ path->device->flags &= ~CAM_DEV_UNCONFIGURED;
+ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
+ xpt_action(done_ccb);
+ xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
+ done_ccb);
+ }
+ /*
+ * Release the CCB on both paths; previously it was only
+ * released when the PM was newly configured, leaking it
+ * on a rescan of an already-configured PM.
+ */
+ xpt_release_ccb(done_ccb);
+ break;
+ } else if (cam_periph_error(done_ccb, 0, 0,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+ goto device_fail;
+ case PROBE_INVALID:
+ CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_INFO,
+ ("probedone: invalid action state\n"));
+ default:
+ break;
+ }
+ /*
+ * Probe finished (every non-return path above breaks to here).
+ * Complete the head request CCB, passing the probe result via
+ * ppriv_field1, then either tear the periph down or start the
+ * next queued probe request.
+ */
+ done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
+ TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
+ done_ccb->ccb_h.status = CAM_REQ_CMP;
+ done_ccb->ccb_h.ppriv_field1 = found;
+ xpt_done(done_ccb);
+ if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
+ cam_periph_invalidate(periph);
+ cam_periph_release_locked(periph);
+ } else {
+ probeschedule(periph);
+ }
+}
+
+/*
+ * Periph destructor: release the probe_softc allocated at
+ * registration time.  Nothing else in the softc owns resources.
+ */
+static void
+probecleanup(struct cam_periph *periph)
+{
+ probe_softc *softc;
+
+ softc = (probe_softc *)periph->softc;
+ free(softc, M_CAMXPT);
+}
+
+/*
+ * Match the device's INQUIRY data against the SCSI quirk table and
+ * install the winning entry's tag limits on the device.  The table is
+ * terminated by a wildcard entry, so a NULL match means the table
+ * itself is broken and we panic.
+ */
+static void
+scsi_find_quirk(struct cam_ed *device)
+{
+ struct scsi_quirk_entry *quirk;
+ caddr_t match;
+
+ match = cam_quirkmatch((caddr_t)&device->inq_data,
+ (caddr_t)scsi_quirk_table,
+ sizeof(scsi_quirk_table) /
+ sizeof(*scsi_quirk_table),
+ sizeof(*scsi_quirk_table), scsi_inquiry_match);
+
+ if (match == NULL)
+ /* Name the real function (was stale "xpt_find_quirk"). */
+ panic("scsi_find_quirk: device didn't match wildcard entry!!");
+
+ quirk = (struct scsi_quirk_entry *)match;
+ device->quirk = quirk;
+ device->mintags = quirk->mintags;
+ device->maxtags = quirk->maxtags;
+}
+
+typedef struct {
+ union ccb *request_ccb; /* original XPT_SCAN_BUS request */
+ struct ccb_pathinq *cpi; /* bus characteristics (freed at scan end) */
+ int counter; /* target currently being scanned; starts
+ at 15 (PM slot) when PI_SATAPM is set */
+ int found; /* bitmask of targets to probe (bit 15 = PM) */
+} ata_scan_bus_info;
+
+/*
+ * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
+ * As the scan progresses, ata_scan_bus is used as the
+ * completion callback function.
+ */
+/*
+ * Bus-scan state machine.  Entered once with XPT_SCAN_BUS to set up
+ * scan state, then re-entered as the completion callback of each
+ * per-target XPT_SCAN_LUN it issues.  The PM slot (target 15) is
+ * probed first when the SIM advertises PI_SATAPM; the PM probe's
+ * ppriv_field1 result becomes the bitmask of ports to scan.
+ */
+static void
+ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
+{
+ struct cam_path *path;
+ ata_scan_bus_info *scan_info;
+ union ccb *work_ccb;
+ cam_status status;
+
+ CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
+ ("xpt_scan_bus\n"));
+ switch (request_ccb->ccb_h.func_code) {
+ case XPT_SCAN_BUS:
+ /* Find out the characteristics of the bus */
+ work_ccb = xpt_alloc_ccb_nowait();
+ if (work_ccb == NULL) {
+ request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ xpt_done(request_ccb);
+ return;
+ }
+ xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
+ request_ccb->ccb_h.pinfo.priority);
+ work_ccb->ccb_h.func_code = XPT_PATH_INQ;
+ xpt_action(work_ccb);
+ if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
+ request_ccb->ccb_h.status = work_ccb->ccb_h.status;
+ xpt_free_ccb(work_ccb);
+ xpt_done(request_ccb);
+ return;
+ }
+
+ /* Save some state for use while we probe for devices */
+ scan_info = (ata_scan_bus_info *)
+ malloc(sizeof(ata_scan_bus_info), M_CAMXPT, M_NOWAIT);
+ if (scan_info == NULL) {
+ request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ xpt_done(request_ccb);
+ return;
+ }
+ scan_info->request_ccb = request_ccb;
+ scan_info->cpi = &work_ccb->cpi;
+ scan_info->found = 0x8001;
+ scan_info->counter = 0;
+ /* If PM supported, probe it first. */
+ if (scan_info->cpi->hba_inquiry & PI_SATAPM)
+ scan_info->counter = 15;
+
+ work_ccb = xpt_alloc_ccb_nowait();
+ if (work_ccb == NULL) {
+ free(scan_info, M_CAMXPT);
+ request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ xpt_done(request_ccb);
+ break;
+ }
+ goto scan_next;
+ case XPT_SCAN_LUN:
+ work_ccb = request_ccb;
+ /* Reuse the same CCB to query if a device was really found */
+ scan_info = (ata_scan_bus_info *)work_ccb->ccb_h.ppriv_ptr0;
+ /* Free the current request path- we're done with it. */
+ xpt_free_path(work_ccb->ccb_h.path);
+ /* If there is PM... */
+ if (scan_info->counter == 15) {
+ if (work_ccb->ccb_h.ppriv_field1 != 0) {
+ /* Save PM probe result. */
+ scan_info->found = work_ccb->ccb_h.ppriv_field1;
+ } else {
+ struct ccb_trans_settings cts;
+
+ /* Report SIM that PM is absent. */
+ bzero(&cts, sizeof(cts));
+ xpt_setup_ccb(&cts.ccb_h,
+ scan_info->request_ccb->ccb_h.path, 1);
+ cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
+ cts.type = CTS_TYPE_CURRENT_SETTINGS;
+ /*
+ * Fixed: was 1, which told the SIM a PM *is*
+ * present, contradicting this branch's intent
+ * and the present-case in probedone().
+ */
+ cts.xport_specific.sata.pm_present = 0;
+ cts.xport_specific.sata.valid = CTS_SATA_VALID_PM;
+ xpt_action((union ccb *)&cts);
+ }
+ }
+take_next:
+ /* Take next device. Wrap from 15 (PM) to 0. */
+ scan_info->counter = (scan_info->counter + 1 ) & 0x0f;
+ if (scan_info->counter >= scan_info->cpi->max_target+1) {
+ xpt_free_ccb(work_ccb);
+ xpt_free_ccb((union ccb *)scan_info->cpi);
+ request_ccb = scan_info->request_ccb;
+ free(scan_info, M_CAMXPT);
+ request_ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(request_ccb);
+ break;
+ }
+scan_next:
+ status = xpt_create_path(&path, xpt_periph,
+ scan_info->request_ccb->ccb_h.path_id,
+ scan_info->counter, 0);
+ if (status != CAM_REQ_CMP) {
+ printf("xpt_scan_bus: xpt_create_path failed"
+ " with status %#x, bus scan halted\n",
+ status);
+ xpt_free_ccb(work_ccb);
+ xpt_free_ccb((union ccb *)scan_info->cpi);
+ request_ccb = scan_info->request_ccb;
+ free(scan_info, M_CAMXPT);
+ request_ccb->ccb_h.status = status;
+ xpt_done(request_ccb);
+ break;
+ }
+ if ((scan_info->found & (1 << scan_info->counter)) == 0) {
+ /* Port known-empty: report loss instead of probing. */
+ xpt_async(AC_LOST_DEVICE, path, NULL);
+ xpt_free_path(path);
+ goto take_next;
+ }
+ xpt_setup_ccb(&work_ccb->ccb_h, path,
+ scan_info->request_ccb->ccb_h.pinfo.priority);
+ work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
+ work_ccb->ccb_h.cbfcnp = ata_scan_bus;
+ work_ccb->ccb_h.ppriv_ptr0 = scan_info;
+ work_ccb->crcn.flags = scan_info->request_ccb->crcn.flags;
+ xpt_action(work_ccb);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Scan a single device path.  If request_ccb is NULL (internally
+ * generated rescan) a CCB and path are allocated here and completed
+ * via xptscandone(), which frees both.  The request is then either
+ * queued onto an existing "probe" periph for this path or a new one
+ * is allocated to service it.
+ */
+static void
+ata_scan_lun(struct cam_periph *periph, struct cam_path *path,
+ cam_flags flags, union ccb *request_ccb)
+{
+ struct ccb_pathinq cpi;
+ cam_status status;
+ struct cam_path *new_path;
+ struct cam_periph *old_periph;
+
+ CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
+ ("xpt_scan_lun\n"));
+
+ xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
+ cpi.ccb_h.func_code = XPT_PATH_INQ;
+ xpt_action((union ccb *)&cpi);
+
+ if (cpi.ccb_h.status != CAM_REQ_CMP) {
+ if (request_ccb != NULL) {
+ request_ccb->ccb_h.status = cpi.ccb_h.status;
+ xpt_done(request_ccb);
+ }
+ return;
+ }
+
+ if (request_ccb == NULL) {
+ /* Internally triggered rescan: build our own CCB + path. */
+ request_ccb = malloc(sizeof(union ccb), M_CAMXPT, M_NOWAIT);
+ if (request_ccb == NULL) {
+ xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
+ "can't continue\n");
+ return;
+ }
+ new_path = malloc(sizeof(*new_path), M_CAMXPT, M_NOWAIT);
+ if (new_path == NULL) {
+ xpt_print(path, "xpt_scan_lun: can't allocate path, "
+ "can't continue\n");
+ free(request_ccb, M_CAMXPT);
+ return;
+ }
+ status = xpt_compile_path(new_path, xpt_periph,
+ path->bus->path_id,
+ path->target->target_id,
+ path->device->lun_id);
+
+ if (status != CAM_REQ_CMP) {
+ xpt_print(path, "xpt_scan_lun: can't compile path, "
+ "can't continue\n");
+ free(request_ccb, M_CAMXPT);
+ free(new_path, M_CAMXPT);
+ return;
+ }
+ xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
+ /* xptscandone() frees new_path and request_ccb. */
+ request_ccb->ccb_h.cbfcnp = xptscandone;
+ request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
+ request_ccb->crcn.flags = flags;
+ }
+
+ if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
+ /* A probe is already in flight here; queue behind it. */
+ probe_softc *softc;
+
+ softc = (probe_softc *)old_periph->softc;
+ TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
+ periph_links.tqe);
+ } else {
+ status = cam_periph_alloc(proberegister, NULL, probecleanup,
+ probestart, "probe",
+ CAM_PERIPH_BIO,
+ request_ccb->ccb_h.path, NULL, 0,
+ request_ccb);
+
+ if (status != CAM_REQ_CMP) {
+ xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
+ "returned an error, can't continue probe\n");
+ request_ccb->ccb_h.status = status;
+ xpt_done(request_ccb);
+ }
+ }
+}
+
+/*
+ * Completion callback for internally generated rescans: the path and
+ * CCB were both malloc'd by ata_scan_lun(), so tear them down here.
+ */
+static void
+xptscandone(struct cam_periph *periph, union ccb *done_ccb)
+{
+ struct cam_path *scan_path;
+
+ scan_path = done_ccb->ccb_h.path;
+ xpt_release_path(scan_path);
+ free(scan_path, M_CAMXPT);
+ free(done_ccb, M_CAMXPT);
+}
+
+/*
+ * Transport hook: allocate and initialize a device node on the given
+ * target.  Installs the wildcard quirk entry until IDENTIFY/INQUIRY
+ * data allows a real match, inserts the device into the target's list
+ * sorted by LUN, and seeds transport info from the SIM.
+ */
+static struct cam_ed *
+ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
+{
+ struct cam_path path;
+ struct scsi_quirk_entry *quirk;
+ struct cam_ed *device;
+ struct cam_ed *cur_device;
+
+ device = xpt_alloc_device(bus, target, lun_id);
+ if (device == NULL)
+ return (NULL);
+
+ /*
+ * Take the default quirk entry until we have inquiry
+ * data and can determine a better quirk to use.
+ */
+ quirk = &scsi_quirk_table[scsi_quirk_table_size - 1];
+ device->quirk = (void *)quirk;
+ device->mintags = quirk->mintags;
+ device->maxtags = quirk->maxtags;
+ bzero(&device->inq_data, sizeof(device->inq_data));
+ device->inq_flags = 0;
+ device->queue_flags = 0;
+ device->serial_num = NULL;
+ device->serial_num_len = 0;
+
+ /*
+ * XXX should be limited by number of CCBs this bus can
+ * do.
+ */
+ bus->sim->max_ccbs += device->ccbq.devq_openings;
+ /* Insertion sort into our target's device list */
+ cur_device = TAILQ_FIRST(&target->ed_entries);
+ while (cur_device != NULL && cur_device->lun_id < lun_id)
+ cur_device = TAILQ_NEXT(cur_device, links);
+ if (cur_device != NULL) {
+ TAILQ_INSERT_BEFORE(cur_device, device, links);
+ } else {
+ TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
+ }
+ target->generation++;
+ if (lun_id != CAM_LUN_WILDCARD) {
+ /* Stack-compiled throwaway path, released immediately. */
+ xpt_compile_path(&path,
+ NULL,
+ bus->path_id,
+ target->target_id,
+ lun_id);
+ ata_device_transport(&path);
+ xpt_release_path(&path);
+ }
+
+ return (device);
+}
+
+/*
+ * Copy the transport type/version advertised by the SIM (via
+ * XPT_PATH_INQ) onto the device.  The #if 0 block below is the SPI
+ * version-negotiation logic inherited from the SCSI XPT, kept for
+ * reference while the ATA transport layer takes shape; it is not
+ * compiled.
+ */
+static void
+ata_device_transport(struct cam_path *path)
+{
+ struct ccb_pathinq cpi;
+// struct ccb_trans_settings cts;
+ struct scsi_inquiry_data *inq_buf;
+
+ /* Get transport information from the SIM */
+ xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
+ cpi.ccb_h.func_code = XPT_PATH_INQ;
+ xpt_action((union ccb *)&cpi);
+
+ inq_buf = NULL;
+// if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
+// inq_buf = &path->device->inq_data;
+// path->device->protocol = cpi.protocol;
+// path->device->protocol_version =
+// inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
+ path->device->transport = cpi.transport;
+ path->device->transport_version = cpi.transport_version;
+#if 0
+ /*
+ * Any device not using SPI3 features should
+ * be considered SPI2 or lower.
+ */
+ if (inq_buf != NULL) {
+ if (path->device->transport == XPORT_SPI
+ && (inq_buf->spi3data & SID_SPI_MASK) == 0
+ && path->device->transport_version > 2)
+ path->device->transport_version = 2;
+ } else {
+ struct cam_ed* otherdev;
+
+ for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
+ otherdev != NULL;
+ otherdev = TAILQ_NEXT(otherdev, links)) {
+ if (otherdev != path->device)
+ break;
+ }
+
+ if (otherdev != NULL) {
+ /*
+ * Initially assume the same versioning as
+ * prior luns for this target.
+ */
+ path->device->protocol_version =
+ otherdev->protocol_version;
+ path->device->transport_version =
+ otherdev->transport_version;
+ } else {
+ /* Until we know better, opt for safty */
+ path->device->protocol_version = 2;
+ if (path->device->transport == XPORT_SPI)
+ path->device->transport_version = 2;
+ else
+ path->device->transport_version = 0;
+ }
+ }
+
+ /*
+ * XXX
+ * For a device compliant with SPC-2 we should be able
+ * to determine the transport version supported by
+ * scrutinizing the version descriptors in the
+ * inquiry buffer.
+ */
+
+ /* Tell the controller what we think */
+ xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
+ cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
+ cts.type = CTS_TYPE_CURRENT_SETTINGS;
+ cts.transport = path->device->transport;
+ cts.transport_version = path->device->transport_version;
+ cts.protocol = path->device->protocol;
+ cts.protocol_version = path->device->protocol_version;
+ cts.proto_specific.valid = 0;
+ cts.xport_specific.valid = 0;
+ xpt_action((union ccb *)&cts);
+#endif
+}
+
+/*
+ * ATA transport's xpt_action hook.  Intercepts the CCB function codes
+ * the ATA XPT handles itself (transfer settings, bus/LUN scans) and
+ * forwards everything else to the generic xpt_action_default().
+ */
+static void
+ata_action(union ccb *start_ccb)
+{
+
+ switch (start_ccb->ccb_h.func_code) {
+ case XPT_SET_TRAN_SETTINGS:
+ {
+ scsi_set_transfer_settings(&start_ccb->cts,
+ start_ccb->ccb_h.path->device,
+ /*async_update*/FALSE);
+ break;
+ }
+ case XPT_SCAN_BUS:
+ ata_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
+ break;
+ case XPT_SCAN_LUN:
+ ata_scan_lun(start_ccb->ccb_h.path->periph,
+ start_ccb->ccb_h.path, start_ccb->crcn.flags,
+ start_ccb);
+ break;
+ case XPT_GET_TRAN_SETTINGS:
+ {
+ /* Pass GET straight to the SIM; no XPT-level filtering. */
+ struct cam_sim *sim;
+
+ sim = start_ccb->ccb_h.path->bus->sim;
+ (*(sim->sim_action))(sim, start_ccb);
+ break;
+ }
+ default:
+ xpt_action_default(start_ccb);
+ break;
+ }
+}
+
+/*
+ * Sanity-check and apply transfer settings for a device, then (unless
+ * this is an async update) forward them to the SIM.  Carried over from
+ * the SCSI XPT: for non-SCSI protocols it is a straight pass-through;
+ * for SCSI it clamps protocol/transport versions to what the device
+ * reported, reconciles tag-queueing capability, applies SPI-specific
+ * width/sync/PPR constraints, and handles the tagged/untagged queue
+ * transition carefully to avoid mixing tagged and untagged commands.
+ */
+static void
+scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
+ int async_update)
+{
+ struct ccb_pathinq cpi;
+ struct ccb_trans_settings cur_cts;
+ struct ccb_trans_settings_scsi *scsi;
+ struct ccb_trans_settings_scsi *cur_scsi;
+ struct cam_sim *sim;
+ struct scsi_inquiry_data *inq_data;
+
+ if (device == NULL) {
+ cts->ccb_h.status = CAM_PATH_INVALID;
+ xpt_done((union ccb *)cts);
+ return;
+ }
+
+ /* Fill unspecified protocol/transport fields from the device. */
+ if (cts->protocol == PROTO_UNKNOWN
+ || cts->protocol == PROTO_UNSPECIFIED) {
+ cts->protocol = device->protocol;
+ cts->protocol_version = device->protocol_version;
+ }
+
+ if (cts->protocol_version == PROTO_VERSION_UNKNOWN
+ || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
+ cts->protocol_version = device->protocol_version;
+
+ if (cts->protocol != device->protocol) {
+ xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
+ cts->protocol, device->protocol);
+ cts->protocol = device->protocol;
+ }
+
+ if (cts->protocol_version > device->protocol_version) {
+ if (bootverbose) {
+ xpt_print(cts->ccb_h.path, "Down reving Protocol "
+ "Version from %d to %d?\n", cts->protocol_version,
+ device->protocol_version);
+ }
+ cts->protocol_version = device->protocol_version;
+ }
+
+ if (cts->transport == XPORT_UNKNOWN
+ || cts->transport == XPORT_UNSPECIFIED) {
+ cts->transport = device->transport;
+ cts->transport_version = device->transport_version;
+ }
+
+ if (cts->transport_version == XPORT_VERSION_UNKNOWN
+ || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
+ cts->transport_version = device->transport_version;
+
+ if (cts->transport != device->transport) {
+ xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
+ cts->transport, device->transport);
+ cts->transport = device->transport;
+ }
+
+ if (cts->transport_version > device->transport_version) {
+ if (bootverbose) {
+ xpt_print(cts->ccb_h.path, "Down reving Transport "
+ "Version from %d to %d?\n", cts->transport_version,
+ device->transport_version);
+ }
+ cts->transport_version = device->transport_version;
+ }
+
+ sim = cts->ccb_h.path->bus->sim;
+
+ /*
+ * Nothing more of interest to do unless
+ * this is a device connected via the
+ * SCSI protocol.
+ */
+ if (cts->protocol != PROTO_SCSI) {
+ if (async_update == FALSE)
+ (*(sim->sim_action))(sim, (union ccb *)cts);
+ return;
+ }
+
+ inq_data = &device->inq_data;
+ scsi = &cts->proto_specific.scsi;
+ xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
+ cpi.ccb_h.func_code = XPT_PATH_INQ;
+ xpt_action((union ccb *)&cpi);
+
+ /* SCSI specific sanity checking */
+ if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
+ || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
+ || (device->queue_flags & SCP_QUEUE_DQUE) != 0
+ || (device->mintags == 0)) {
+ /*
+ * Can't tag on hardware that doesn't support tags,
+ * doesn't have it enabled, or has broken tag support.
+ */
+ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
+ }
+
+ if (async_update == FALSE) {
+ /*
+ * Perform sanity checking against what the
+ * controller and device can do.
+ */
+ xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
+ cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
+ cur_cts.type = cts->type;
+ xpt_action((union ccb *)&cur_cts);
+ if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+ return;
+ }
+ cur_scsi = &cur_cts.proto_specific.scsi;
+ if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
+ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
+ scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
+ }
+ if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
+ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
+ }
+
+ /* SPI specific sanity checking */
+ if (cts->transport == XPORT_SPI && async_update == FALSE) {
+ u_int spi3caps;
+ struct ccb_trans_settings_spi *spi;
+ struct ccb_trans_settings_spi *cur_spi;
+
+ spi = &cts->xport_specific.spi;
+
+ /* cur_cts was populated above (async_update == FALSE). */
+ cur_spi = &cur_cts.xport_specific.spi;
+
+ /* Fill in any gaps in what the user gave us */
+ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
+ spi->sync_period = cur_spi->sync_period;
+ if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
+ spi->sync_period = 0;
+ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
+ spi->sync_offset = cur_spi->sync_offset;
+ if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
+ spi->sync_offset = 0;
+ if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
+ spi->ppr_options = cur_spi->ppr_options;
+ if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
+ spi->ppr_options = 0;
+ if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
+ spi->bus_width = cur_spi->bus_width;
+ if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
+ spi->bus_width = 0;
+ if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
+ spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
+ spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
+ }
+ if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
+ spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
+ if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
+ && (inq_data->flags & SID_Sync) == 0
+ && cts->type == CTS_TYPE_CURRENT_SETTINGS)
+ || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) {
+ /* Force async */
+ spi->sync_period = 0;
+ spi->sync_offset = 0;
+ }
+
+ switch (spi->bus_width) {
+ case MSG_EXT_WDTR_BUS_32_BIT:
+ if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
+ || (inq_data->flags & SID_WBus32) != 0
+ || cts->type == CTS_TYPE_USER_SETTINGS)
+ && (cpi.hba_inquiry & PI_WIDE_32) != 0)
+ break;
+ /* Fall Through to 16-bit */
+ case MSG_EXT_WDTR_BUS_16_BIT:
+ if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
+ || (inq_data->flags & SID_WBus16) != 0
+ || cts->type == CTS_TYPE_USER_SETTINGS)
+ && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
+ spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+ break;
+ }
+ /* Fall Through to 8-bit */
+ default: /* New bus width?? */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ /* All targets can do this */
+ spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ break;
+ }
+
+ spi3caps = cpi.xport_specific.spi.ppr_options;
+ if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
+ && cts->type == CTS_TYPE_CURRENT_SETTINGS)
+ spi3caps &= inq_data->spi3data;
+
+ if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
+ spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+
+ if ((spi3caps & SID_SPI_IUS) == 0)
+ spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
+
+ if ((spi3caps & SID_SPI_QAS) == 0)
+ spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
+
+ /* No SPI Transfer settings are allowed unless we are wide */
+ if (spi->bus_width == 0)
+ spi->ppr_options = 0;
+
+ if ((spi->valid & CTS_SPI_VALID_DISC)
+ && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) {
+ /*
+ * Can't tag queue without disconnection.
+ */
+ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
+ scsi->valid |= CTS_SCSI_VALID_TQ;
+ }
+
+ /*
+ * If we are currently performing tagged transactions to
+ * this device and want to change its negotiation parameters,
+ * go non-tagged for a bit to give the controller a chance to
+ * negotiate unhampered by tag messages.
+ */
+ if (cts->type == CTS_TYPE_CURRENT_SETTINGS
+ && (device->inq_flags & SID_CmdQue) != 0
+ && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
+ && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
+ CTS_SPI_VALID_SYNC_OFFSET|
+ CTS_SPI_VALID_BUS_WIDTH)) != 0)
+ scsi_toggle_tags(cts->ccb_h.path);
+ }
+
+ if (cts->type == CTS_TYPE_CURRENT_SETTINGS
+ && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
+ int device_tagenb;
+
+ /*
+ * If we are transitioning from tags to no-tags or
+ * vice-versa, we need to carefully freeze and restart
+ * the queue so that we don't overlap tagged and non-tagged
+ * commands. We also temporarily stop tags if there is
+ * a change in transfer negotiation settings to allow
+ * "tag-less" negotiation.
+ */
+ if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
+ || (device->inq_flags & SID_CmdQue) != 0)
+ device_tagenb = TRUE;
+ else
+ device_tagenb = FALSE;
+
+ if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
+ && device_tagenb == FALSE)
+ || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
+ && device_tagenb == TRUE)) {
+
+ if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
+ /*
+ * Delay change to use tags until after a
+ * few commands have gone to this device so
+ * the controller has time to perform transfer
+ * negotiations without tagged messages getting
+ * in the way.
+ */
+ device->tag_delay_count = CAM_TAG_DELAY_COUNT;
+ device->flags |= CAM_DEV_TAG_AFTER_COUNT;
+ } else {
+ struct ccb_relsim crs;
+
+ xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
+ device->inq_flags &= ~SID_CmdQue;
+ xpt_dev_ccbq_resize(cts->ccb_h.path,
+ sim->max_dev_openings);
+ device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
+ device->tag_delay_count = 0;
+
+ xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
+ /*priority*/1);
+ crs.ccb_h.func_code = XPT_REL_SIMQ;
+ crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
+ crs.openings
+ = crs.release_timeout
+ = crs.qfrozen_cnt
+ = 0;
+ xpt_action((union ccb *)&crs);
+ }
+ }
+ }
+ if (async_update == FALSE)
+ (*(sim->sim_action))(sim, (union ccb *)cts);
+}
+
+/*
+ * Briefly disable and then re-enable tagged queueing on a device so the
+ * controller gets a window to renegotiate transfer parameters without
+ * tagged messages in flight.  Re-enabling goes through the tag-enable
+ * delay machinery in scsi_set_transfer_settings(), so tags resume only
+ * after a few untagged commands have completed.
+ */
+static void
+scsi_toggle_tags(struct cam_path *path)
+{
+	struct cam_ed *dev;
+	struct ccb_trans_settings cts;
+
+	dev = path->device;
+
+	/*
+	 * Only bother when the device is (or is about to be) running
+	 * tagged commands and advertises some negotiable transfer
+	 * capability (sync or wide).
+	 */
+	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) == 0
+	 && ((dev->inq_flags & SID_CmdQue) == 0
+	  || (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) == 0))
+		return;
+
+	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
+	cts.protocol = PROTO_SCSI;
+	cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
+	cts.transport = XPORT_UNSPECIFIED;
+	cts.transport_version = XPORT_VERSION_UNSPECIFIED;
+	cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
+
+	/* Turn tagged queueing off... */
+	cts.proto_specific.scsi.flags = 0;
+	scsi_set_transfer_settings(&cts, path->device,
+				   /*async_update*/TRUE);
+
+	/* ...then back on, arming the tag-enable command delay counter. */
+	cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
+	scsi_set_transfer_settings(&cts, path->device,
+				   /*async_update*/TRUE);
+}
+
+/*
+ * Handle any per-device event notifications that require action by the XPT.
+ *
+ * async_code identifies the event; async_arg is event-specific (for
+ * AC_TRANSFER_NEG it carries a struct ccb_trans_settings).  bus, target,
+ * and device name the object the event applies to.
+ *
+ * NOTE(review): this ATA async handler reuses the SCSI-side helpers
+ * (scsi_toggle_tags / scsi_set_transfer_settings) — presumably carried
+ * over during the SCSI->ATA split; confirm this is intentional.
+ */
+static void
+ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
+ struct cam_ed *device, void *async_arg)
+{
+ cam_status status;
+ struct cam_path newpath;
+
+ /*
+ * We only need to handle events for real devices.
+ */
+ if (target->target_id == CAM_TARGET_WILDCARD
+ || device->lun_id == CAM_LUN_WILDCARD)
+ return;
+
+ /*
+ * We need our own path with wildcards expanded to
+ * handle certain types of events.
+ */
+ if ((async_code == AC_SENT_BDR)
+ || (async_code == AC_BUS_RESET)
+ || (async_code == AC_INQ_CHANGED))
+ status = xpt_compile_path(&newpath, NULL,
+ bus->path_id,
+ target->target_id,
+ device->lun_id);
+ else
+ status = CAM_REQ_CMP_ERR;
+
+ /*
+ * If xpt_compile_path() failed for a reset/inquiry event, status is
+ * not CAM_REQ_CMP and none of the else-if codes below can match that
+ * async_code, so the event is silently dropped.
+ */
+ if (status == CAM_REQ_CMP) {
+
+ /*
+ * Allow transfer negotiation to occur in a
+ * tag free environment.
+ */
+ if (async_code == AC_SENT_BDR
+ || async_code == AC_BUS_RESET)
+ scsi_toggle_tags(&newpath);
+
+ if (async_code == AC_INQ_CHANGED) {
+ /*
+ * We've sent a start unit command, or
+ * something similar to a device that
+ * may have caused its inquiry data to
+ * change. So we re-scan the device to
+ * refresh the inquiry data for it.
+ */
+ ata_scan_lun(newpath.periph, &newpath,
+ CAM_EXPECT_INQ_CHANGE, NULL);
+ }
+ /* Drop the reference taken by xpt_compile_path() above. */
+ xpt_release_path(&newpath);
+ } else if (async_code == AC_LOST_DEVICE) {
+ /* Device went away; mark it for (re)configuration on return. */
+ device->flags |= CAM_DEV_UNCONFIGURED;
+ } else if (async_code == AC_TRANSFER_NEG) {
+ struct ccb_trans_settings *settings;
+
+ /* Record the newly negotiated transfer settings for this device. */
+ settings = (struct ccb_trans_settings *)async_arg;
+ scsi_set_transfer_settings(settings, device,
+ /*async_update*/TRUE);
+ }
+}
+
OpenPOWER on IntegriCloud