summaryrefslogtreecommitdiffstats
path: root/sys/cam
diff options
context:
space:
mode:
Diffstat (limited to 'sys/cam')
-rw-r--r--sys/cam/ata/ata_all.c259
-rw-r--r--sys/cam/ata/ata_all.h17
-rw-r--r--sys/cam/ata/ata_da.c1342
-rw-r--r--sys/cam/cam_ccb.h12
-rw-r--r--sys/cam/ctl/scsi_ctl.c23
-rw-r--r--sys/cam/scsi/scsi_all.c301
-rw-r--r--sys/cam/scsi/scsi_all.h96
-rw-r--r--sys/cam/scsi/scsi_da.c1806
-rw-r--r--sys/cam/scsi/scsi_da.h112
9 files changed, 3742 insertions, 226 deletions
diff --git a/sys/cam/ata/ata_all.c b/sys/cam/ata/ata_all.c
index 51231b7..36c1f35 100644
--- a/sys/cam/ata/ata_all.c
+++ b/sys/cam/ata/ata_all.c
@@ -110,18 +110,45 @@ ata_op_string(struct ata_cmd *cmd)
case 0x3f: return ("WRITE_LOG_EXT");
case 0x40: return ("READ_VERIFY");
case 0x42: return ("READ_VERIFY48");
+ case 0x44: return ("ZERO_EXT");
case 0x45:
switch (cmd->features) {
case 0x55: return ("WRITE_UNCORRECTABLE48 PSEUDO");
case 0xaa: return ("WRITE_UNCORRECTABLE48 FLAGGED");
}
return "WRITE_UNCORRECTABLE48";
+ case 0x47: return ("READ_LOG_DMA_EXT");
+ case 0x4a: return ("ZAC_MANAGEMENT_IN");
case 0x51: return ("CONFIGURE_STREAM");
case 0x60: return ("READ_FPDMA_QUEUED");
case 0x61: return ("WRITE_FPDMA_QUEUED");
- case 0x63: return ("NCQ_NON_DATA");
- case 0x64: return ("SEND_FPDMA_QUEUED");
- case 0x65: return ("RECEIVE_FPDMA_QUEUED");
+ case 0x63:
+ switch (cmd->features & 0xf) {
+ case 0x00: return ("NCQ_NON_DATA ABORT NCQ QUEUE");
+ case 0x01: return ("NCQ_NON_DATA DEADLINE HANDLING");
+ case 0x05: return ("NCQ_NON_DATA SET FEATURES");
+ /*
+ * XXX KDM need common decoding between NCQ and non-NCQ
+ * versions of SET FEATURES.
+ */
+ case 0x06: return ("NCQ_NON_DATA ZERO EXT");
+ case 0x07: return ("NCQ_NON_DATA ZAC MANAGEMENT OUT");
+ }
+ return ("NCQ_NON_DATA");
+ case 0x64:
+ switch (cmd->sector_count_exp & 0xf) {
+ case 0x00: return ("SEND_FPDMA_QUEUED DATA SET MANAGEMENT");
+ case 0x02: return ("SEND_FPDMA_QUEUED WRITE LOG DMA EXT");
+ case 0x03: return ("SEND_FPDMA_QUEUED ZAC MANAGEMENT OUT");
+ case 0x04: return ("SEND_FPDMA_QUEUED DATA SET MANAGEMENT XL");
+ }
+ return ("SEND_FPDMA_QUEUED");
+ case 0x65:
+ switch (cmd->sector_count_exp & 0xf) {
+ case 0x01: return ("RECEIVE_FPDMA_QUEUED READ LOG DMA EXT");
+ case 0x02: return ("RECEIVE_FPDMA_QUEUED ZAC MANAGEMENT IN");
+ }
+ return ("RECEIVE_FPDMA_QUEUED");
case 0x67:
if (cmd->features == 0xec)
return ("SEP_ATTN IDENTIFY");
@@ -136,6 +163,7 @@ ata_op_string(struct ata_cmd *cmd)
case 0x87: return ("CFA_TRANSLATE_SECTOR");
case 0x90: return ("EXECUTE_DEVICE_DIAGNOSTIC");
case 0x92: return ("DOWNLOAD_MICROCODE");
+ case 0x9a: return ("ZAC_MANAGEMENT_OUT");
case 0xa0: return ("PACKET");
case 0xa1: return ("ATAPI_IDENTIFY");
case 0xa2: return ("SERVICE");
@@ -179,23 +207,44 @@ ata_op_string(struct ata_cmd *cmd)
case 0xec: return ("ATA_IDENTIFY");
case 0xed: return ("MEDIA_EJECT");
case 0xef:
+ /*
+ * XXX KDM need common decoding between NCQ and non-NCQ
+ * versions of SET FEATURES.
+ */
switch (cmd->features) {
- case 0x03: return ("SETFEATURES SET TRANSFER MODE");
- case 0x02: return ("SETFEATURES ENABLE WCACHE");
- case 0x82: return ("SETFEATURES DISABLE WCACHE");
- case 0x06: return ("SETFEATURES ENABLE PUIS");
- case 0x86: return ("SETFEATURES DISABLE PUIS");
- case 0x07: return ("SETFEATURES SPIN-UP");
- case 0x10: return ("SETFEATURES ENABLE SATA FEATURE");
- case 0x90: return ("SETFEATURES DISABLE SATA FEATURE");
- case 0xaa: return ("SETFEATURES ENABLE RCACHE");
- case 0x55: return ("SETFEATURES DISABLE RCACHE");
+ case 0x02: return ("SETFEATURES ENABLE WCACHE");
+ case 0x03: return ("SETFEATURES SET TRANSFER MODE");
+ case 0x04: return ("SETFEATURES ENABLE APM");
+ case 0x06: return ("SETFEATURES ENABLE PUIS");
+ case 0x07: return ("SETFEATURES SPIN-UP");
+ case 0x0b: return ("SETFEATURES ENABLE WRITE READ VERIFY");
+ case 0x0c: return ("SETFEATURES ENABLE DEVICE LIFE CONTROL");
+ case 0x10: return ("SETFEATURES ENABLE SATA FEATURE");
+ case 0x41: return ("SETFEATURES ENABLE FREEFALL CONTROL");
+ case 0x43: return ("SETFEATURES SET MAX HOST INT SECT TIMES");
+ case 0x45: return ("SETFEATURES SET RATE BASIS");
+ case 0x4a: return ("SETFEATURES EXTENDED POWER CONDITIONS");
+ case 0x55: return ("SETFEATURES DISABLE RCACHE");
case 0x5d: return ("SETFEATURES ENABLE RELIRQ");
- case 0xdd: return ("SETFEATURES DISABLE RELIRQ");
case 0x5e: return ("SETFEATURES ENABLE SRVIRQ");
+ case 0x62: return ("SETFEATURES LONG PHYS SECT ALIGN ERC");
+ case 0x63: return ("SETFEATURES DSN");
+ case 0x66: return ("SETFEATURES DISABLE DEFAULTS");
+ case 0x82: return ("SETFEATURES DISABLE WCACHE");
+ case 0x85: return ("SETFEATURES DISABLE APM");
+ case 0x86: return ("SETFEATURES DISABLE PUIS");
+ case 0x8b: return ("SETFEATURES DISABLE WRITE READ VERIFY");
+ case 0x8c: return ("SETFEATURES DISABLE DEVICE LIFE CONTROL");
+ case 0x90: return ("SETFEATURES DISABLE SATA FEATURE");
+ case 0xaa: return ("SETFEATURES ENABLE RCACHE");
+ case 0xC1: return ("SETFEATURES DISABLE FREEFALL CONTROL");
+ case 0xC3: return ("SETFEATURES SENSE DATA REPORTING");
+ case 0xC4: return ("SETFEATURES NCQ SENSE DATA RETURN");
+ case 0xCC: return ("SETFEATURES ENABLE DEFAULTS");
+ case 0xdd: return ("SETFEATURES DISABLE RELIRQ");
case 0xde: return ("SETFEATURES DISABLE SRVIRQ");
- }
- return "SETFEATURES";
+ }
+ return "SETFEATURES";
case 0xf1: return ("SECURITY_SET_PASSWORD");
case 0xf2: return ("SECURITY_UNLOCK");
case 0xf3: return ("SECURITY_ERASE_PREPARE");
@@ -463,7 +512,8 @@ ata_48bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint16_t features,
cmd == ATA_WRITE_DMA_QUEUED48 ||
cmd == ATA_WRITE_DMA_QUEUED_FUA48 ||
cmd == ATA_WRITE_STREAM_DMA48 ||
- cmd == ATA_DATA_SET_MANAGEMENT)
+ cmd == ATA_DATA_SET_MANAGEMENT ||
+ cmd == ATA_READ_LOG_DMA_EXT)
ataio->cmd.flags |= CAM_ATAIO_DMA;
ataio->cmd.command = cmd;
ataio->cmd.features = features;
@@ -534,6 +584,36 @@ ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint32_t val)
}
void
+ata_read_log(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint32_t log_address, uint32_t page_number, uint16_t block_count,
+ uint32_t protocol, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint32_t timeout)
+{
+ uint64_t lba;
+
+ cam_fill_ataio(ataio,
+ /*retries*/ 1,
+ /*cbfcnp*/ cbfcnp,
+ /*flags*/ CAM_DIR_IN,
+ /*tag_action*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*timeout*/ timeout);
+
+ lba = (((uint64_t)page_number & 0xff00) << 32) |
+ ((page_number & 0x00ff) << 8) |
+ (log_address & 0xff);
+
+ ata_48bit_cmd(ataio,
+ /*cmd*/ (protocol & CAM_ATAIO_DMA) ? ATA_READ_LOG_DMA_EXT :
+ ATA_READ_LOG_EXT,
+ /*features*/ 0,
+ /*lba*/ lba,
+ /*sector_count*/ block_count);
+}
+
+void
ata_bswap(int8_t *buf, int len)
{
u_int16_t *ptr = (u_int16_t*)(buf + len);
@@ -893,3 +973,148 @@ semb_write_buffer(struct ccb_ataio *ataio,
length > 0 ? data_ptr[0] : 0, 0x80, length / 4);
}
+
+void
+ata_zac_mgmt_out(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ int use_ncq, uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint16_t sector_count, uint8_t *data_ptr,
+ uint32_t dxfer_len, uint32_t timeout)
+{
+ uint8_t command_out, ata_flags;
+ uint16_t features_out, sectors_out;
+ uint32_t auxiliary;
+
+ if (use_ncq == 0) {
+ command_out = ATA_ZAC_MANAGEMENT_OUT;
+ features_out = (zm_action & 0xf) | (zone_flags << 8);
+ if (dxfer_len == 0) {
+ ata_flags = 0;
+ sectors_out = 0;
+ } else {
+ ata_flags = CAM_ATAIO_DMA;
+ /* XXX KDM use sector count? */
+ sectors_out = ((dxfer_len >> 9) & 0xffff);
+ }
+ auxiliary = 0;
+ } else {
+ if (dxfer_len == 0) {
+ command_out = ATA_NCQ_NON_DATA;
+ features_out = ATA_NCQ_ZAC_MGMT_OUT;
+ sectors_out = 0;
+ } else {
+ command_out = ATA_SEND_FPDMA_QUEUED;
+
+ /* Note that we're defaulting to normal priority */
+ sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
+
+ /*
+ * For SEND FPDMA QUEUED, the transfer length is
+ * encoded in the FEATURE register, and 0 means
+ * that 65536 512 byte blocks are to be transferred.
+ * In practice, it seems unlikely that we'll see
+ * a transfer that large.
+ */
+ if (dxfer_len == (65536 * 512)) {
+ features_out = 0;
+ } else {
+ /*
+ * Yes, the caller can theoretically send a
+ * transfer larger than we can handle.
+ * Anyone using this function needs enough
+ * knowledge to avoid doing that.
+ */
+ features_out = ((dxfer_len >> 9) & 0xffff);
+ }
+ }
+ auxiliary = (zm_action & 0xf) | (zone_flags << 8);
+
+ ata_flags = CAM_ATAIO_FPDMA;
+ }
+
+ cam_fill_ataio(ataio,
+ /*retries*/ retries,
+ /*cbfcnp*/ cbfcnp,
+ /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
+ /*tag_action*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*timeout*/ timeout);
+
+ ata_48bit_cmd(ataio,
+ /*cmd*/ command_out,
+ /*features*/ features_out,
+ /*lba*/ zone_id,
+ /*sector_count*/ sectors_out);
+
+ ataio->cmd.flags |= ata_flags;
+ if (auxiliary != 0) {
+ ataio->ata_flags |= ATA_FLAG_AUX;
+ ataio->aux = auxiliary;
+ }
+}
+
+void
+ata_zac_mgmt_in(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ int use_ncq, uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint32_t timeout)
+{
+ uint8_t command_out, ata_flags;
+ uint16_t features_out, sectors_out;
+ uint32_t auxiliary;
+
+ if (use_ncq == 0) {
+ command_out = ATA_ZAC_MANAGEMENT_IN;
+ /* XXX KDM put a macro here */
+ features_out = (zm_action & 0xf) | (zone_flags << 8);
+ ata_flags = CAM_ATAIO_DMA;
+ sectors_out = ((dxfer_len >> 9) & 0xffff);
+ auxiliary = 0;
+ } else {
+ command_out = ATA_RECV_FPDMA_QUEUED;
+ sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
+ auxiliary = (zm_action & 0xf) | (zone_flags << 8),
+ ata_flags = CAM_ATAIO_FPDMA;
+ /*
+ * For RECEIVE FPDMA QUEUED, the transfer length is
+ * encoded in the FEATURE register, and 0 means
+ * that 65536 512 byte blocks are to be transferred.
+ * In practice, it is unlikely we will see a transfer that
+ * large.
+ */
+ if (dxfer_len == (65536 * 512)) {
+ features_out = 0;
+ } else {
+ /*
+ * Yes, the caller can theoretically request a
+ * transfer larger than we can handle.
+ * Anyone using this function needs enough
+ * knowledge to avoid doing that.
+ */
+ features_out = ((dxfer_len >> 9) & 0xffff);
+ }
+ }
+
+ cam_fill_ataio(ataio,
+ /*retries*/ retries,
+ /*cbfcnp*/ cbfcnp,
+ /*flags*/ CAM_DIR_IN,
+ /*tag_action*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*timeout*/ timeout);
+
+ ata_48bit_cmd(ataio,
+ /*cmd*/ command_out,
+ /*features*/ features_out,
+ /*lba*/ zone_id,
+ /*sector_count*/ sectors_out);
+
+ ataio->cmd.flags |= ata_flags;
+ if (auxiliary != 0) {
+ ataio->ata_flags |= ATA_FLAG_AUX;
+ ataio->aux = auxiliary;
+ }
+}
diff --git a/sys/cam/ata/ata_all.h b/sys/cam/ata/ata_all.h
index 433c61c..ea902d0 100644
--- a/sys/cam/ata/ata_all.h
+++ b/sys/cam/ata/ata_all.h
@@ -125,6 +125,11 @@ void ata_ncq_cmd(struct ccb_ataio *ataio, uint8_t cmd,
void ata_reset_cmd(struct ccb_ataio *ataio);
void ata_pm_read_cmd(struct ccb_ataio *ataio, int reg, int port);
void ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint32_t val);
+void ata_read_log(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint32_t log_address, uint32_t page_number,
+ uint16_t block_count, uint32_t protocol,
+ uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout);
void ata_bswap(int8_t *buf, int len);
void ata_btrim(int8_t *buf, int len);
@@ -167,4 +172,16 @@ void semb_write_buffer(struct ccb_ataio *ataio,
uint8_t tag_action, uint8_t *data_ptr, uint16_t param_list_length,
uint32_t timeout);
+void ata_zac_mgmt_out(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ int use_ncq __unused, uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint16_t sector_count, uint8_t *data_ptr,
+ uint32_t dxfer_len, uint32_t timeout);
+
+void ata_zac_mgmt_in(struct ccb_ataio *ataio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ int use_ncq __unused, uint8_t zm_action, uint64_t zone_id,
+ uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint32_t timeout);
+
#endif
diff --git a/sys/cam/ata/ata_da.c b/sys/cam/ata/ata_da.c
index 76b87f1..8a6e241 100644
--- a/sys/cam/ata/ata_da.c
+++ b/sys/cam/ata/ata_da.c
@@ -43,9 +43,11 @@ __FBSDID("$FreeBSD$");
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
+#include <sys/endian.h>
#include <sys/cons.h>
#include <sys/proc.h>
#include <sys/reboot.h>
+#include <sys/sbuf.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */
@@ -58,6 +60,8 @@ __FBSDID("$FreeBSD$");
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
#include <cam/cam_sim.h>
#include <cam/cam_iosched.h>
@@ -74,25 +78,37 @@ extern int iosched_debug;
typedef enum {
ADA_STATE_RAHEAD,
ADA_STATE_WCACHE,
+ ADA_STATE_LOGDIR,
+ ADA_STATE_IDDIR,
+ ADA_STATE_SUP_CAP,
+ ADA_STATE_ZONE,
ADA_STATE_NORMAL
} ada_state;
typedef enum {
- ADA_FLAG_CAN_48BIT = 0x0002,
- ADA_FLAG_CAN_FLUSHCACHE = 0x0004,
- ADA_FLAG_CAN_NCQ = 0x0008,
- ADA_FLAG_CAN_DMA = 0x0010,
- ADA_FLAG_NEED_OTAG = 0x0020,
- ADA_FLAG_WAS_OTAG = 0x0040,
- ADA_FLAG_CAN_TRIM = 0x0080,
- ADA_FLAG_OPEN = 0x0100,
- ADA_FLAG_SCTX_INIT = 0x0200,
- ADA_FLAG_CAN_CFA = 0x0400,
- ADA_FLAG_CAN_POWERMGT = 0x0800,
- ADA_FLAG_CAN_DMA48 = 0x1000,
- ADA_FLAG_DIRTY = 0x2000,
- ADA_FLAG_CAN_NCQ_TRIM = 0x4000, /* CAN_TRIM also set */
- ADA_FLAG_PIM_CAN_NCQ_TRIM = 0x8000
+ ADA_FLAG_CAN_48BIT = 0x00000002,
+ ADA_FLAG_CAN_FLUSHCACHE = 0x00000004,
+ ADA_FLAG_CAN_NCQ = 0x00000008,
+ ADA_FLAG_CAN_DMA = 0x00000010,
+ ADA_FLAG_NEED_OTAG = 0x00000020,
+ ADA_FLAG_WAS_OTAG = 0x00000040,
+ ADA_FLAG_CAN_TRIM = 0x00000080,
+ ADA_FLAG_OPEN = 0x00000100,
+ ADA_FLAG_SCTX_INIT = 0x00000200,
+ ADA_FLAG_CAN_CFA = 0x00000400,
+ ADA_FLAG_CAN_POWERMGT = 0x00000800,
+ ADA_FLAG_CAN_DMA48 = 0x00001000,
+ ADA_FLAG_CAN_LOG = 0x00002000,
+ ADA_FLAG_CAN_IDLOG = 0x00004000,
+ ADA_FLAG_CAN_SUPCAP = 0x00008000,
+ ADA_FLAG_CAN_ZONE = 0x00010000,
+ ADA_FLAG_CAN_WCACHE = 0x00020000,
+ ADA_FLAG_CAN_RAHEAD = 0x00040000,
+ ADA_FLAG_PROBED = 0x00080000,
+ ADA_FLAG_ANNOUNCED = 0x00100000,
+ ADA_FLAG_DIRTY = 0x00200000,
+ ADA_FLAG_CAN_NCQ_TRIM = 0x00400000, /* CAN_TRIM also set */
+ ADA_FLAG_PIM_ATA_EXT = 0x00800000
} ada_flags;
typedef enum {
@@ -112,9 +128,52 @@ typedef enum {
ADA_CCB_BUFFER_IO = 0x03,
ADA_CCB_DUMP = 0x05,
ADA_CCB_TRIM = 0x06,
+ ADA_CCB_LOGDIR = 0x07,
+ ADA_CCB_IDDIR = 0x08,
+ ADA_CCB_SUP_CAP = 0x09,
+ ADA_CCB_ZONE = 0x0a,
ADA_CCB_TYPE_MASK = 0x0F,
} ada_ccb_state;
+typedef enum {
+ ADA_ZONE_NONE = 0x00,
+ ADA_ZONE_DRIVE_MANAGED = 0x01,
+ ADA_ZONE_HOST_AWARE = 0x02,
+ ADA_ZONE_HOST_MANAGED = 0x03
+} ada_zone_mode;
+
+typedef enum {
+ ADA_ZONE_FLAG_RZ_SUP = 0x0001,
+ ADA_ZONE_FLAG_OPEN_SUP = 0x0002,
+ ADA_ZONE_FLAG_CLOSE_SUP = 0x0004,
+ ADA_ZONE_FLAG_FINISH_SUP = 0x0008,
+ ADA_ZONE_FLAG_RWP_SUP = 0x0010,
+ ADA_ZONE_FLAG_SUP_MASK = (ADA_ZONE_FLAG_RZ_SUP |
+ ADA_ZONE_FLAG_OPEN_SUP |
+ ADA_ZONE_FLAG_CLOSE_SUP |
+ ADA_ZONE_FLAG_FINISH_SUP |
+ ADA_ZONE_FLAG_RWP_SUP),
+ ADA_ZONE_FLAG_URSWRZ = 0x0020,
+ ADA_ZONE_FLAG_OPT_SEQ_SET = 0x0040,
+ ADA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080,
+ ADA_ZONE_FLAG_MAX_SEQ_SET = 0x0100,
+ ADA_ZONE_FLAG_SET_MASK = (ADA_ZONE_FLAG_OPT_SEQ_SET |
+ ADA_ZONE_FLAG_OPT_NONSEQ_SET |
+ ADA_ZONE_FLAG_MAX_SEQ_SET)
+} ada_zone_flags;
+
+static struct ada_zone_desc {
+ ada_zone_flags value;
+ const char *desc;
+} ada_zone_desc_table[] = {
+ {ADA_ZONE_FLAG_RZ_SUP, "Report Zones" },
+ {ADA_ZONE_FLAG_OPEN_SUP, "Open" },
+ {ADA_ZONE_FLAG_CLOSE_SUP, "Close" },
+ {ADA_ZONE_FLAG_FINISH_SUP, "Finish" },
+ {ADA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
+};
+
+
/* Offsets into our private area for storing information */
#define ccb_state ppriv_field0
#define ccb_bp ppriv_ptr1
@@ -157,6 +216,15 @@ struct ada_softc {
int refcount; /* Active xpt_action() calls */
ada_state state;
ada_flags flags;
+ ada_zone_mode zone_mode;
+ ada_zone_flags zone_flags;
+ struct ata_gp_log_dir ata_logdir;
+ int valid_logdir_len;
+ struct ata_identify_log_pages ata_iddir;
+ int valid_iddir_len;
+ uint64_t optimal_seq_zones;
+ uint64_t optimal_nonseq_zones;
+ uint64_t max_seq_zones;
ada_quirks quirks;
ada_delete_methods delete_method;
int trim_max_ranges;
@@ -624,13 +692,28 @@ static struct ada_quirk_entry ada_quirk_table[] =
static disk_strategy_t adastrategy;
static dumper_t adadump;
static periph_init_t adainit;
+static void adadiskgonecb(struct disk *dp);
+static periph_oninv_t adaoninvalidate;
+static periph_dtor_t adacleanup;
static void adaasync(void *callback_arg, u_int32_t code,
struct cam_path *path, void *arg);
+static int adazonemodesysctl(SYSCTL_HANDLER_ARGS);
+static int adazonesupsysctl(SYSCTL_HANDLER_ARGS);
static void adasysctlinit(void *context, int pending);
+static int adagetattr(struct bio *bp);
+static void adasetflags(struct ada_softc *softc,
+ struct ccb_getdev *cgd);
static periph_ctor_t adaregister;
-static periph_dtor_t adacleanup;
+static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp,
+ struct ccb_ataio *ataio);
+static void ada_cfaerase(struct ada_softc *softc, struct bio *bp,
+ struct ccb_ataio *ataio);
+static int ada_zone_bio_to_ata(int disk_zone_cmd);
+static int ada_zone_cmd(struct cam_periph *periph, union ccb *ccb,
+ struct bio *bp, int *queue_ccb);
static periph_start_t adastart;
-static periph_oninv_t adaoninvalidate;
+static void adaprobedone(struct cam_periph *periph, union ccb *ccb);
+static void adazonedone(struct cam_periph *periph, union ccb *ccb);
static void adadone(struct cam_periph *periph,
union ccb *done_ccb);
static int adaerror(union ccb *ccb, u_int32_t cam_flags,
@@ -738,6 +821,8 @@ static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
PERIPHDRIVER_DECLARE(ada, adadriver);
+static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
+
static int
adaopen(struct disk *dp)
{
@@ -860,6 +945,14 @@ adastrategy(struct bio *bp)
biofinish(bp, NULL, ENXIO);
return;
}
+
+ /*
+ * Zone commands must be ordered, because they can depend on the
+ * effects of previously issued commands, and they may affect
+ * commands after them.
+ */
+ if (bp->bio_cmd == BIO_ZONE)
+ bp->bio_flags |= BIO_ORDERED;
/*
* Place it in the queue of disk activities for this disk
@@ -1129,45 +1222,10 @@ adaasync(void *callback_arg, u_int32_t code,
cgd.ccb_h.func_code = XPT_GDEV_TYPE;
xpt_action((union ccb *)&cgd);
- if ((cgd.ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
- (cgd.inq_flags & SID_DMA))
- softc->flags |= ADA_FLAG_CAN_DMA;
- else
- softc->flags &= ~ADA_FLAG_CAN_DMA;
- if (cgd.ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
- softc->flags |= ADA_FLAG_CAN_48BIT;
- if (cgd.inq_flags & SID_DMA48)
- softc->flags |= ADA_FLAG_CAN_DMA48;
- else
- softc->flags &= ~ADA_FLAG_CAN_DMA48;
- } else
- softc->flags &= ~(ADA_FLAG_CAN_48BIT |
- ADA_FLAG_CAN_DMA48);
- if ((cgd.ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
- (cgd.inq_flags & SID_DMA) && (cgd.inq_flags & SID_CmdQue))
- softc->flags |= ADA_FLAG_CAN_NCQ;
- else
- softc->flags &= ~ADA_FLAG_CAN_NCQ;
-
- if ((cgd.ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
- (cgd.inq_flags & SID_DMA)) {
- softc->flags |= ADA_FLAG_CAN_TRIM;
- /*
- * If we can do RCVSND_FPDMA_QUEUED commands, we may be able to do
- * NCQ trims, if we support trims at all. We also need support from
- * the sim do do things properly. Perhaps we should look at log 13
- * dword 0 bit 0 and dword 1 bit 0 are set too...
- */
- if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
- (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 &&
- (cgd.ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
- (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
- softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
- else
- softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
- } else
- softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM);
- adasetdeletemethod(softc);
+ /*
+ * Set/clear support flags based on the new Identify data.
+ */
+ adasetflags(softc, &cgd);
cam_periph_async(periph, code, path, arg);
break;
@@ -1196,12 +1254,12 @@ adaasync(void *callback_arg, u_int32_t code,
xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
cgd.ccb_h.func_code = XPT_GDEV_TYPE;
xpt_action((union ccb *)&cgd);
- if (ADA_RA >= 0 &&
- cgd.ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
+ if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD)
softc->state = ADA_STATE_RAHEAD;
- else if (ADA_WC >= 0 &&
- cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
+ else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD)
softc->state = ADA_STATE_WCACHE;
+ else if (softc->flags & ADA_FLAG_CAN_LOG)
+ softc->state = ADA_STATE_LOGDIR;
else
break;
if (cam_periph_acquire(periph) != CAM_REQ_CMP)
@@ -1215,6 +1273,73 @@ adaasync(void *callback_arg, u_int32_t code,
}
}
+static int
+adazonemodesysctl(SYSCTL_HANDLER_ARGS)
+{
+ char tmpbuf[40];
+ struct ada_softc *softc;
+ int error;
+
+ softc = (struct ada_softc *)arg1;
+
+ switch (softc->zone_mode) {
+ case ADA_ZONE_DRIVE_MANAGED:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
+ break;
+ case ADA_ZONE_HOST_AWARE:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
+ break;
+ case ADA_ZONE_HOST_MANAGED:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
+ break;
+ case ADA_ZONE_NONE:
+ default:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
+ break;
+ }
+
+ error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
+
+ return (error);
+}
+
+static int
+adazonesupsysctl(SYSCTL_HANDLER_ARGS)
+{
+ char tmpbuf[180];
+ struct ada_softc *softc;
+ struct sbuf sb;
+ int error, first;
+ unsigned int i;
+
+ softc = (struct ada_softc *)arg1;
+
+ error = 0;
+ first = 1;
+ sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
+
+ for (i = 0; i < sizeof(ada_zone_desc_table) /
+ sizeof(ada_zone_desc_table[0]); i++) {
+ if (softc->zone_flags & ada_zone_desc_table[i].value) {
+ if (first == 0)
+ sbuf_printf(&sb, ", ");
+ else
+ first = 0;
+ sbuf_cat(&sb, ada_zone_desc_table[i].desc);
+ }
+ }
+
+ if (first == 1)
+ sbuf_printf(&sb, "None");
+
+ sbuf_finish(&sb);
+
+ error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
+
+ return (error);
+}
+
+
static void
adasysctlinit(void *context, int pending)
{
@@ -1231,7 +1356,7 @@ adasysctlinit(void *context, int pending)
}
softc = (struct ada_softc *)periph->softc;
- snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
+ snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d",periph->unit_number);
snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
sysctl_ctx_init(&softc->sysctl_ctx);
@@ -1261,6 +1386,29 @@ adasysctlinit(void *context, int pending)
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE,
&softc->rotating, 0, "Rotating media");
+ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
+ softc, 0, adazonemodesysctl, "A",
+ "Zone Mode");
+ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
+ softc, 0, adazonesupsysctl, "A",
+ "Zone Support");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
+ "Optimal Number of Open Sequential Write Preferred Zones");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "optimal_nonseq_zones", CTLFLAG_RD,
+ &softc->optimal_nonseq_zones,
+ "Optimal Number of Non-Sequentially Written Sequential Write "
+ "Preferred Zones");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
+ "Maximum Number of Open Sequential Write Required Zones");
+
#ifdef ADA_TEST_FAILURE
/*
* Add a 'door bell' sysctl which allows one to set it from userland
@@ -1361,6 +1509,103 @@ adadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
+static void
+adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd)
+{
+ if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
+ (cgd->inq_flags & SID_DMA))
+ softc->flags |= ADA_FLAG_CAN_DMA;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_DMA;
+
+ if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
+ softc->flags |= ADA_FLAG_CAN_48BIT;
+ if (cgd->inq_flags & SID_DMA48)
+ softc->flags |= ADA_FLAG_CAN_DMA48;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_DMA48;
+ } else
+ softc->flags &= ~(ADA_FLAG_CAN_48BIT | ADA_FLAG_CAN_DMA48);
+
+ if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
+ softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_FLUSHCACHE;
+
+ if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
+ softc->flags |= ADA_FLAG_CAN_POWERMGT;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_POWERMGT;
+
+ if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
+ (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
+ softc->flags |= ADA_FLAG_CAN_NCQ;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_NCQ;
+
+ if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
+ (cgd->inq_flags & SID_DMA)) {
+ softc->flags |= ADA_FLAG_CAN_TRIM;
+ softc->trim_max_ranges = TRIM_MAX_RANGES;
+ if (cgd->ident_data.max_dsm_blocks != 0) {
+ softc->trim_max_ranges =
+ min(cgd->ident_data.max_dsm_blocks *
+ ATA_DSM_BLK_RANGES, softc->trim_max_ranges);
+ }
+ /*
+ * If we can do RCVSND_FPDMA_QUEUED commands, we may be able
+ * to do NCQ trims, if we support trims at all. We also need
+ * support from the SIM to do things properly. Perhaps we
+ * should look at log 13 dword 0 bit 0 and dword 1 bit 0 are
+ * set too...
+ */
+ if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
+ (softc->flags & ADA_FLAG_PIM_ATA_EXT) != 0 &&
+ (cgd->ident_data.satacapabilities2 &
+ ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
+ (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
+ softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
+ } else
+ softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM);
+
+ if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
+ softc->flags |= ADA_FLAG_CAN_CFA;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_CFA;
+
+ /*
+ * Now that we've set the appropriate flags, setup the delete
+ * method.
+ */
+ adasetdeletemethod(softc);
+
+ if (cgd->ident_data.support.extension & ATA_SUPPORT_GENLOG)
+ softc->flags |= ADA_FLAG_CAN_LOG;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_LOG;
+
+ if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) ==
+ ATA_SUPPORT_ZONE_HOST_AWARE)
+ softc->zone_mode = ADA_ZONE_HOST_AWARE;
+ else if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) ==
+ ATA_SUPPORT_ZONE_DEV_MANAGED)
+ softc->zone_mode = ADA_ZONE_DRIVE_MANAGED;
+ else
+ softc->zone_mode = ADA_ZONE_NONE;
+
+ if (cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
+ softc->flags |= ADA_FLAG_CAN_RAHEAD;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_RAHEAD;
+
+ if (cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
+ softc->flags |= ADA_FLAG_CAN_WCACHE;
+ else
+ softc->flags &= ~ADA_FLAG_CAN_WCACHE;
+}
+
static cam_status
adaregister(struct cam_periph *periph, void *arg)
{
@@ -1394,35 +1639,10 @@ adaregister(struct cam_periph *periph, void *arg)
return(CAM_REQ_CMP_ERR);
}
- if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
- (cgd->inq_flags & SID_DMA))
- softc->flags |= ADA_FLAG_CAN_DMA;
- if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
- softc->flags |= ADA_FLAG_CAN_48BIT;
- if (cgd->inq_flags & SID_DMA48)
- softc->flags |= ADA_FLAG_CAN_DMA48;
- }
- if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
- softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
- if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
- softc->flags |= ADA_FLAG_CAN_POWERMGT;
- if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
- (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
- softc->flags |= ADA_FLAG_CAN_NCQ;
- if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
- (cgd->inq_flags & SID_DMA)) {
- softc->flags |= ADA_FLAG_CAN_TRIM;
- softc->trim_max_ranges = TRIM_MAX_RANGES;
- if (cgd->ident_data.max_dsm_blocks != 0) {
- softc->trim_max_ranges =
- min(cgd->ident_data.max_dsm_blocks *
- ATA_DSM_BLK_RANGES, softc->trim_max_ranges);
- }
- }
- if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
- softc->flags |= ADA_FLAG_CAN_CFA;
-
- adasetdeletemethod(softc);
+ /*
+ * Set support flags based on the Identify data.
+ */
+ adasetflags(softc, cgd);
periph->softc = softc;
@@ -1498,7 +1718,7 @@ adaregister(struct cam_periph *periph, void *arg)
maxio = min(maxio, 256 * softc->params.secsize);
softc->disk->d_maxsize = maxio;
softc->disk->d_unit = periph->unit_number;
- softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION;
+ softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
if (softc->flags & ADA_FLAG_CAN_TRIM) {
@@ -1516,19 +1736,6 @@ adaregister(struct cam_periph *periph, void *arg)
softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
softc->unmappedio = 1;
}
- /*
- * If we can do RCVSND_FPDMA_QUEUED commands, we may be able to do
- * NCQ trims, if we support trims at all. We also need support from
- * the sim do do things properly. Perhaps we should look at log 13
- * dword 0 bit 0 and dword 1 bit 0 are set too...
- */
- if (cpi.hba_misc & PIM_ATA_EXT)
- softc->flags |= ADA_FLAG_PIM_CAN_NCQ_TRIM;
- if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
- (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 &&
- (cgd->ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
- (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
- softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
strlcpy(softc->disk->d_descr, cgd->ident_data.model,
MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
strlcpy(softc->disk->d_ident, cgd->ident_data.serial,
@@ -1555,7 +1762,6 @@ adaregister(struct cam_periph *periph, void *arg)
softc->disk->d_fwsectors = softc->params.secs_per_track;
softc->disk->d_fwheads = softc->params.heads;
ata_disk_firmware_geom_adjust(softc->disk);
- adasetdeletemethod(softc);
/*
* Acquire a reference to the periph before we register with GEOM.
@@ -1570,7 +1776,6 @@ adaregister(struct cam_periph *periph, void *arg)
}
disk_create(softc->disk, DISK_VERSION);
cam_periph_lock(periph);
- cam_periph_unhold(periph);
dp = &softc->params;
snprintf(announce_buf, sizeof(announce_buf),
@@ -1608,20 +1813,23 @@ adaregister(struct cam_periph *periph, void *arg)
(ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
adasendorderedtag, softc);
- if (ADA_RA >= 0 &&
- cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) {
+ if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) {
softc->state = ADA_STATE_RAHEAD;
- } else if (ADA_WC >= 0 &&
- cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
+ } else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) {
softc->state = ADA_STATE_WCACHE;
+ } else if (softc->flags & ADA_FLAG_CAN_LOG) {
+ softc->state = ADA_STATE_LOGDIR;
} else {
- softc->state = ADA_STATE_NORMAL;
+ /*
+ * Nothing to probe, so we can just transition to the
+ * normal state.
+ */
+ adaprobedone(periph, NULL);
return(CAM_REQ_CMP);
}
- if (cam_periph_acquire(periph) != CAM_REQ_CMP)
- softc->state = ADA_STATE_NORMAL;
- else
- xpt_schedule(periph, CAM_PRIORITY_DEV);
+
+ xpt_schedule(periph, CAM_PRIORITY_DEV);
+
return(CAM_REQ_CMP);
}
@@ -1754,6 +1962,209 @@ ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
}
+static int
+ada_zone_bio_to_ata(int disk_zone_cmd)
+{
+ switch (disk_zone_cmd) {
+ case DISK_ZONE_OPEN:
+ return ATA_ZM_OPEN_ZONE;
+ case DISK_ZONE_CLOSE:
+ return ATA_ZM_CLOSE_ZONE;
+ case DISK_ZONE_FINISH:
+ return ATA_ZM_FINISH_ZONE;
+ case DISK_ZONE_RWP:
+ return ATA_ZM_RWP;
+ }
+
+ return -1;
+}
+
+static int
+ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
+ int *queue_ccb)
+{
+ struct ada_softc *softc;
+ int error;
+
+ error = 0;
+
+ if (bp->bio_cmd != BIO_ZONE) {
+ error = EINVAL;
+ goto bailout;
+ }
+
+ softc = periph->softc;
+
+ switch (bp->bio_zone.zone_cmd) {
+ case DISK_ZONE_OPEN:
+ case DISK_ZONE_CLOSE:
+ case DISK_ZONE_FINISH:
+ case DISK_ZONE_RWP: {
+ int zone_flags;
+ int zone_sa;
+ uint64_t lba;
+
+ zone_sa = ada_zone_bio_to_ata(bp->bio_zone.zone_cmd);
+ if (zone_sa == -1) {
+ xpt_print(periph->path, "Cannot translate zone "
+ "cmd %#x to ATA\n", bp->bio_zone.zone_cmd);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ zone_flags = 0;
+ lba = bp->bio_zone.zone_params.rwp.id;
+
+ if (bp->bio_zone.zone_params.rwp.flags &
+ DISK_ZONE_RWP_FLAG_ALL)
+ zone_flags |= ZBC_OUT_ALL;
+
+ ata_zac_mgmt_out(&ccb->ataio,
+ /*retries*/ ada_retry_count,
+ /*cbfcnp*/ adadone,
+ /*use_ncq*/ (softc->flags &
+ ADA_FLAG_PIM_ATA_EXT) ? 1 : 0,
+ /*zm_action*/ zone_sa,
+ /*zone_id*/ lba,
+ /*zone_flags*/ zone_flags,
+ /*sector_count*/ 0,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ /*timeout*/ ada_default_timeout * 1000);
+ *queue_ccb = 1;
+
+ break;
+ }
+ case DISK_ZONE_REPORT_ZONES: {
+ uint8_t *rz_ptr;
+ uint32_t num_entries, alloc_size;
+ struct disk_zone_report *rep;
+
+ rep = &bp->bio_zone.zone_params.report;
+
+ num_entries = rep->entries_allocated;
+ if (num_entries == 0) {
+ xpt_print(periph->path, "No entries allocated for "
+ "Report Zones request\n");
+ error = EINVAL;
+ goto bailout;
+ }
+ alloc_size = sizeof(struct scsi_report_zones_hdr) +
+ (sizeof(struct scsi_report_zones_desc) * num_entries);
+ alloc_size = min(alloc_size, softc->disk->d_maxsize);
+ rz_ptr = malloc(alloc_size, M_ATADA, M_NOWAIT | M_ZERO);
+ if (rz_ptr == NULL) {
+ xpt_print(periph->path, "Unable to allocate memory "
+ "for Report Zones request\n");
+ error = ENOMEM;
+ goto bailout;
+ }
+
+ ata_zac_mgmt_in(&ccb->ataio,
+ /*retries*/ ada_retry_count,
+		    /*cbfcnp*/ adadone,
+ /*use_ncq*/ (softc->flags &
+ ADA_FLAG_PIM_ATA_EXT) ? 1 : 0,
+ /*zm_action*/ ATA_ZM_REPORT_ZONES,
+ /*zone_id*/ rep->starting_id,
+ /*zone_flags*/ rep->rep_options,
+ /*data_ptr*/ rz_ptr,
+ /*dxfer_len*/ alloc_size,
+ /*timeout*/ ada_default_timeout * 1000);
+
+ /*
+ * For BIO_ZONE, this isn't normally needed. However, it
+ * is used by devstat_end_transaction_bio() to determine
+ * how much data was transferred.
+ */
+ /*
+ * XXX KDM we have a problem. But I'm not sure how to fix
+ * it. devstat uses bio_bcount - bio_resid to calculate
+ * the amount of data transferred. The GEOM disk code
+ * uses bio_length - bio_resid to calculate the amount of
+ * data in bio_completed. We have different structure
+ * sizes above and below the ada(4) driver. So, if we
+ * use the sizes above, the amount transferred won't be
+ * quite accurate for devstat. If we use different sizes
+ * for bio_bcount and bio_length (above and below
+ * respectively), then the residual needs to match one or
+ * the other. Everything is calculated after the bio
+ * leaves the driver, so changing the values around isn't
+ * really an option. For now, just set the count to the
+ * passed in length. This means that the calculations
+ * above (e.g. bio_completed) will be correct, but the
+ * amount of data reported to devstat will be slightly
+ * under or overstated.
+ */
+ bp->bio_bcount = bp->bio_length;
+
+ *queue_ccb = 1;
+
+ break;
+ }
+ case DISK_ZONE_GET_PARAMS: {
+ struct disk_zone_disk_params *params;
+
+ params = &bp->bio_zone.zone_params.disk_params;
+ bzero(params, sizeof(*params));
+
+ switch (softc->zone_mode) {
+ case ADA_ZONE_DRIVE_MANAGED:
+ params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
+ break;
+ case ADA_ZONE_HOST_AWARE:
+ params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
+ break;
+ case ADA_ZONE_HOST_MANAGED:
+ params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
+ break;
+ default:
+ case ADA_ZONE_NONE:
+ params->zone_mode = DISK_ZONE_MODE_NONE;
+ break;
+ }
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_URSWRZ)
+ params->flags |= DISK_ZONE_DISK_URSWRZ;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_OPT_SEQ_SET) {
+ params->optimal_seq_zones = softc->optimal_seq_zones;
+ params->flags |= DISK_ZONE_OPT_SEQ_SET;
+ }
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_OPT_NONSEQ_SET) {
+ params->optimal_nonseq_zones =
+ softc->optimal_nonseq_zones;
+ params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
+ }
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_MAX_SEQ_SET) {
+ params->max_seq_zones = softc->max_seq_zones;
+ params->flags |= DISK_ZONE_MAX_SEQ_SET;
+ }
+ if (softc->zone_flags & ADA_ZONE_FLAG_RZ_SUP)
+ params->flags |= DISK_ZONE_RZ_SUP;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_OPEN_SUP)
+ params->flags |= DISK_ZONE_OPEN_SUP;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_CLOSE_SUP)
+ params->flags |= DISK_ZONE_CLOSE_SUP;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_FINISH_SUP)
+ params->flags |= DISK_ZONE_FINISH_SUP;
+
+ if (softc->zone_flags & ADA_ZONE_FLAG_RWP_SUP)
+ params->flags |= DISK_ZONE_RWP_SUP;
+ break;
+ }
+ default:
+ break;
+ }
+bailout:
+ return (error);
+}
+
static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
@@ -1941,6 +2352,20 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
else
ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
break;
+ case BIO_ZONE: {
+ int error, queue_ccb;
+
+ queue_ccb = 0;
+
+ error = ada_zone_cmd(periph, start_ccb, bp, &queue_ccb);
+ if ((error != 0)
+ || (queue_ccb == 0)) {
+ biofinish(bp, NULL, error);
+ xpt_release_ccb(start_ccb);
+ return;
+ }
+ break;
+ }
}
start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
start_ccb->ccb_h.flags |= CAM_UNLOCKED;
@@ -1982,21 +2407,306 @@ out:
xpt_action(start_ccb);
break;
}
+ case ADA_STATE_LOGDIR:
+ {
+ struct ata_gp_log_dir *log_dir;
+
+ if ((softc->flags & ADA_FLAG_CAN_LOG) == 0) {
+ adaprobedone(periph, start_ccb);
+ break;
+ }
+
+ log_dir = malloc(sizeof(*log_dir), M_ATADA, M_NOWAIT|M_ZERO);
+ if (log_dir == NULL) {
+ xpt_print(periph->path, "Couldn't malloc log_dir "
+ "data\n");
+ softc->state = ADA_STATE_NORMAL;
+ xpt_release_ccb(start_ccb);
+ break;
+ }
+
+
+ ata_read_log(ataio,
+ /*retries*/1,
+ /*cbfcnp*/adadone,
+ /*log_address*/ ATA_LOG_DIRECTORY,
+ /*page_number*/ 0,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
+ CAM_ATAIO_DMA : 0,
+ /*data_ptr*/ (uint8_t *)log_dir,
+ /*dxfer_len*/sizeof(*log_dir),
+ /*timeout*/ada_default_timeout*1000);
+
+ start_ccb->ccb_h.ccb_state = ADA_CCB_LOGDIR;
+ xpt_action(start_ccb);
+ break;
+ }
+ case ADA_STATE_IDDIR:
+ {
+ struct ata_identify_log_pages *id_dir;
+
+ id_dir = malloc(sizeof(*id_dir), M_ATADA, M_NOWAIT | M_ZERO);
+ if (id_dir == NULL) {
+ xpt_print(periph->path, "Couldn't malloc id_dir "
+ "data\n");
+ adaprobedone(periph, start_ccb);
+ break;
+ }
+
+ ata_read_log(ataio,
+ /*retries*/1,
+ /*cbfcnp*/adadone,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_PAGE_LIST,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
+ CAM_ATAIO_DMA : 0,
+ /*data_ptr*/ (uint8_t *)id_dir,
+ /*dxfer_len*/ sizeof(*id_dir),
+ /*timeout*/ada_default_timeout*1000);
+
+ start_ccb->ccb_h.ccb_state = ADA_CCB_IDDIR;
+ xpt_action(start_ccb);
+ break;
+ }
+ case ADA_STATE_SUP_CAP:
+ {
+ struct ata_identify_log_sup_cap *sup_cap;
+
+ sup_cap = malloc(sizeof(*sup_cap), M_ATADA, M_NOWAIT|M_ZERO);
+ if (sup_cap == NULL) {
+ xpt_print(periph->path, "Couldn't malloc sup_cap "
+ "data\n");
+ adaprobedone(periph, start_ccb);
+ break;
+ }
+
+ ata_read_log(ataio,
+ /*retries*/1,
+ /*cbfcnp*/adadone,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_SUP_CAP,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
+ CAM_ATAIO_DMA : 0,
+ /*data_ptr*/ (uint8_t *)sup_cap,
+ /*dxfer_len*/ sizeof(*sup_cap),
+ /*timeout*/ada_default_timeout*1000);
+
+ start_ccb->ccb_h.ccb_state = ADA_CCB_SUP_CAP;
+ xpt_action(start_ccb);
+ break;
+ }
+ case ADA_STATE_ZONE:
+ {
+ struct ata_zoned_info_log *ata_zone;
+
+ ata_zone = malloc(sizeof(*ata_zone), M_ATADA, M_NOWAIT|M_ZERO);
+ if (ata_zone == NULL) {
+ xpt_print(periph->path, "Couldn't malloc ata_zone "
+ "data\n");
+ adaprobedone(periph, start_ccb);
+ break;
+ }
+
+ ata_read_log(ataio,
+ /*retries*/1,
+ /*cbfcnp*/adadone,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_ZDI,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
+ CAM_ATAIO_DMA : 0,
+ /*data_ptr*/ (uint8_t *)ata_zone,
+ /*dxfer_len*/ sizeof(*ata_zone),
+ /*timeout*/ada_default_timeout*1000);
+
+ start_ccb->ccb_h.ccb_state = ADA_CCB_ZONE;
+ xpt_action(start_ccb);
+ break;
+ }
+ }
+}
+
+static void
+adaprobedone(struct cam_periph *periph, union ccb *ccb)
+{
+ struct ada_softc *softc;
+
+ softc = (struct ada_softc *)periph->softc;
+
+ if (ccb != NULL)
+ xpt_release_ccb(ccb);
+
+ softc->state = ADA_STATE_NORMAL;
+ softc->flags |= ADA_FLAG_PROBED;
+ adaschedule(periph);
+ if ((softc->flags & ADA_FLAG_ANNOUNCED) == 0) {
+ softc->flags |= ADA_FLAG_ANNOUNCED;
+ cam_periph_unhold(periph);
+ } else {
+ cam_periph_release_locked(periph);
+ }
+}
+
+static void
+adazonedone(struct cam_periph *periph, union ccb *ccb)
+{
+ struct ada_softc *softc;
+ struct bio *bp;
+
+ softc = periph->softc;
+ bp = (struct bio *)ccb->ccb_h.ccb_bp;
+
+ switch (bp->bio_zone.zone_cmd) {
+ case DISK_ZONE_OPEN:
+ case DISK_ZONE_CLOSE:
+ case DISK_ZONE_FINISH:
+ case DISK_ZONE_RWP:
+ break;
+ case DISK_ZONE_REPORT_ZONES: {
+ uint32_t avail_len;
+ struct disk_zone_report *rep;
+ struct scsi_report_zones_hdr *hdr;
+ struct scsi_report_zones_desc *desc;
+ struct disk_zone_rep_entry *entry;
+ uint32_t num_alloced, hdr_len, num_avail;
+ uint32_t num_to_fill, i;
+
+ rep = &bp->bio_zone.zone_params.report;
+ avail_len = ccb->ataio.dxfer_len - ccb->ataio.resid;
+ /*
+ * Note that bio_resid isn't normally used for zone
+ * commands, but it is used by devstat_end_transaction_bio()
+ * to determine how much data was transferred. Because
+ * the size of the SCSI/ATA data structures is different
+ * than the size of the BIO interface structures, the
+ * amount of data actually transferred from the drive will
+ * be different than the amount of data transferred to
+ * the user.
+ */
+ num_alloced = rep->entries_allocated;
+ hdr = (struct scsi_report_zones_hdr *)ccb->ataio.data_ptr;
+ if (avail_len < sizeof(*hdr)) {
+ /*
+ * Is there a better error than EIO here? We asked
+ * for at least the header, and we got less than
+ * that.
+ */
+ bp->bio_error = EIO;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ hdr_len = le32dec(hdr->length);
+ if (hdr_len > 0)
+ rep->entries_available = hdr_len / sizeof(*desc);
+ else
+ rep->entries_available = 0;
+ /*
+ * NOTE: using the same values for the BIO version of the
+ * same field as the SCSI/ATA values. This means we could
+ * get some additional values that aren't defined in bio.h
+ * if more values of the same field are defined later.
+ */
+ rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
+ rep->header.maximum_lba = le64dec(hdr->maximum_lba);
+ /*
+ * If the drive reports no entries that match the query,
+ * we're done.
+ */
+ if (hdr_len == 0) {
+ rep->entries_filled = 0;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
+ hdr_len / sizeof(*desc));
+ /*
+ * If the drive didn't return any data, then we're done.
+ */
+ if (num_avail == 0) {
+ rep->entries_filled = 0;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ num_to_fill = min(num_avail, rep->entries_allocated);
+ /*
+ * If the user didn't allocate any entries for us to fill,
+ * we're done.
+ */
+ if (num_to_fill == 0) {
+ rep->entries_filled = 0;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
+ i < num_to_fill; i++, desc++, entry++) {
+ /*
+ * NOTE: we're mapping the values here directly
+ * from the SCSI/ATA bit definitions to the bio.h
+ * definitions. There is also a warning in
+ * disk_zone.h, but the impact is that if
+ * additional values are added in the SCSI/ATA
+ * specs these will be visible to consumers of
+ * this interface.
+ */
+ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
+ entry->zone_condition =
+ (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
+ SRZ_ZONE_COND_SHIFT;
+ entry->zone_flags |= desc->zone_flags &
+ (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
+ entry->zone_length = le64dec(desc->zone_length);
+ entry->zone_start_lba = le64dec(desc->zone_start_lba);
+ entry->write_pointer_lba =
+ le64dec(desc->write_pointer_lba);
+ }
+ rep->entries_filled = num_to_fill;
+ /*
+ * Note that this residual is accurate from the user's
+ * standpoint, but the amount transferred isn't accurate
+ * from the standpoint of what actually came back from the
+ * drive.
+ */
+ bp->bio_resid = bp->bio_bcount - (num_to_fill * sizeof(*entry));
+ break;
+ }
+ case DISK_ZONE_GET_PARAMS:
+ default:
+ /*
+ * In theory we should not get a GET_PARAMS bio, since it
+ * should be handled without queueing the command to the
+ * drive.
+ */
+ panic("%s: Invalid zone command %d", __func__,
+ bp->bio_zone.zone_cmd);
+ break;
}
+
+ if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
+ free(ccb->ataio.data_ptr, M_ATADA);
}
+
static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
struct ada_softc *softc;
struct ccb_ataio *ataio;
- struct ccb_getdev *cgd;
struct cam_path *path;
+ uint32_t priority;
int state;
softc = (struct ada_softc *)periph->softc;
ataio = &done_ccb->ataio;
path = done_ccb->ccb_h.path;
+ priority = done_ccb->ccb_h.pinfo.priority;
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n"));
@@ -2040,6 +2750,7 @@ adadone(struct cam_periph *periph, union ccb *done_ccb)
} else {
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
panic("REQ_CMP with QFRZN");
+
error = 0;
}
bp->bio_error = error;
@@ -2047,11 +2758,15 @@ adadone(struct cam_periph *periph, union ccb *done_ccb)
bp->bio_resid = bp->bio_bcount;
bp->bio_flags |= BIO_ERROR;
} else {
- if (state == ADA_CCB_TRIM)
+ if (bp->bio_cmd == BIO_ZONE)
+ adazonedone(periph, done_ccb);
+ else if (state == ADA_CCB_TRIM)
bp->bio_resid = 0;
else
bp->bio_resid = ataio->resid;
- if (bp->bio_resid > 0)
+
+ if ((bp->bio_resid > 0)
+ && (bp->bio_cmd != BIO_ZONE))
bp->bio_flags |= BIO_ERROR;
}
softc->outstanding_cmds--;
@@ -2100,7 +2815,6 @@ adadone(struct cam_periph *periph, union ccb *done_ccb)
{
if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
if (adaerror(done_ccb, 0, 0) == ERESTART) {
-out:
/* Drop freeze taken due to CAM_DEV_QFREEZE */
cam_release_devq(path, 0, 0, 0, FALSE);
return;
@@ -2121,30 +2835,21 @@ out:
* is removed, and we need it around for the CCB release
* operation.
*/
- cgd = (struct ccb_getdev *)done_ccb;
- xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
- cgd->ccb_h.func_code = XPT_GDEV_TYPE;
- xpt_action((union ccb *)cgd);
- if (ADA_WC >= 0 &&
- cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
- softc->state = ADA_STATE_WCACHE;
- xpt_release_ccb(done_ccb);
- xpt_schedule(periph, CAM_PRIORITY_DEV);
- goto out;
- }
- softc->state = ADA_STATE_NORMAL;
+
xpt_release_ccb(done_ccb);
+ softc->state = ADA_STATE_WCACHE;
+ xpt_schedule(periph, priority);
/* Drop freeze taken due to CAM_DEV_QFREEZE */
cam_release_devq(path, 0, 0, 0, FALSE);
- adaschedule(periph);
- cam_periph_release_locked(periph);
return;
}
case ADA_CCB_WCACHE:
{
if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
if (adaerror(done_ccb, 0, 0) == ERESTART) {
- goto out;
+ /* Drop freeze taken due to CAM_DEV_QFREEZE */
+ cam_release_devq(path, 0, 0, 0, FALSE);
+ return;
} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
cam_release_devq(path,
/*relsim_flags*/0,
@@ -2154,20 +2859,365 @@ out:
}
}
- softc->state = ADA_STATE_NORMAL;
- /*
- * Since our peripheral may be invalidated by an error
- * above or an external event, we must release our CCB
- * before releasing the reference on the peripheral.
- * The peripheral will only go away once the last reference
- * is removed, and we need it around for the CCB release
- * operation.
- */
- xpt_release_ccb(done_ccb);
/* Drop freeze taken due to CAM_DEV_QFREEZE */
cam_release_devq(path, 0, 0, 0, FALSE);
- adaschedule(periph);
- cam_periph_release_locked(periph);
+
+ if (softc->flags & ADA_FLAG_CAN_LOG) {
+ xpt_release_ccb(done_ccb);
+ softc->state = ADA_STATE_LOGDIR;
+ xpt_schedule(periph, priority);
+ } else {
+ adaprobedone(periph, done_ccb);
+ }
+ return;
+ }
+ case ADA_CCB_LOGDIR:
+ {
+ int error;
+
+ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ error = 0;
+ softc->valid_logdir_len = 0;
+ bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
+ softc->valid_logdir_len =
+ ataio->dxfer_len - ataio->resid;
+ if (softc->valid_logdir_len > 0)
+ bcopy(ataio->data_ptr, &softc->ata_logdir,
+ min(softc->valid_logdir_len,
+ sizeof(softc->ata_logdir)));
+ /*
+ * Figure out whether the Identify Device log is
+ * supported. The General Purpose log directory
+ * has a header, and lists the number of pages
+ * available for each GP log identified by the
+ * offset into the list.
+ */
+ if ((softc->valid_logdir_len >=
+ ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
+ && (le16dec(softc->ata_logdir.header) ==
+ ATA_GP_LOG_DIR_VERSION)
+ && (le16dec(&softc->ata_logdir.num_pages[
+ (ATA_IDENTIFY_DATA_LOG *
+ sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
+ softc->flags |= ADA_FLAG_CAN_IDLOG;
+ } else {
+ softc->flags &= ~ADA_FLAG_CAN_IDLOG;
+ }
+ } else {
+ error = adaerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA log directory,
+ * then ATA logs are effectively not
+ * supported even if the bit is set in the
+ * identify data.
+ */
+ softc->flags &= ~(ADA_FLAG_CAN_LOG |
+ ADA_FLAG_CAN_IDLOG);
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+
+
+ }
+
+ free(ataio->data_ptr, M_ATADA);
+
+ if ((error == 0)
+ && (softc->flags & ADA_FLAG_CAN_IDLOG)) {
+ softc->state = ADA_STATE_IDDIR;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ } else
+ adaprobedone(periph, done_ccb);
+
+ return;
+ }
+ case ADA_CCB_IDDIR: {
+ int error;
+
+ if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ off_t entries_offset, max_entries;
+ error = 0;
+
+ softc->valid_iddir_len = 0;
+ bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
+ softc->flags &= ~(ADA_FLAG_CAN_SUPCAP |
+ ADA_FLAG_CAN_ZONE);
+ softc->valid_iddir_len =
+ ataio->dxfer_len - ataio->resid;
+ if (softc->valid_iddir_len > 0)
+ bcopy(ataio->data_ptr, &softc->ata_iddir,
+ min(softc->valid_iddir_len,
+ sizeof(softc->ata_iddir)));
+
+ entries_offset =
+ __offsetof(struct ata_identify_log_pages,entries);
+ max_entries = softc->valid_iddir_len - entries_offset;
+ if ((softc->valid_iddir_len > (entries_offset + 1))
+ && (le64dec(softc->ata_iddir.header) ==
+ ATA_IDLOG_REVISION)
+ && (softc->ata_iddir.entry_count > 0)) {
+ int num_entries, i;
+
+ num_entries = softc->ata_iddir.entry_count;
+ num_entries = min(num_entries,
+ softc->valid_iddir_len - entries_offset);
+ for (i = 0; i < num_entries &&
+ i < max_entries; i++) {
+ if (softc->ata_iddir.entries[i] ==
+ ATA_IDL_SUP_CAP)
+ softc->flags |=
+ ADA_FLAG_CAN_SUPCAP;
+ else if (softc->ata_iddir.entries[i]==
+ ATA_IDL_ZDI)
+ softc->flags |=
+ ADA_FLAG_CAN_ZONE;
+
+ if ((softc->flags &
+ ADA_FLAG_CAN_SUPCAP)
+ && (softc->flags &
+ ADA_FLAG_CAN_ZONE))
+ break;
+ }
+ }
+ } else {
+ error = adaerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA Identify Data log
+ * directory, then it effectively isn't
+				 * supported even if the ATA Log directory has
+ * a non-zero number of pages present for
+ * this log.
+ */
+ softc->flags &= ~ADA_FLAG_CAN_IDLOG;
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(ataio->data_ptr, M_ATADA);
+
+ if ((error == 0)
+ && (softc->flags & ADA_FLAG_CAN_SUPCAP)) {
+ softc->state = ADA_STATE_SUP_CAP;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ } else
+ adaprobedone(periph, done_ccb);
+ return;
+ }
+ case ADA_CCB_SUP_CAP: {
+ int error;
+
+ if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ uint32_t valid_len;
+ size_t needed_size;
+ struct ata_identify_log_sup_cap *sup_cap;
+ error = 0;
+
+ sup_cap = (struct ata_identify_log_sup_cap *)
+ ataio->data_ptr;
+ valid_len = ataio->dxfer_len - ataio->resid;
+ needed_size =
+ __offsetof(struct ata_identify_log_sup_cap,
+ sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
+ if (valid_len >= needed_size) {
+ uint64_t zoned, zac_cap;
+
+ zoned = le64dec(sup_cap->zoned_cap);
+ if (zoned & ATA_ZONED_VALID) {
+ /*
+ * This should have already been
+ * set, because this is also in the
+ * ATA identify data.
+ */
+ if ((zoned & ATA_ZONED_MASK) ==
+ ATA_SUPPORT_ZONE_HOST_AWARE)
+ softc->zone_mode =
+ ADA_ZONE_HOST_AWARE;
+ else if ((zoned & ATA_ZONED_MASK) ==
+ ATA_SUPPORT_ZONE_DEV_MANAGED)
+ softc->zone_mode =
+ ADA_ZONE_DRIVE_MANAGED;
+ }
+
+ zac_cap = le64dec(sup_cap->sup_zac_cap);
+ if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
+ if (zac_cap & ATA_REPORT_ZONES_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_RZ_SUP;
+ if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_OPEN_SUP;
+ if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_CLOSE_SUP;
+ if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_FINISH_SUP;
+ if (zac_cap & ATA_ND_RWP_SUP)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_RWP_SUP;
+ } else {
+ /*
+ * This field was introduced in
+ * ACS-4, r08 on April 28th, 2015.
+ * If the drive firmware was written
+ * to an earlier spec, it won't have
+ * the field. So, assume all
+ * commands are supported.
+ */
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_SUP_MASK;
+ }
+
+ }
+ } else {
+ error = adaerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA Identify Data
+ * Supported Capabilities page, clear the
+ * flag...
+ */
+ softc->flags &= ~ADA_FLAG_CAN_SUPCAP;
+ /*
+ * And clear zone capabilities.
+ */
+ softc->zone_flags &= ~ADA_ZONE_FLAG_SUP_MASK;
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(ataio->data_ptr, M_ATADA);
+
+ if ((error == 0)
+ && (softc->flags & ADA_FLAG_CAN_ZONE)) {
+ softc->state = ADA_STATE_ZONE;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ } else
+ adaprobedone(periph, done_ccb);
+ return;
+ }
+ case ADA_CCB_ZONE: {
+ int error;
+
+ if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ struct ata_zoned_info_log *zi_log;
+ uint32_t valid_len;
+ size_t needed_size;
+
+ zi_log = (struct ata_zoned_info_log *)ataio->data_ptr;
+
+ valid_len = ataio->dxfer_len - ataio->resid;
+ needed_size = __offsetof(struct ata_zoned_info_log,
+ version_info) + 1 + sizeof(zi_log->version_info);
+ if (valid_len >= needed_size) {
+ uint64_t tmpvar;
+
+ tmpvar = le64dec(zi_log->zoned_cap);
+ if (tmpvar & ATA_ZDI_CAP_VALID) {
+ if (tmpvar & ATA_ZDI_CAP_URSWRZ)
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_URSWRZ;
+ else
+ softc->zone_flags &=
+ ~ADA_ZONE_FLAG_URSWRZ;
+ }
+ tmpvar = le64dec(zi_log->optimal_seq_zones);
+ if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_seq_zones = (tmpvar &
+ ATA_ZDI_OPT_SEQ_MASK);
+ } else {
+ softc->zone_flags &=
+ ~ADA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_seq_zones = 0;
+ }
+
+ tmpvar =le64dec(zi_log->optimal_nonseq_zones);
+ if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->optimal_nonseq_zones =
+ (tmpvar & ATA_ZDI_OPT_NS_MASK);
+ } else {
+ softc->zone_flags &=
+ ~ADA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->optimal_nonseq_zones = 0;
+ }
+
+ tmpvar = le64dec(zi_log->max_seq_req_zones);
+ if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
+ softc->zone_flags |=
+ ADA_ZONE_FLAG_MAX_SEQ_SET;
+ softc->max_seq_zones =
+ (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
+ } else {
+ softc->zone_flags &=
+ ~ADA_ZONE_FLAG_MAX_SEQ_SET;
+ softc->max_seq_zones = 0;
+ }
+ }
+ } else {
+ error = adaerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ softc->flags &= ~ADA_FLAG_CAN_ZONE;
+ softc->flags &= ~ADA_ZONE_FLAG_SET_MASK;
+
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+
+ }
+ free(ataio->data_ptr, M_ATADA);
+
+ adaprobedone(periph, done_ccb);
return;
}
case ADA_CCB_DUMP:
diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h
index 6086ed2..914333d 100644
--- a/sys/cam/cam_ccb.h
+++ b/sys/cam/cam_ccb.h
@@ -1087,7 +1087,17 @@ struct ccb_notify_acknowledge {
u_int tag_id; /* Tag for immediate notify */
u_int seq_id; /* Tar for target of notify */
u_int initiator_id; /* Initiator Identifier */
- u_int arg; /* Function specific */
+ u_int arg; /* Response information */
+ /*
+ * Lower byte of arg is one of RESPONSE CODE values defined below
+ * (subset of response codes from SPL-4 and FCP-4 specifications),
+ * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION.
+ */
+#define CAM_RSP_TMF_COMPLETE 0x00
+#define CAM_RSP_TMF_REJECTED 0x04
+#define CAM_RSP_TMF_FAILED 0x05
+#define CAM_RSP_TMF_SUCCEEDED 0x08
+#define CAM_RSP_TMF_INCORRECT_LUN 0x09
};
/* HBA engine structures. */
diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c
index 98966e8..f403391 100644
--- a/sys/cam/ctl/scsi_ctl.c
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -1552,6 +1552,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
/*
* Queue this back down to the SIM as an immediate notify.
*/
+ done_ccb->ccb_h.status = CAM_REQ_INPROG;
done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
xpt_action(done_ccb);
break;
@@ -2041,6 +2042,28 @@ ctlfe_done(union ctl_io *io)
*/
ccb->ccb_h.status = CAM_REQ_INPROG;
ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
+ switch (io->taskio.task_status) {
+ case CTL_TASK_FUNCTION_COMPLETE:
+ ccb->cna2.arg = CAM_RSP_TMF_COMPLETE;
+ break;
+ case CTL_TASK_FUNCTION_SUCCEEDED:
+ ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED;
+ ccb->ccb_h.flags |= CAM_SEND_STATUS;
+ break;
+ case CTL_TASK_FUNCTION_REJECTED:
+ ccb->cna2.arg = CAM_RSP_TMF_REJECTED;
+ ccb->ccb_h.flags |= CAM_SEND_STATUS;
+ break;
+ case CTL_TASK_LUN_DOES_NOT_EXIST:
+ ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN;
+ ccb->ccb_h.flags |= CAM_SEND_STATUS;
+ break;
+ case CTL_TASK_FUNCTION_NOT_SUPPORTED:
+ ccb->cna2.arg = CAM_RSP_TMF_FAILED;
+ ccb->ccb_h.flags |= CAM_SEND_STATUS;
+ break;
+ }
+ ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8;
xpt_action(ccb);
} else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) {
if (softc->flags & CTLFE_LUN_WILDCARD) {
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index 3b23ae0..722b44f 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -111,6 +111,7 @@ static void fetchtableentries(int sense_key, int asc, int ascq,
struct scsi_inquiry_data *,
const struct sense_key_table_entry **,
const struct asc_table_entry **);
+
#ifdef _KERNEL
static void init_scsi_delay(void);
static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS);
@@ -502,9 +503,9 @@ static struct op_table_entry scsi_op_codes[] = {
/* 93 M ERASE(16) */
{ 0x93, T, "ERASE(16)" },
/* 94 O ZBC OUT */
- { 0x94, D, "ZBC OUT" },
- /* 95 O ZBC OUT */
- { 0x95, D, "ZBC OUT" },
+ { 0x94, ALL, "ZBC OUT" },
+ /* 95 O ZBC IN */
+ { 0x95, ALL, "ZBC IN" },
/* 96 */
/* 97 */
/* 98 */
@@ -520,7 +521,6 @@ static struct op_table_entry scsi_op_codes[] = {
/* XXX KDM ALL for this? op-num.txt defines it for none.. */
/* 9E SERVICE ACTION IN(16) */
{ 0x9E, ALL, "SERVICE ACTION IN(16)" },
- /* XXX KDM ALL for this? op-num.txt defines it for ADC.. */
/* 9F M SERVICE ACTION OUT(16) */
{ 0x9F, ALL, "SERVICE ACTION OUT(16)" },
/* A0 MMOOO OMMM OMO REPORT LUNS */
@@ -673,6 +673,12 @@ scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data)
if (pd_type == T_RBC)
pd_type = T_DIRECT;
+ /*
+ * Host managed drives are direct access for the most part.
+ */
+ if (pd_type == T_ZBC_HM)
+ pd_type = T_DIRECT;
+
/* Map NODEVICE to Direct Access Device to handle REPORT LUNS, etc. */
if (pd_type == T_NODEVICE)
pd_type = T_DIRECT;
@@ -4259,6 +4265,7 @@ scsi_get_block_info(struct scsi_sense_data *sense_data, u_int sense_len,
switch (SID_TYPE(inq_data)) {
case T_DIRECT:
case T_RBC:
+ case T_ZBC_HM:
break;
default:
goto bailout;
@@ -5408,6 +5415,9 @@ scsi_print_inquiry(struct scsi_inquiry_data *inq_data)
case T_ADC:
dtype = "Automation/Drive Interface";
break;
+ case T_ZBC_HM:
+ dtype = "Host Managed Zoned Block";
+ break;
case T_NODEVICE:
dtype = "Uninstalled";
break;
@@ -8135,23 +8145,30 @@ scsi_ata_identify(struct ccb_scsiio *csio, u_int32_t retries,
u_int16_t dxfer_len, u_int8_t sense_len,
u_int32_t timeout)
{
- scsi_ata_pass_16(csio,
- retries,
- cbfcnp,
- /*flags*/CAM_DIR_IN,
- tag_action,
- /*protocol*/AP_PROTO_PIO_IN,
- /*ata_flags*/AP_FLAG_TDIR_FROM_DEV|
- AP_FLAG_BYT_BLOK_BYTES|AP_FLAG_TLEN_SECT_CNT,
- /*features*/0,
- /*sector_count*/dxfer_len,
- /*lba*/0,
- /*command*/ATA_ATA_IDENTIFY,
- /*control*/0,
- data_ptr,
- dxfer_len,
- sense_len,
- timeout);
+ scsi_ata_pass(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*protocol*/AP_PROTO_PIO_IN,
+ /*ata_flags*/AP_FLAG_TDIR_FROM_DEV |
+ AP_FLAG_BYT_BLOK_BYTES |
+ AP_FLAG_TLEN_SECT_CNT,
+ /*features*/0,
+ /*sector_count*/dxfer_len,
+ /*lba*/0,
+ /*command*/ATA_ATA_IDENTIFY,
+ /*device*/ 0,
+ /*icc*/ 0,
+ /*auxiliary*/ 0,
+ /*control*/0,
+ data_ptr,
+ dxfer_len,
+ /*cdb_storage*/ NULL,
+ /*cdb_storage_len*/ 0,
+ /*minimum_cmd_size*/ 0,
+ sense_len,
+ timeout);
}
void
@@ -8179,6 +8196,248 @@ scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries,
timeout);
}
+int
+scsi_ata_read_log(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint32_t log_address,
+ uint32_t page_number, uint16_t block_count,
+ uint8_t protocol, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ uint8_t command, protocol_out;
+ uint16_t count_out;
+ uint64_t lba;
+ int retval;
+
+ retval = 0;
+
+ switch (protocol) {
+ case AP_PROTO_DMA:
+ count_out = block_count;
+ command = ATA_READ_LOG_DMA_EXT;
+ protocol_out = AP_PROTO_DMA;
+ break;
+ case AP_PROTO_PIO_IN:
+ default:
+ count_out = block_count;
+ command = ATA_READ_LOG_EXT;
+ protocol_out = AP_PROTO_PIO_IN;
+ break;
+ }
+
+ lba = (((uint64_t)page_number & 0xff00) << 32) |
+ ((page_number & 0x00ff) << 8) |
+ (log_address & 0xff);
+
+ protocol_out |= AP_EXTEND;
+
+ retval = scsi_ata_pass(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*protocol*/ protocol_out,
+ /*ata_flags*/AP_FLAG_TLEN_SECT_CNT |
+ AP_FLAG_BYT_BLOK_BLOCKS |
+ AP_FLAG_TDIR_FROM_DEV,
+ /*feature*/ 0,
+ /*sector_count*/ count_out,
+ /*lba*/ lba,
+ /*command*/ command,
+ /*device*/ 0,
+ /*icc*/ 0,
+ /*auxiliary*/ 0,
+ /*control*/0,
+ data_ptr,
+ dxfer_len,
+ /*cdb_storage*/ NULL,
+ /*cdb_storage_len*/ 0,
+ /*minimum_cmd_size*/ 0,
+ sense_len,
+ timeout);
+
+ return (retval);
+}
+
+/*
+ * Note! This is an unusual CDB building function because it can return
+ * an error in the event that the command in question requires a variable
+ * length CDB, but the caller has not given storage space for one or has not
+ * given enough storage space. If there is enough space available in the
+ * standard SCSI CCB CDB bytes, we'll prefer that over passed in storage.
+ */
+int
+scsi_ata_pass(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint32_t flags, uint8_t tag_action,
+ uint8_t protocol, uint8_t ata_flags, uint16_t features,
+ uint16_t sector_count, uint64_t lba, uint8_t command,
+ uint8_t device, uint8_t icc, uint32_t auxiliary,
+ uint8_t control, u_int8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t *cdb_storage, size_t cdb_storage_len,
+ int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout)
+{
+ uint32_t cam_flags;
+ uint8_t *cdb_ptr;
+ int cmd_size;
+ int retval;
+ uint8_t cdb_len;
+
+ retval = 0;
+ cam_flags = flags;
+
+ /*
+ * Round the user's request to the nearest command size that is at
+ * least as big as what he requested.
+ */
+ if (minimum_cmd_size <= 12)
+ cmd_size = 12;
+ else if (minimum_cmd_size > 16)
+ cmd_size = 32;
+ else
+ cmd_size = 16;
+
+ /*
+ * If we have parameters that require a 48-bit ATA command, we have to
+ * use the 16 byte ATA PASS-THROUGH command at least.
+ */
+ if (((lba > ATA_MAX_28BIT_LBA)
+ || (sector_count > 255)
+ || (features > 255)
+ || (protocol & AP_EXTEND))
+ && ((cmd_size < 16)
+ || ((protocol & AP_EXTEND) == 0))) {
+ if (cmd_size < 16)
+ cmd_size = 16;
+ protocol |= AP_EXTEND;
+ }
+
+ /*
+ * The icc and auxiliary ATA registers are only supported in the
+ * 32-byte version of the ATA PASS-THROUGH command.
+ */
+ if ((icc != 0)
+ || (auxiliary != 0)) {
+ cmd_size = 32;
+ protocol |= AP_EXTEND;
+ }
+
+
+ if ((cmd_size > sizeof(csio->cdb_io.cdb_bytes))
+ && ((cdb_storage == NULL)
+ || (cdb_storage_len < cmd_size))) {
+ retval = 1;
+ goto bailout;
+ }
+
+ /*
+ * At this point we know we have enough space to store the command
+ * in one place or another. We prefer the built-in array, but used
+ * the passed in storage if necessary.
+ */
+ if (cmd_size <= sizeof(csio->cdb_io.cdb_bytes))
+ cdb_ptr = csio->cdb_io.cdb_bytes;
+ else {
+ cdb_ptr = cdb_storage;
+ cam_flags |= CAM_CDB_POINTER;
+ }
+
+ if (cmd_size <= 12) {
+ struct ata_pass_12 *cdb;
+
+ cdb = (struct ata_pass_12 *)cdb_ptr;
+ cdb_len = sizeof(*cdb);
+ bzero(cdb, cdb_len);
+
+ cdb->opcode = ATA_PASS_12;
+ cdb->protocol = protocol;
+ cdb->flags = ata_flags;
+ cdb->features = features;
+ cdb->sector_count = sector_count;
+ cdb->lba_low = lba & 0xff;
+ cdb->lba_mid = (lba >> 8) & 0xff;
+ cdb->lba_high = (lba >> 16) & 0xff;
+ cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA;
+ cdb->command = command;
+ cdb->control = control;
+ } else if (cmd_size <= 16) {
+ struct ata_pass_16 *cdb;
+
+ cdb = (struct ata_pass_16 *)cdb_ptr;
+ cdb_len = sizeof(*cdb);
+ bzero(cdb, cdb_len);
+
+ cdb->opcode = ATA_PASS_16;
+ cdb->protocol = protocol;
+ cdb->flags = ata_flags;
+ cdb->features = features & 0xff;
+ cdb->sector_count = sector_count & 0xff;
+ cdb->lba_low = lba & 0xff;
+ cdb->lba_mid = (lba >> 8) & 0xff;
+ cdb->lba_high = (lba >> 16) & 0xff;
+ /*
+ * If AP_EXTEND is set, we're sending a 48-bit command.
+ * Otherwise it's a 28-bit command.
+ */
+ if (protocol & AP_EXTEND) {
+ cdb->lba_low_ext = (lba >> 24) & 0xff;
+ cdb->lba_mid_ext = (lba >> 32) & 0xff;
+ cdb->lba_high_ext = (lba >> 40) & 0xff;
+ cdb->features_ext = (features >> 8) & 0xff;
+ cdb->sector_count_ext = (sector_count >> 8) & 0xff;
+ cdb->device = device | ATA_DEV_LBA;
+ } else {
+ cdb->lba_low_ext = (lba >> 24) & 0xf;
+ cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA;
+ }
+ cdb->command = command;
+ cdb->control = control;
+ } else {
+ struct ata_pass_32 *cdb;
+ uint8_t tmp_lba[8];
+
+ cdb = (struct ata_pass_32 *)cdb_ptr;
+ cdb_len = sizeof(*cdb);
+ bzero(cdb, cdb_len);
+ cdb->opcode = VARIABLE_LEN_CDB;
+ cdb->control = control;
+ cdb->length = sizeof(*cdb) - __offsetof(struct ata_pass_32,
+ service_action);
+ scsi_ulto2b(ATA_PASS_32_SA, cdb->service_action);
+ cdb->protocol = protocol;
+ cdb->flags = ata_flags;
+
+ if ((protocol & AP_EXTEND) == 0) {
+ lba &= 0x0fffffff;
+ cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA;
+ features &= 0xff;
+ sector_count &= 0xff;
+ } else {
+ cdb->device = device | ATA_DEV_LBA;
+ }
+ scsi_u64to8b(lba, tmp_lba);
+ bcopy(&tmp_lba[2], cdb->lba, sizeof(cdb->lba));
+ scsi_ulto2b(features, cdb->features);
+ scsi_ulto2b(sector_count, cdb->count);
+ cdb->command = command;
+ cdb->icc = icc;
+ scsi_ulto4b(auxiliary, cdb->auxiliary);
+ }
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ cam_flags,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ cmd_size,
+ timeout);
+bailout:
+ return (retval);
+}
+
void
scsi_ata_pass_16(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index 075629b..1fd4540 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -1414,6 +1414,7 @@ struct ata_pass_12 {
#define AP_PROTO_UDMA_OUT (0x0b << 1)
#define AP_PROTO_FPDMA (0x0c << 1)
#define AP_PROTO_RESP_INFO (0x0f << 1)
+#define AP_PROTO_MASK 0x1e
#define AP_MULTI 0xe0
u_int8_t flags;
#define AP_T_LEN 0x03
@@ -1955,6 +1956,27 @@ struct ata_pass_16 {
u_int8_t control;
};
+struct ata_pass_32 {
+ uint8_t opcode;
+ uint8_t control;
+ uint8_t reserved1[5];
+ uint8_t length;
+ uint8_t service_action[2];
+#define ATA_PASS_32_SA 0x1ff0
+ uint8_t protocol;
+ uint8_t flags;
+ uint8_t reserved2[2];
+ uint8_t lba[6];
+ uint8_t features[2];
+ uint8_t count[2];
+ uint8_t device;
+ uint8_t command;
+ uint8_t reserved3;
+ uint8_t icc;
+ uint8_t auxiliary[4];
+};
+
+
#define SC_SCSI_1 0x01
#define SC_SCSI_2 0x03
@@ -1997,6 +2019,8 @@ struct ata_pass_16 {
#define MODE_SENSE_10 0x5A
#define PERSISTENT_RES_IN 0x5E
#define PERSISTENT_RES_OUT 0x5F
+#define EXTENDED_CDB 0x7E
+#define VARIABLE_LEN_CDB 0x7F
#define EXTENDED_COPY 0x83
#define RECEIVE_COPY_STATUS 0x84
#define ATA_PASS_16 0x85
@@ -2064,6 +2088,7 @@ struct ata_pass_16 {
#define T_OCRW 0x0f
#define T_OSD 0x11
#define T_ADC 0x12
+#define T_ZBC_HM 0x14
#define T_NODEVICE 0x1f
#define T_ANY 0xff /* Used in Quirk table matches */
@@ -2712,10 +2737,17 @@ struct scsi_vpd_block_device_characteristics
uint8_t flags;
#define SVPD_VBULS 0x01
#define SVPD_FUAB 0x02
-#define SVPD_HAW_ZBC 0x10
+#define SVPD_ZBC_NR 0x00 /* Not Reported */
+#define SVPD_HAW_ZBC 0x10 /* Host Aware */
+#define SVPD_DM_ZBC 0x20 /* Drive Managed */
+#define SVPD_ZBC_MASK 0x30 /* Zoned mask */
uint8_t reserved[55];
};
+#define SBDC_IS_PRESENT(bdc, length, field) \
+ ((length >= offsetof(struct scsi_vpd_block_device_characteristics, \
+ field) + sizeof(bdc->field)) ? 1 : 0)
+
/*
* Logical Block Provisioning VPD Page based on
* T10/1799-D Revision 31
@@ -2774,6 +2806,28 @@ struct scsi_vpd_block_limits
u_int8_t max_atomic_boundary_size[4];
};
+/*
+ * Zoned Block Device Characteristics VPD page.
+ * From ZBC-r04, dated August 12, 2015.
+ */
+struct scsi_vpd_zoned_bdc {
+ uint8_t device;
+ uint8_t page_code;
+#define SVPD_ZONED_BDC 0xB6
+ uint8_t page_length[2];
+#define SVPD_ZBDC_PL 0x3C
+ uint8_t flags;
+#define SVPD_ZBDC_URSWRZ 0x01
+ uint8_t reserved1[3];
+ uint8_t optimal_seq_zones[4];
+#define SVPD_ZBDC_OPT_SEQ_NR 0xffffffff
+ uint8_t optimal_nonseq_zones[4];
+#define SVPD_ZBDC_OPT_NONSEQ_NR 0xffffffff
+ uint8_t max_seq_req_zones[4];
+#define SVPD_ZBDC_MAX_SEQ_UNLIMITED 0xffffffff
+ uint8_t reserved2[44];
+};
+
struct scsi_read_capacity
{
u_int8_t opcode;
@@ -3345,6 +3399,29 @@ struct scsi_sense_osd_attr_id
};
/*
+ * ATA Return descriptor, used for the SCSI ATA PASS-THROUGH(12), (16) and
+ * (32) commands. Described in SAT-4r05.
+ */
+struct scsi_sense_ata_ret_desc
+{
+ uint8_t desc_type;
+#define SSD_DESC_ATA 0x09
+ uint8_t length;
+ uint8_t flags;
+#define SSD_DESC_ATA_FLAG_EXTEND 0x01
+ uint8_t error;
+ uint8_t count_15_8;
+ uint8_t count_7_0;
+ uint8_t lba_31_24;
+ uint8_t lba_7_0;
+ uint8_t lba_39_32;
+ uint8_t lba_15_8;
+ uint8_t lba_47_40;
+ uint8_t lba_23_16;
+ uint8_t device;
+ uint8_t status;
+};
+/*
* Used with Sense keys No Sense (0x00) and Not Ready (0x02).
*
* Maximum descriptors allowed: 32 (as of SPC-4)
@@ -3960,6 +4037,23 @@ void scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries,
u_int8_t *data_ptr, u_int16_t dxfer_len,
u_int8_t sense_len, u_int32_t timeout);
+int scsi_ata_read_log(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint32_t log_address,
+ uint32_t page_number, uint16_t block_count,
+ uint8_t protocol, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t sense_len, uint32_t timeout);
+
+int scsi_ata_pass(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint32_t flags, uint8_t tag_action,
+ uint8_t protocol, uint8_t ata_flags, uint16_t features,
+ uint16_t sector_count, uint64_t lba, uint8_t command,
+ uint8_t device, uint8_t icc, uint32_t auxiliary,
+ uint8_t control, u_int8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t *cdb_storage, size_t cdb_storage_len,
+ int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout);
+
void scsi_ata_pass_16(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
u_int32_t flags, u_int8_t tag_action,
diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c
index 40bdeef..aa05b70 100644
--- a/sys/cam/scsi/scsi_da.c
+++ b/sys/cam/scsi/scsi_da.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/cons.h>
#include <sys/endian.h>
#include <sys/proc.h>
+#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */
@@ -63,12 +64,20 @@ __FBSDID("$FreeBSD$");
#include <cam/cam_iosched.h>
#include <cam/scsi/scsi_message.h>
-
-#ifndef _KERNEL
#include <cam/scsi/scsi_da.h>
-#endif /* !_KERNEL */
#ifdef _KERNEL
+/*
+ * Note that there are probe ordering dependencies here. The order isn't
+ * controlled by this enumeration, but by explicit state transitions in
+ * dastart() and dadone(). Here are some of the dependencies:
+ *
+ * 1. RC should come first, before RC16, unless there is evidence that RC16
+ * is supported.
+ * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
+ * 3. The ATA probes should go in this order:
+ * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
+ */
typedef enum {
DA_STATE_PROBE_RC,
DA_STATE_PROBE_RC16,
@@ -76,23 +85,33 @@ typedef enum {
DA_STATE_PROBE_BLK_LIMITS,
DA_STATE_PROBE_BDC,
DA_STATE_PROBE_ATA,
+ DA_STATE_PROBE_ATA_LOGDIR,
+ DA_STATE_PROBE_ATA_IDDIR,
+ DA_STATE_PROBE_ATA_SUP,
+ DA_STATE_PROBE_ATA_ZONE,
+ DA_STATE_PROBE_ZONE,
DA_STATE_NORMAL
} da_state;
typedef enum {
- DA_FLAG_PACK_INVALID = 0x001,
- DA_FLAG_NEW_PACK = 0x002,
- DA_FLAG_PACK_LOCKED = 0x004,
- DA_FLAG_PACK_REMOVABLE = 0x008,
- DA_FLAG_NEED_OTAG = 0x020,
- DA_FLAG_WAS_OTAG = 0x040,
- DA_FLAG_RETRY_UA = 0x080,
- DA_FLAG_OPEN = 0x100,
- DA_FLAG_SCTX_INIT = 0x200,
- DA_FLAG_CAN_RC16 = 0x400,
- DA_FLAG_PROBED = 0x800,
- DA_FLAG_DIRTY = 0x1000,
- DA_FLAG_ANNOUNCED = 0x2000
+ DA_FLAG_PACK_INVALID = 0x000001,
+ DA_FLAG_NEW_PACK = 0x000002,
+ DA_FLAG_PACK_LOCKED = 0x000004,
+ DA_FLAG_PACK_REMOVABLE = 0x000008,
+ DA_FLAG_NEED_OTAG = 0x000020,
+ DA_FLAG_WAS_OTAG = 0x000040,
+ DA_FLAG_RETRY_UA = 0x000080,
+ DA_FLAG_OPEN = 0x000100,
+ DA_FLAG_SCTX_INIT = 0x000200,
+ DA_FLAG_CAN_RC16 = 0x000400,
+ DA_FLAG_PROBED = 0x000800,
+ DA_FLAG_DIRTY = 0x001000,
+ DA_FLAG_ANNOUNCED = 0x002000,
+ DA_FLAG_CAN_ATA_DMA = 0x004000,
+ DA_FLAG_CAN_ATA_LOG = 0x008000,
+ DA_FLAG_CAN_ATA_IDLOG = 0x010000,
+ DA_FLAG_CAN_ATA_SUPCAP = 0x020000,
+ DA_FLAG_CAN_ATA_ZONE = 0x040000
} da_flags;
typedef enum {
@@ -103,7 +122,8 @@ typedef enum {
DA_Q_4K = 0x08,
DA_Q_NO_RC16 = 0x10,
DA_Q_NO_UNMAP = 0x20,
- DA_Q_RETRY_BUSY = 0x40
+ DA_Q_RETRY_BUSY = 0x40,
+ DA_Q_SMR_DM = 0x80
} da_quirks;
#define DA_Q_BIT_STRING \
@@ -114,7 +134,8 @@ typedef enum {
"\0044K" \
"\005NO_RC16" \
"\006NO_UNMAP" \
- "\007RETRY_BUSY"
+ "\007RETRY_BUSY" \
+	"\010SMR_DM"
typedef enum {
DA_CCB_PROBE_RC = 0x01,
@@ -127,8 +148,13 @@ typedef enum {
DA_CCB_DUMP = 0x0A,
DA_CCB_DELETE = 0x0B,
DA_CCB_TUR = 0x0C,
- DA_CCB_TYPE_MASK = 0x0F,
- DA_CCB_RETRY_UA = 0x10
+ DA_CCB_PROBE_ZONE = 0x0D,
+ DA_CCB_PROBE_ATA_LOGDIR = 0x0E,
+ DA_CCB_PROBE_ATA_IDDIR = 0x0F,
+ DA_CCB_PROBE_ATA_SUP = 0x10,
+ DA_CCB_PROBE_ATA_ZONE = 0x11,
+ DA_CCB_TYPE_MASK = 0x1F,
+ DA_CCB_RETRY_UA = 0x20
} da_ccb_state;
/*
@@ -152,6 +178,63 @@ typedef enum {
DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;
+/*
+ * For SCSI, host managed drives show up as a separate device type. For
+ * ATA, host managed drives also have a different device signature.
+ * XXX KDM figure out the ATA host managed signature.
+ */
+typedef enum {
+ DA_ZONE_NONE = 0x00,
+ DA_ZONE_DRIVE_MANAGED = 0x01,
+ DA_ZONE_HOST_AWARE = 0x02,
+ DA_ZONE_HOST_MANAGED = 0x03
+} da_zone_mode;
+
+/*
+ * We distinguish between these interface cases in addition to the drive type:
+ * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
+ * o ATA drive behind a SCSI translation layer that does not know about
+ * ZBC/ZAC, and so needs to be managed via ATA passthrough. In this
+ * case, we would need to share the ATA code with the ada(4) driver.
+ * o SCSI drive.
+ */
+typedef enum {
+ DA_ZONE_IF_SCSI,
+ DA_ZONE_IF_ATA_PASS,
+ DA_ZONE_IF_ATA_SAT,
+} da_zone_interface;
+
+typedef enum {
+ DA_ZONE_FLAG_RZ_SUP = 0x0001,
+ DA_ZONE_FLAG_OPEN_SUP = 0x0002,
+ DA_ZONE_FLAG_CLOSE_SUP = 0x0004,
+ DA_ZONE_FLAG_FINISH_SUP = 0x0008,
+ DA_ZONE_FLAG_RWP_SUP = 0x0010,
+ DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP |
+ DA_ZONE_FLAG_OPEN_SUP |
+ DA_ZONE_FLAG_CLOSE_SUP |
+ DA_ZONE_FLAG_FINISH_SUP |
+ DA_ZONE_FLAG_RWP_SUP),
+ DA_ZONE_FLAG_URSWRZ = 0x0020,
+ DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040,
+ DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080,
+ DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100,
+ DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET |
+ DA_ZONE_FLAG_OPT_NONSEQ_SET |
+ DA_ZONE_FLAG_MAX_SEQ_SET)
+} da_zone_flags;
+
+static struct da_zone_desc {
+ da_zone_flags value;
+ const char *desc;
+} da_zone_desc_table[] = {
+ {DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
+ {DA_ZONE_FLAG_OPEN_SUP, "Open" },
+ {DA_ZONE_FLAG_CLOSE_SUP, "Close" },
+ {DA_ZONE_FLAG_FINISH_SUP, "Finish" },
+ {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
+};
+
typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
struct bio *bp);
static da_delete_func_t da_delete_trim;
@@ -214,7 +297,17 @@ struct da_softc {
int error_inject;
int trim_max_ranges;
int delete_available; /* Delete methods possibly available */
- u_int maxio;
+ da_zone_mode zone_mode;
+ da_zone_interface zone_interface;
+ da_zone_flags zone_flags;
+ struct ata_gp_log_dir ata_logdir;
+ int valid_logdir_len;
+ struct ata_identify_log_pages ata_iddir;
+ int valid_iddir_len;
+ uint64_t optimal_seq_zones;
+ uint64_t optimal_nonseq_zones;
+ uint64_t max_seq_zones;
+ u_int maxio;
uint32_t unmap_max_ranges;
uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */
uint64_t ws_max_blks;
@@ -1188,6 +1281,15 @@ static struct da_quirk_entry da_quirk_table[] =
},
{
/*
+ * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
+ * Drive Managed SATA hard drive. This drive doesn't report
+ * in firmware that it is a drive managed SMR drive.
+ */
+ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS0002*", "*" },
+ /*quirks*/DA_Q_SMR_DM
+ },
+ {
+ /*
* MX-ES USB Drive by Mach Xtreme
*/
{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
@@ -1204,6 +1306,8 @@ static void dasysctlinit(void *context, int pending);
static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
+static int dazonemodesysctl(SYSCTL_HANDLER_ARGS);
+static int dazonesupsysctl(SYSCTL_HANDLER_ARGS);
static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
static void dadeletemethodset(struct da_softc *softc,
da_delete_methods delete_method);
@@ -1217,6 +1321,7 @@ static periph_ctor_t daregister;
static periph_dtor_t dacleanup;
static periph_start_t dastart;
static periph_oninv_t daoninvalidate;
+static void dazonedone(struct cam_periph *periph, union ccb *ccb);
static void dadone(struct cam_periph *periph,
union ccb *done_ccb);
static int daerror(union ccb *ccb, u_int32_t cam_flags,
@@ -1447,6 +1552,14 @@ dastrategy(struct bio *bp)
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
/*
+ * Zone commands must be ordered, because they can depend on the
+ * effects of previously issued commands, and they may affect
+ * commands after them.
+ */
+ if (bp->bio_cmd == BIO_ZONE)
+ bp->bio_flags |= BIO_ORDERED;
+
+ /*
* Place it in the queue of disk activities for this disk
*/
cam_iosched_queue_work(softc->cam_iosched, bp);
@@ -1678,7 +1791,8 @@ daasync(void *callback_arg, u_int32_t code,
break;
if (SID_TYPE(&cgd->inq_data) != T_DIRECT
&& SID_TYPE(&cgd->inq_data) != T_RBC
- && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
+ && SID_TYPE(&cgd->inq_data) != T_OPTICAL
+ && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
break;
/*
@@ -1829,6 +1943,29 @@ dasysctlinit(void *context, int pending)
&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
"Minimum CDB size");
+ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
+ softc, 0, dazonemodesysctl, "A",
+ "Zone Mode");
+ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
+ softc, 0, dazonesupsysctl, "A",
+ "Zone Support");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
+ "Optimal Number of Open Sequential Write Preferred Zones");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "optimal_nonseq_zones", CTLFLAG_RD,
+ &softc->optimal_nonseq_zones,
+ "Optimal Number of Non-Sequentially Written Sequential Write "
+ "Preferred Zones");
+ SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
+ SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
+ "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
+ "Maximum Number of Open Sequential Write Required Zones");
+
SYSCTL_ADD_INT(&softc->sysctl_ctx,
SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO,
@@ -2147,6 +2284,72 @@ dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
return (0);
}
+static int
+dazonemodesysctl(SYSCTL_HANDLER_ARGS)
+{
+ char tmpbuf[40];
+ struct da_softc *softc;
+ int error;
+
+ softc = (struct da_softc *)arg1;
+
+ switch (softc->zone_mode) {
+ case DA_ZONE_DRIVE_MANAGED:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
+ break;
+ case DA_ZONE_HOST_AWARE:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
+ break;
+ case DA_ZONE_HOST_MANAGED:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
+ break;
+ case DA_ZONE_NONE:
+ default:
+ snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
+ break;
+ }
+
+ error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
+
+ return (error);
+}
+
+static int
+dazonesupsysctl(SYSCTL_HANDLER_ARGS)
+{
+ char tmpbuf[180];
+ struct da_softc *softc;
+ struct sbuf sb;
+ int error, first;
+ unsigned int i;
+
+ softc = (struct da_softc *)arg1;
+
+ error = 0;
+ first = 1;
+ sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
+
+ for (i = 0; i < sizeof(da_zone_desc_table) /
+ sizeof(da_zone_desc_table[0]); i++) {
+ if (softc->zone_flags & da_zone_desc_table[i].value) {
+ if (first == 0)
+ sbuf_printf(&sb, ", ");
+ else
+ first = 0;
+ sbuf_cat(&sb, da_zone_desc_table[i].desc);
+ }
+ }
+
+ if (first == 1)
+ sbuf_printf(&sb, "None");
+
+ sbuf_finish(&sb);
+
+ error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
+
+ return (error);
+}
+
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
@@ -2211,6 +2414,23 @@ daregister(struct cam_periph *periph, void *arg)
if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
softc->quirks |= DA_Q_NO_6_BYTE;
+ if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
+ softc->zone_mode = DA_ZONE_HOST_MANAGED;
+ else if (softc->quirks & DA_Q_SMR_DM)
+ softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
+ else
+ softc->zone_mode = DA_ZONE_NONE;
+
+ if (softc->zone_mode != DA_ZONE_NONE) {
+ if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
+ if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
+ softc->zone_interface = DA_ZONE_IF_ATA_SAT;
+ else
+ softc->zone_interface = DA_ZONE_IF_ATA_PASS;
+ } else
+ softc->zone_interface = DA_ZONE_IF_SCSI;
+ }
+
TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
/*
@@ -2292,7 +2512,7 @@ daregister(struct cam_periph *periph, void *arg)
softc->maxio = cpi.maxio;
softc->disk->d_maxsize = softc->maxio;
softc->disk->d_unit = periph->unit_number;
- softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION;
+ softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
@@ -2360,6 +2580,300 @@ daregister(struct cam_periph *periph, void *arg)
return(CAM_REQ_CMP);
}
+static int
+da_zone_bio_to_scsi(int disk_zone_cmd)
+{
+ switch (disk_zone_cmd) {
+ case DISK_ZONE_OPEN:
+ return ZBC_OUT_SA_OPEN;
+ case DISK_ZONE_CLOSE:
+ return ZBC_OUT_SA_CLOSE;
+ case DISK_ZONE_FINISH:
+ return ZBC_OUT_SA_FINISH;
+ case DISK_ZONE_RWP:
+ return ZBC_OUT_SA_RWP;
+ }
+
+ return -1;
+}
+
+static int
+da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
+ int *queue_ccb)
+{
+ struct da_softc *softc;
+ int error;
+
+ error = 0;
+
+ if (bp->bio_cmd != BIO_ZONE) {
+ error = EINVAL;
+ goto bailout;
+ }
+
+ softc = periph->softc;
+
+ switch (bp->bio_zone.zone_cmd) {
+ case DISK_ZONE_OPEN:
+ case DISK_ZONE_CLOSE:
+ case DISK_ZONE_FINISH:
+ case DISK_ZONE_RWP: {
+ int zone_flags;
+ int zone_sa;
+ uint64_t lba;
+
+ zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
+ if (zone_sa == -1) {
+ xpt_print(periph->path, "Cannot translate zone "
+ "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
+ error = EINVAL;
+ goto bailout;
+ }
+
+ zone_flags = 0;
+ lba = bp->bio_zone.zone_params.rwp.id;
+
+ if (bp->bio_zone.zone_params.rwp.flags &
+ DISK_ZONE_RWP_FLAG_ALL)
+ zone_flags |= ZBC_OUT_ALL;
+
+ if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
+ scsi_zbc_out(&ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*service_action*/ zone_sa,
+ /*zone_id*/ lba,
+ /*zone_flags*/ zone_flags,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+ } else {
+ /*
+ * Note that in this case, even though we can
+ * technically use NCQ, we don't bother for several
+ * reasons:
+ * 1. It hasn't been tested on a SAT layer that
+ * supports it. This is new as of SAT-4.
+ * 2. Even when there is a SAT layer that supports
+ * it, that SAT layer will also probably support
+ * ZBC -> ZAC translation, since they are both
+ * in the SAT-4 spec.
+ * 3. Translation will likely be preferable to ATA
+ * passthrough. LSI / Avago at least single
+ * steps ATA passthrough commands in the HBA,
+ * regardless of protocol, so unless that
+ * changes, there is a performance penalty for
+ * doing ATA passthrough no matter whether
+ * you're using NCQ/FPDMA, DMA or PIO.
+ * 4. It requires a 32-byte CDB, which at least at
+ * this point in CAM requires a CDB pointer, which
+ * would require us to allocate an additional bit
+ * of storage separate from the CCB.
+ */
+ error = scsi_ata_zac_mgmt_out(&ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*use_ncq*/ 0,
+ /*zm_action*/ zone_sa,
+ /*zone_id*/ lba,
+ /*zone_flags*/ zone_flags,
+ /*data_ptr*/ NULL,
+ /*dxfer_len*/ 0,
+ /*cdb_storage*/ NULL,
+ /*cdb_storage_len*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+ if (error != 0) {
+ error = EINVAL;
+ xpt_print(periph->path,
+ "scsi_ata_zac_mgmt_out() returned an "
+ "error!");
+ goto bailout;
+ }
+ }
+ *queue_ccb = 1;
+
+ break;
+ }
+ case DISK_ZONE_REPORT_ZONES: {
+ uint8_t *rz_ptr;
+ uint32_t num_entries, alloc_size;
+ struct disk_zone_report *rep;
+
+ rep = &bp->bio_zone.zone_params.report;
+
+ num_entries = rep->entries_allocated;
+ if (num_entries == 0) {
+ xpt_print(periph->path, "No entries allocated for "
+ "Report Zones request\n");
+ error = EINVAL;
+ goto bailout;
+ }
+ alloc_size = sizeof(struct scsi_report_zones_hdr) +
+ (sizeof(struct scsi_report_zones_desc) * num_entries);
+ alloc_size = min(alloc_size, softc->disk->d_maxsize);
+ rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
+ if (rz_ptr == NULL) {
+ xpt_print(periph->path, "Unable to allocate memory "
+ "for Report Zones request\n");
+ error = ENOMEM;
+ goto bailout;
+ }
+
+ if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
+ scsi_zbc_in(&ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbcfnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
+ /*zone_start_lba*/ rep->starting_id,
+ /*zone_options*/ rep->rep_options,
+ /*data_ptr*/ rz_ptr,
+ /*dxfer_len*/ alloc_size,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+ } else {
+ /*
+ * Note that in this case, even though we can
+ * technically use NCQ, we don't bother for several
+ * reasons:
+ * 1. It hasn't been tested on a SAT layer that
+ * supports it. This is new as of SAT-4.
+ * 2. Even when there is a SAT layer that supports
+ * it, that SAT layer will also probably support
+ * ZBC -> ZAC translation, since they are both
+ * in the SAT-4 spec.
+ * 3. Translation will likely be preferable to ATA
+ * passthrough. LSI / Avago at least single
+ * steps ATA passthrough commands in the HBA,
+ * regardless of protocol, so unless that
+ * changes, there is a performance penalty for
+ * doing ATA passthrough no matter whether
+ * you're using NCQ/FPDMA, DMA or PIO.
+ * 4. It requires a 32-byte CDB, which at least at
+ * this point in CAM requires a CDB pointer, which
+ * would require us to allocate an additional bit
+ * of storage separate from the CCB.
+ */
+ error = scsi_ata_zac_mgmt_in(&ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbcfnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*use_ncq*/ 0,
+ /*zm_action*/ ATA_ZM_REPORT_ZONES,
+ /*zone_id*/ rep->starting_id,
+ /*zone_flags*/ rep->rep_options,
+ /*data_ptr*/ rz_ptr,
+ /*dxfer_len*/ alloc_size,
+ /*cdb_storage*/ NULL,
+ /*cdb_storage_len*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+ if (error != 0) {
+ error = EINVAL;
+ xpt_print(periph->path,
+ "scsi_ata_zac_mgmt_in() returned an "
+ "error!");
+ goto bailout;
+ }
+ }
+
+ /*
+ * For BIO_ZONE, this isn't normally needed. However, it
+ * is used by devstat_end_transaction_bio() to determine
+ * how much data was transferred.
+ */
+ /*
+ * XXX KDM we have a problem. But I'm not sure how to fix
+ * it. devstat uses bio_bcount - bio_resid to calculate
+ * the amount of data transferred. The GEOM disk code
+ * uses bio_length - bio_resid to calculate the amount of
+ * data in bio_completed. We have different structure
+ * sizes above and below the ada(4) driver. So, if we
+ * use the sizes above, the amount transferred won't be
+ * quite accurate for devstat. If we use different sizes
+ * for bio_bcount and bio_length (above and below
+ * respectively), then the residual needs to match one or
+ * the other. Everything is calculated after the bio
+ * leaves the driver, so changing the values around isn't
+ * really an option. For now, just set the count to the
+ * passed in length. This means that the calculations
+ * above (e.g. bio_completed) will be correct, but the
+ * amount of data reported to devstat will be slightly
+ * under or overstated.
+ */
+ bp->bio_bcount = bp->bio_length;
+
+ *queue_ccb = 1;
+
+ break;
+ }
+ case DISK_ZONE_GET_PARAMS: {
+ struct disk_zone_disk_params *params;
+
+ params = &bp->bio_zone.zone_params.disk_params;
+ bzero(params, sizeof(*params));
+
+ switch (softc->zone_mode) {
+ case DA_ZONE_DRIVE_MANAGED:
+ params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
+ break;
+ case DA_ZONE_HOST_AWARE:
+ params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
+ break;
+ case DA_ZONE_HOST_MANAGED:
+ params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
+ break;
+ default:
+ case DA_ZONE_NONE:
+ params->zone_mode = DISK_ZONE_MODE_NONE;
+ break;
+ }
+
+ if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
+ params->flags |= DISK_ZONE_DISK_URSWRZ;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
+ params->optimal_seq_zones = softc->optimal_seq_zones;
+ params->flags |= DISK_ZONE_OPT_SEQ_SET;
+ }
+
+ if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
+ params->optimal_nonseq_zones =
+ softc->optimal_nonseq_zones;
+ params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
+ }
+
+ if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
+ params->max_seq_zones = softc->max_seq_zones;
+ params->flags |= DISK_ZONE_MAX_SEQ_SET;
+ }
+ if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
+ params->flags |= DISK_ZONE_RZ_SUP;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
+ params->flags |= DISK_ZONE_OPEN_SUP;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
+ params->flags |= DISK_ZONE_CLOSE_SUP;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
+ params->flags |= DISK_ZONE_FINISH_SUP;
+
+ if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
+ params->flags |= DISK_ZONE_RWP_SUP;
+ break;
+ }
+ default:
+ break;
+ }
+bailout:
+ return (error);
+}
+
static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
@@ -2473,6 +2987,20 @@ more:
SSD_FULL_SIZE,
da_default_timeout*1000);
break;
+ case BIO_ZONE: {
+ int error, queue_ccb;
+
+ queue_ccb = 0;
+
+ error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb);
+ if ((error != 0)
+ || (queue_ccb == 0)) {
+ biofinish(bp, NULL, error);
+ xpt_release_ccb(start_ccb);
+ return;
+ }
+ break;
+ }
}
start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
start_ccb->ccb_h.flags |= CAM_UNLOCKED;
@@ -2663,15 +3191,28 @@ out:
struct ata_params *ata_params;
if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
+ if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
+ || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
+ /*
+ * Note that if the ATA VPD page isn't
+ * supported, we aren't talking to an ATA
+ * device anyway. Support for that VPD
+ * page is mandatory for SCSI to ATA (SAT)
+ * translation layers.
+ */
+ softc->state = DA_STATE_PROBE_ZONE;
+ goto skipstate;
+ }
daprobedone(periph, start_ccb);
break;
}
ata_params = (struct ata_params*)
- malloc(sizeof(*ata_params), M_SCSIDA, M_NOWAIT|M_ZERO);
+		    malloc(sizeof(*ata_params), M_SCSIDA, M_NOWAIT|M_ZERO);
if (ata_params == NULL) {
- printf("dastart: Couldn't malloc ata_params data\n");
+ xpt_print(periph->path, "Couldn't malloc ata_params "
+ "data\n");
/* da_free_periph??? */
break;
}
@@ -2689,6 +3230,252 @@ out:
xpt_action(start_ccb);
break;
}
+ case DA_STATE_PROBE_ATA_LOGDIR:
+ {
+ struct ata_gp_log_dir *log_dir;
+ int retval;
+
+ retval = 0;
+
+ if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
+ /*
+ * If we don't have log support, not much point in
+ * trying to probe zone support.
+ */
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ /*
+ * If we have an ATA device (the SCSI ATA Information VPD
+ * page should be present and the ATA identify should have
+ * succeeded) and it supports logs, ask for the log directory.
+ */
+
+ log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
+ if (log_dir == NULL) {
+ xpt_print(periph->path, "Couldn't malloc log_dir "
+ "data\n");
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ retval = scsi_ata_read_log(&start_ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*log_address*/ ATA_LOG_DIRECTORY,
+ /*page_number*/ 0,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
+ AP_PROTO_DMA : AP_PROTO_PIO_IN,
+ /*data_ptr*/ (uint8_t *)log_dir,
+ /*dxfer_len*/ sizeof(*log_dir),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+
+ if (retval != 0) {
+ xpt_print(periph->path, "scsi_ata_read_log() failed!");
+ free(log_dir, M_SCSIDA);
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
+ xpt_action(start_ccb);
+ break;
+ }
+ case DA_STATE_PROBE_ATA_IDDIR:
+ {
+ struct ata_identify_log_pages *id_dir;
+ int retval;
+
+ retval = 0;
+
+ /*
+ * Check here to see whether the Identify Device log is
+ * supported in the directory of logs. If so, continue
+ * with requesting the log of identify device pages.
+ */
+ if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
+ if (id_dir == NULL) {
+ xpt_print(periph->path, "Couldn't malloc id_dir "
+ "data\n");
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ retval = scsi_ata_read_log(&start_ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_PAGE_LIST,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
+ AP_PROTO_DMA : AP_PROTO_PIO_IN,
+ /*data_ptr*/ (uint8_t *)id_dir,
+ /*dxfer_len*/ sizeof(*id_dir),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+
+ if (retval != 0) {
+ xpt_print(periph->path, "scsi_ata_read_log() failed!");
+ free(id_dir, M_SCSIDA);
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
+ xpt_action(start_ccb);
+ break;
+ }
+ case DA_STATE_PROBE_ATA_SUP:
+ {
+ struct ata_identify_log_sup_cap *sup_cap;
+ int retval;
+
+ retval = 0;
+
+ /*
+ * Check here to see whether the Supported Capabilities log
+ * is in the list of Identify Device logs.
+ */
+ if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
+ if (sup_cap == NULL) {
+ xpt_print(periph->path, "Couldn't malloc sup_cap "
+ "data\n");
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ retval = scsi_ata_read_log(&start_ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_SUP_CAP,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
+ AP_PROTO_DMA : AP_PROTO_PIO_IN,
+ /*data_ptr*/ (uint8_t *)sup_cap,
+ /*dxfer_len*/ sizeof(*sup_cap),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+
+ if (retval != 0) {
+ xpt_print(periph->path, "scsi_ata_read_log() failed!");
+ free(sup_cap, M_SCSIDA);
+ daprobedone(periph, start_ccb);
+ break;
+
+ }
+
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
+ xpt_action(start_ccb);
+ break;
+ }
+ case DA_STATE_PROBE_ATA_ZONE:
+ {
+ struct ata_zoned_info_log *ata_zone;
+ int retval;
+
+ retval = 0;
+
+ /*
+ * Check here to see whether the zoned device information
+ * page is supported. If so, continue on to request it.
+ * If not, skip to DA_STATE_PROBE_LOG or done.
+ */
+ if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
+ M_NOWAIT|M_ZERO);
+ if (ata_zone == NULL) {
+ xpt_print(periph->path, "Couldn't malloc ata_zone "
+ "data\n");
+ daprobedone(periph, start_ccb);
+ break;
+ }
+
+ retval = scsi_ata_read_log(&start_ccb->csio,
+ /*retries*/ da_retry_count,
+ /*cbfcnp*/ dadone,
+ /*tag_action*/ MSG_SIMPLE_Q_TAG,
+ /*log_address*/ ATA_IDENTIFY_DATA_LOG,
+ /*page_number*/ ATA_IDL_ZDI,
+ /*block_count*/ 1,
+ /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
+ AP_PROTO_DMA : AP_PROTO_PIO_IN,
+ /*data_ptr*/ (uint8_t *)ata_zone,
+ /*dxfer_len*/ sizeof(*ata_zone),
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ da_default_timeout * 1000);
+
+ if (retval != 0) {
+ xpt_print(periph->path, "scsi_ata_read_log() failed!");
+ free(ata_zone, M_SCSIDA);
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
+ xpt_action(start_ccb);
+
+ break;
+ }
+ case DA_STATE_PROBE_ZONE:
+ {
+ struct scsi_vpd_zoned_bdc *bdc;
+
+ /*
+ * Note that this page will be supported for SCSI protocol
+ * devices that support ZBC (SMR devices), as well as ATA
+ * protocol devices that are behind a SAT (SCSI to ATA
+ * Translation) layer that supports converting ZBC commands
+ * to their ZAC equivalents.
+ */
+ if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
+ daprobedone(periph, start_ccb);
+ break;
+ }
+ bdc = (struct scsi_vpd_zoned_bdc *)
+ malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
+
+ if (bdc == NULL) {
+ xpt_release_ccb(start_ccb);
+ xpt_print(periph->path, "Couldn't malloc zone VPD "
+ "data\n");
+ break;
+ }
+ scsi_inquiry(&start_ccb->csio,
+ /*retries*/da_retry_count,
+ /*cbfcnp*/dadone,
+ /*tag_action*/MSG_SIMPLE_Q_TAG,
+ /*inq_buf*/(u_int8_t *)bdc,
+ /*inq_len*/sizeof(*bdc),
+ /*evpd*/TRUE,
+ /*page_code*/SVPD_ZONED_BDC,
+ /*sense_len*/SSD_FULL_SIZE,
+ /*timeout*/da_default_timeout * 1000);
+ start_ccb->ccb_h.ccb_bp = NULL;
+ start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
+ xpt_action(start_ccb);
+ break;
+ }
}
}
@@ -3053,6 +3840,153 @@ cmd6workaround(union ccb *ccb)
}
static void
+dazonedone(struct cam_periph *periph, union ccb *ccb)
+{
+ struct da_softc *softc;
+ struct bio *bp;
+
+ softc = periph->softc;
+ bp = (struct bio *)ccb->ccb_h.ccb_bp;
+
+ switch (bp->bio_zone.zone_cmd) {
+ case DISK_ZONE_OPEN:
+ case DISK_ZONE_CLOSE:
+ case DISK_ZONE_FINISH:
+ case DISK_ZONE_RWP:
+ break;
+ case DISK_ZONE_REPORT_ZONES: {
+ uint32_t avail_len;
+ struct disk_zone_report *rep;
+ struct scsi_report_zones_hdr *hdr;
+ struct scsi_report_zones_desc *desc;
+ struct disk_zone_rep_entry *entry;
+ uint32_t num_alloced, hdr_len, num_avail;
+ uint32_t num_to_fill, i;
+ int ata;
+
+ rep = &bp->bio_zone.zone_params.report;
+ avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
+ /*
+ * Note that bio_resid isn't normally used for zone
+ * commands, but it is used by devstat_end_transaction_bio()
+ * to determine how much data was transferred. Because
+ * the size of the SCSI/ATA data structures is different
+ * than the size of the BIO interface structures, the
+ * amount of data actually transferred from the drive will
+ * be different than the amount of data transferred to
+ * the user.
+ */
+ bp->bio_resid = ccb->csio.resid;
+ num_alloced = rep->entries_allocated;
+ hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
+ if (avail_len < sizeof(*hdr)) {
+ /*
+ * Is there a better error than EIO here? We asked
+ * for at least the header, and we got less than
+ * that.
+ */
+ bp->bio_error = EIO;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ break;
+ }
+
+ if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
+ ata = 1;
+ else
+ ata = 0;
+
+ hdr_len = ata ? le32dec(hdr->length) :
+ scsi_4btoul(hdr->length);
+ if (hdr_len > 0)
+ rep->entries_available = hdr_len / sizeof(*desc);
+ else
+ rep->entries_available = 0;
+ /*
+ * NOTE: using the same values for the BIO version of the
+ * same field as the SCSI/ATA values. This means we could
+ * get some additional values that aren't defined in bio.h
+ * if more values of the same field are defined later.
+ */
+ rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
+ rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) :
+ scsi_8btou64(hdr->maximum_lba);
+ /*
+ * If the drive reports no entries that match the query,
+ * we're done.
+ */
+ if (hdr_len == 0) {
+ rep->entries_filled = 0;
+ break;
+ }
+
+ num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
+ hdr_len / sizeof(*desc));
+ /*
+ * If the drive didn't return any data, then we're done.
+ */
+ if (num_avail == 0) {
+ rep->entries_filled = 0;
+ break;
+ }
+
+ num_to_fill = min(num_avail, rep->entries_allocated);
+ /*
+ * If the user didn't allocate any entries for us to fill,
+ * we're done.
+ */
+ if (num_to_fill == 0) {
+ rep->entries_filled = 0;
+ break;
+ }
+
+ for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
+ i < num_to_fill; i++, desc++, entry++) {
+ /*
+ * NOTE: we're mapping the values here directly
+ * from the SCSI/ATA bit definitions to the bio.h
+			 * definitions. There is also a warning in
+ * disk_zone.h, but the impact is that if
+ * additional values are added in the SCSI/ATA
+ * specs these will be visible to consumers of
+ * this interface.
+ */
+ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
+ entry->zone_condition =
+ (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
+ SRZ_ZONE_COND_SHIFT;
+ entry->zone_flags |= desc->zone_flags &
+ (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
+ entry->zone_length =
+ ata ? le64dec(desc->zone_length) :
+ scsi_8btou64(desc->zone_length);
+ entry->zone_start_lba =
+ ata ? le64dec(desc->zone_start_lba) :
+ scsi_8btou64(desc->zone_start_lba);
+ entry->write_pointer_lba =
+ ata ? le64dec(desc->write_pointer_lba) :
+ scsi_8btou64(desc->write_pointer_lba);
+ }
+ rep->entries_filled = num_to_fill;
+ break;
+ }
+ case DISK_ZONE_GET_PARAMS:
+ default:
+ /*
+ * In theory we should not get a GET_PARAMS bio, since it
+ * should be handled without queueing the command to the
+ * drive.
+ */
+ panic("%s: Invalid zone command %d", __func__,
+ bp->bio_zone.zone_cmd);
+ break;
+ }
+
+ if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
+ free(ccb->csio.data_ptr, M_SCSIDA);
+}
+
+static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
struct da_softc *softc;
@@ -3147,11 +4081,14 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
} else if (bp != NULL) {
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
panic("REQ_CMP with QFRZN");
- if (state == DA_CCB_DELETE)
+ if (bp->bio_cmd == BIO_ZONE)
+ dazonedone(periph, done_ccb);
+ else if (state == DA_CCB_DELETE)
bp->bio_resid = 0;
else
bp->bio_resid = csio->resid;
- if (csio->resid > 0)
+ if ((csio->resid > 0)
+ && (bp->bio_cmd != BIO_ZONE))
bp->bio_flags |= BIO_ERROR;
if (softc->error_inject != 0) {
bp->bio_error = softc->error_inject;
@@ -3569,27 +4506,69 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
}
case DA_CCB_PROBE_BDC:
{
- struct scsi_vpd_block_characteristics *bdc;
+ struct scsi_vpd_block_device_characteristics *bdc;
- bdc = (struct scsi_vpd_block_characteristics *)csio->data_ptr;
+ bdc = (struct scsi_vpd_block_device_characteristics *)
+ csio->data_ptr;
if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ uint32_t valid_len;
+
/*
* Disable queue sorting for non-rotational media
* by default.
*/
u_int16_t old_rate = softc->disk->d_rotation_rate;
- softc->disk->d_rotation_rate =
- scsi_2btoul(bdc->medium_rotation_rate);
- if (softc->disk->d_rotation_rate ==
- SVPD_BDC_RATE_NON_ROTATING) {
- cam_iosched_set_sort_queue(softc->cam_iosched, 0);
- softc->rotating = 0;
+ valid_len = csio->dxfer_len - csio->resid;
+ if (SBDC_IS_PRESENT(bdc, valid_len,
+ medium_rotation_rate)) {
+ softc->disk->d_rotation_rate =
+ scsi_2btoul(bdc->medium_rotation_rate);
+ if (softc->disk->d_rotation_rate ==
+ SVPD_BDC_RATE_NON_ROTATING) {
+ cam_iosched_set_sort_queue(
+ softc->cam_iosched, 0);
+ softc->rotating = 0;
+ }
+ if (softc->disk->d_rotation_rate != old_rate) {
+ disk_attr_changed(softc->disk,
+ "GEOM::rotation_rate", M_NOWAIT);
+ }
}
- if (softc->disk->d_rotation_rate != old_rate) {
- disk_attr_changed(softc->disk,
- "GEOM::rotation_rate", M_NOWAIT);
+ if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
+ && (softc->zone_mode == DA_ZONE_NONE)) {
+ int ata_proto;
+
+ if (scsi_vpd_supported_page(periph,
+ SVPD_ATA_INFORMATION))
+ ata_proto = 1;
+ else
+ ata_proto = 0;
+
+ /*
+ * The Zoned field will only be set for
+ * Drive Managed and Host Aware drives. If
+ * they are Host Managed, the device type
+ * in the standard INQUIRY data should be
+ * set to T_ZBC_HM (0x14).
+ */
+ if ((bdc->flags & SVPD_ZBC_MASK) ==
+ SVPD_HAW_ZBC) {
+ softc->zone_mode = DA_ZONE_HOST_AWARE;
+ softc->zone_interface = (ata_proto) ?
+ DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
+ } else if ((bdc->flags & SVPD_ZBC_MASK) ==
+ SVPD_DM_ZBC) {
+ softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
+ softc->zone_interface = (ata_proto) ?
+ DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
+ } else if ((bdc->flags & SVPD_ZBC_MASK) !=
+ SVPD_ZBC_NR) {
+ xpt_print(periph->path, "Unknown zoned "
+ "type %#x",
+ bdc->flags & SVPD_ZBC_MASK);
+ }
}
} else {
int error;
@@ -3619,10 +4598,14 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
{
int i;
struct ata_params *ata_params;
+ int continue_probe;
+ int error;
int16_t *ptr;
ata_params = (struct ata_params *)csio->data_ptr;
ptr = (uint16_t *)ata_params;
+ continue_probe = 0;
+ error = 0;
if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
uint16_t old_rate;
@@ -3654,14 +4637,59 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
disk_attr_changed(softc->disk,
"GEOM::rotation_rate", M_NOWAIT);
}
+
+ if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
+ softc->flags |= DA_FLAG_CAN_ATA_DMA;
+
+ if (ata_params->support.extension &
+ ATA_SUPPORT_GENLOG)
+ softc->flags |= DA_FLAG_CAN_ATA_LOG;
+
+ /*
+ * At this point, if we have a SATA host aware drive,
+ * we communicate via ATA passthrough unless the
+			 * SAT layer supports ZBC -> ZAC translation. In
+			 * that case, we prefer the SAT layer's translation.
+ */
+ /*
+ * XXX KDM figure out how to detect a host managed
+ * SATA drive.
+ */
+ if (softc->zone_mode == DA_ZONE_NONE) {
+ /*
+ * Note that we don't override the zone
+ * mode or interface if it has already been
+ * set. This is because it has either been
+ * set as a quirk, or when we probed the
+ * SCSI Block Device Characteristics page,
+ * the zoned field was set. The latter
+ * means that the SAT layer supports ZBC to
+ * ZAC translation, and we would prefer to
+ * use that if it is available.
+ */
+ if ((ata_params->support3 &
+ ATA_SUPPORT_ZONE_MASK) ==
+ ATA_SUPPORT_ZONE_HOST_AWARE) {
+ softc->zone_mode = DA_ZONE_HOST_AWARE;
+ softc->zone_interface =
+ DA_ZONE_IF_ATA_PASS;
+ } else if ((ata_params->support3 &
+ ATA_SUPPORT_ZONE_MASK) ==
+ ATA_SUPPORT_ZONE_DEV_MANAGED) {
+ softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
+ softc->zone_interface =
+ DA_ZONE_IF_ATA_PASS;
+ }
+ }
+
} else {
- int error;
error = daerror(done_ccb, CAM_RETRY_SELTO,
SF_RETRY_UA|SF_NO_PRINT);
if (error == ERESTART)
return;
else if (error != 0) {
- if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
/* Don't wedge this device's queue */
cam_release_devq(done_ccb->ccb_h.path,
/*relsim_flags*/0,
@@ -3673,6 +4701,454 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
}
free(ata_params, M_SCSIDA);
+ if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
+ || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
+ /*
+ * If the ATA IDENTIFY failed, we could be talking
+ * to a SCSI drive, although that seems unlikely,
+ * since the drive did report that it supported the
+ * ATA Information VPD page. If the ATA IDENTIFY
+ * succeeded, and the SAT layer doesn't support
+ * ZBC -> ZAC translation, continue on to get the
+ * directory of ATA logs, and complete the rest of
+ * the ZAC probe. If the SAT layer does support
+ * ZBC -> ZAC translation, we want to use that,
+ * and we'll probe the SCSI Zoned Block Device
+ * Characteristics VPD page next.
+ */
+ if ((error == 0)
+ && (softc->flags & DA_FLAG_CAN_ATA_LOG)
+ && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
+ softc->state = DA_STATE_PROBE_ATA_LOGDIR;
+ else
+ softc->state = DA_STATE_PROBE_ZONE;
+ continue_probe = 1;
+ }
+ if (continue_probe != 0) {
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ } else
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ATA_LOGDIR:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ error = 0;
+ softc->valid_logdir_len = 0;
+ bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
+ softc->valid_logdir_len =
+ csio->dxfer_len - csio->resid;
+ if (softc->valid_logdir_len > 0)
+ bcopy(csio->data_ptr, &softc->ata_logdir,
+ min(softc->valid_logdir_len,
+ sizeof(softc->ata_logdir)));
+ /*
+ * Figure out whether the Identify Device log is
+ * supported. The General Purpose log directory
+ * has a header, and lists the number of pages
+ * available for each GP log identified by the
+ * offset into the list.
+ */
+ if ((softc->valid_logdir_len >=
+ ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
+ && (le16dec(softc->ata_logdir.header) ==
+ ATA_GP_LOG_DIR_VERSION)
+ && (le16dec(&softc->ata_logdir.num_pages[
+ (ATA_IDENTIFY_DATA_LOG *
+ sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
+ softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
+ } else {
+ softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
+ }
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA log directory,
+ * then ATA logs are effectively not
+ * supported even if the bit is set in the
+ * identify data.
+ */
+ softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
+ DA_FLAG_CAN_ATA_IDLOG);
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(csio->data_ptr, M_SCSIDA);
+
+ if ((error == 0)
+ && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
+ softc->state = DA_STATE_PROBE_ATA_IDDIR;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ATA_IDDIR:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ off_t entries_offset, max_entries;
+ error = 0;
+
+ softc->valid_iddir_len = 0;
+ bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
+ softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
+ DA_FLAG_CAN_ATA_ZONE);
+ softc->valid_iddir_len =
+ csio->dxfer_len - csio->resid;
+ if (softc->valid_iddir_len > 0)
+ bcopy(csio->data_ptr, &softc->ata_iddir,
+ min(softc->valid_iddir_len,
+ sizeof(softc->ata_iddir)));
+
+ entries_offset =
+ __offsetof(struct ata_identify_log_pages,entries);
+ max_entries = softc->valid_iddir_len - entries_offset;
+ if ((softc->valid_iddir_len > (entries_offset + 1))
+ && (le64dec(softc->ata_iddir.header) ==
+ ATA_IDLOG_REVISION)
+ && (softc->ata_iddir.entry_count > 0)) {
+ int num_entries, i;
+
+ num_entries = softc->ata_iddir.entry_count;
+ num_entries = min(num_entries,
+ softc->valid_iddir_len - entries_offset);
+ for (i = 0; i < num_entries &&
+ i < max_entries; i++) {
+ if (softc->ata_iddir.entries[i] ==
+ ATA_IDL_SUP_CAP)
+ softc->flags |=
+ DA_FLAG_CAN_ATA_SUPCAP;
+ else if (softc->ata_iddir.entries[i]==
+ ATA_IDL_ZDI)
+ softc->flags |=
+ DA_FLAG_CAN_ATA_ZONE;
+
+ if ((softc->flags &
+ DA_FLAG_CAN_ATA_SUPCAP)
+ && (softc->flags &
+ DA_FLAG_CAN_ATA_ZONE))
+ break;
+ }
+ }
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA Identify Data log
+ * directory, then it effectively isn't
+				 * supported even if the ATA Log directory
+				 * has a non-zero number of pages present
+ * this log.
+ */
+ softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(csio->data_ptr, M_SCSIDA);
+
+ if ((error == 0)
+ && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
+ softc->state = DA_STATE_PROBE_ATA_SUP;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ATA_SUP:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ uint32_t valid_len;
+ size_t needed_size;
+ struct ata_identify_log_sup_cap *sup_cap;
+ error = 0;
+
+ sup_cap = (struct ata_identify_log_sup_cap *)
+ csio->data_ptr;
+ valid_len = csio->dxfer_len - csio->resid;
+ needed_size =
+ __offsetof(struct ata_identify_log_sup_cap,
+ sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
+ if (valid_len >= needed_size) {
+ uint64_t zoned, zac_cap;
+
+ zoned = le64dec(sup_cap->zoned_cap);
+ if (zoned & ATA_ZONED_VALID) {
+ /*
+ * This should have already been
+ * set, because this is also in the
+ * ATA identify data.
+ */
+ if ((zoned & ATA_ZONED_MASK) ==
+ ATA_SUPPORT_ZONE_HOST_AWARE)
+ softc->zone_mode =
+ DA_ZONE_HOST_AWARE;
+ else if ((zoned & ATA_ZONED_MASK) ==
+ ATA_SUPPORT_ZONE_DEV_MANAGED)
+ softc->zone_mode =
+ DA_ZONE_DRIVE_MANAGED;
+ }
+
+ zac_cap = le64dec(sup_cap->sup_zac_cap);
+ if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
+ if (zac_cap & ATA_REPORT_ZONES_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_RZ_SUP;
+ if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_OPEN_SUP;
+ if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_CLOSE_SUP;
+ if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_FINISH_SUP;
+ if (zac_cap & ATA_ND_RWP_SUP)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_RWP_SUP;
+ } else {
+ /*
+ * This field was introduced in
+ * ACS-4, r08 on April 28th, 2015.
+ * If the drive firmware was written
+ * to an earlier spec, it won't have
+ * the field. So, assume all
+ * commands are supported.
+ */
+ softc->zone_flags |=
+ DA_ZONE_FLAG_SUP_MASK;
+ }
+
+ }
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ /*
+ * If we can't get the ATA Identify Data
+ * Supported Capabilities page, clear the
+ * flag...
+ */
+ softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
+ /*
+ * And clear zone capabilities.
+ */
+ softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
+
+ free(csio->data_ptr, M_SCSIDA);
+
+ if ((error == 0)
+ && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
+ softc->state = DA_STATE_PROBE_ATA_ZONE;
+ xpt_release_ccb(done_ccb);
+ xpt_schedule(periph, priority);
+ return;
+ }
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ATA_ZONE:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ struct ata_zoned_info_log *zi_log;
+ uint32_t valid_len;
+ size_t needed_size;
+
+ zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
+
+ valid_len = csio->dxfer_len - csio->resid;
+ needed_size = __offsetof(struct ata_zoned_info_log,
+ version_info) + 1 + sizeof(zi_log->version_info);
+ if (valid_len >= needed_size) {
+ uint64_t tmpvar;
+
+ tmpvar = le64dec(zi_log->zoned_cap);
+ if (tmpvar & ATA_ZDI_CAP_VALID) {
+ if (tmpvar & ATA_ZDI_CAP_URSWRZ)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_URSWRZ;
+ else
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_URSWRZ;
+ }
+ tmpvar = le64dec(zi_log->optimal_seq_zones);
+ if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
+ softc->zone_flags |=
+ DA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_seq_zones = (tmpvar &
+ ATA_ZDI_OPT_SEQ_MASK);
+ } else {
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_seq_zones = 0;
+ }
+
+ tmpvar =le64dec(zi_log->optimal_nonseq_zones);
+ if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
+ softc->zone_flags |=
+ DA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->optimal_nonseq_zones =
+ (tmpvar & ATA_ZDI_OPT_NS_MASK);
+ } else {
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->optimal_nonseq_zones = 0;
+ }
+
+ tmpvar = le64dec(zi_log->max_seq_req_zones);
+ if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
+ softc->zone_flags |=
+ DA_ZONE_FLAG_MAX_SEQ_SET;
+ softc->max_seq_zones =
+ (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
+ } else {
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_MAX_SEQ_SET;
+ softc->max_seq_zones = 0;
+ }
+ }
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
+ softc->flags &= ~DA_ZONE_FLAG_SET_MASK;
+
+ if ((done_ccb->ccb_h.status &
+ CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+
+ }
+ free(csio->data_ptr, M_SCSIDA);
+
+ daprobedone(periph, done_ccb);
+ return;
+ }
+ case DA_CCB_PROBE_ZONE:
+ {
+ int error;
+
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
+ uint32_t valid_len;
+ size_t needed_len;
+ struct scsi_vpd_zoned_bdc *zoned_bdc;
+
+ error = 0;
+ zoned_bdc = (struct scsi_vpd_zoned_bdc *)
+ csio->data_ptr;
+ valid_len = csio->dxfer_len - csio->resid;
+ needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
+ max_seq_req_zones) + 1 +
+ sizeof(zoned_bdc->max_seq_req_zones);
+ if ((valid_len >= needed_len)
+ && (scsi_2btoul(zoned_bdc->page_length) >=
+ SVPD_ZBDC_PL)) {
+ if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
+ softc->zone_flags |=
+ DA_ZONE_FLAG_URSWRZ;
+ else
+ softc->zone_flags &=
+ ~DA_ZONE_FLAG_URSWRZ;
+ softc->optimal_seq_zones =
+ scsi_4btoul(zoned_bdc->optimal_seq_zones);
+ softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
+ softc->optimal_nonseq_zones = scsi_4btoul(
+ zoned_bdc->optimal_nonseq_zones);
+ softc->zone_flags |=
+ DA_ZONE_FLAG_OPT_NONSEQ_SET;
+ softc->max_seq_zones =
+ scsi_4btoul(zoned_bdc->max_seq_req_zones);
+ softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
+ }
+ /*
+ * All of the zone commands are mandatory for SCSI
+ * devices.
+ *
+ * XXX KDM this is valid as of September 2015.
+ * Re-check this assumption once the SAT spec is
+ * updated to support SCSI ZBC to ATA ZAC mapping.
+ * Since ATA allows zone commands to be reported
+ * as supported or not, this may not necessarily
+ * be true for an ATA device behind a SAT (SCSI to
+ * ATA Translation) layer.
+ */
+ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
+ } else {
+ error = daerror(done_ccb, CAM_RETRY_SELTO,
+ SF_RETRY_UA|SF_NO_PRINT);
+ if (error == ERESTART)
+ return;
+ else if (error != 0) {
+ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge this device's queue */
+ cam_release_devq(done_ccb->ccb_h.path,
+ /*relsim_flags*/0,
+ /*reduction*/0,
+ /*timeout*/0,
+ /*getcount_only*/0);
+ }
+ }
+ }
daprobedone(periph, done_ccb);
return;
}
@@ -4167,3 +5643,253 @@ scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
}
#endif /* _KERNEL */
+
+void
+scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
+ uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ struct scsi_zbc_out *scsi_cmd;
+
+ scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = ZBC_OUT;
+ scsi_cmd->service_action = service_action;
+ scsi_u64to8b(zone_id, scsi_cmd->zone_id);
+ scsi_cmd->zone_flags = zone_flags;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
+ uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ struct scsi_zbc_in *scsi_cmd;
+
+ scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = ZBC_IN;
+ scsi_cmd->service_action = service_action;
+ scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
+ scsi_cmd->zone_options = zone_options;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+}
+
+int
+scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int use_ncq,
+ uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
+ uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t *cdb_storage, size_t cdb_storage_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ uint8_t command_out, protocol, ata_flags;
+ uint16_t features_out;
+ uint32_t sectors_out, auxiliary;
+ int retval;
+
+ retval = 0;
+
+ if (use_ncq == 0) {
+ command_out = ATA_ZAC_MANAGEMENT_OUT;
+ features_out = (zm_action & 0xf) | (zone_flags << 8),
+ ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
+ if (dxfer_len == 0) {
+ protocol = AP_PROTO_NON_DATA;
+ ata_flags |= AP_FLAG_TLEN_NO_DATA;
+ sectors_out = 0;
+ } else {
+ protocol = AP_PROTO_DMA;
+ ata_flags |= AP_FLAG_TLEN_SECT_CNT |
+ AP_FLAG_TDIR_TO_DEV;
+ sectors_out = ((dxfer_len >> 9) & 0xffff);
+ }
+ auxiliary = 0;
+ } else {
+ ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
+ if (dxfer_len == 0) {
+ command_out = ATA_NCQ_NON_DATA;
+ features_out = ATA_NCQ_ZAC_MGMT_OUT;
+ /*
+ * We're assuming the SCSI to ATA translation layer
+ * will set the NCQ tag number in the tag field.
+ * That isn't clear from the SAT-4 spec (as of rev 05).
+ */
+ sectors_out = 0;
+ ata_flags |= AP_FLAG_TLEN_NO_DATA;
+ } else {
+ command_out = ATA_SEND_FPDMA_QUEUED;
+ /*
+ * Note that we're defaulting to normal priority,
+ * and assuming that the SCSI to ATA translation
+ * layer will insert the NCQ tag number in the tag
+ * field. That isn't clear in the SAT-4 spec (as
+ * of rev 05).
+ */
+ sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
+
+ ata_flags |= AP_FLAG_TLEN_FEAT |
+ AP_FLAG_TDIR_TO_DEV;
+
+ /*
+ * For SEND FPDMA QUEUED, the transfer length is
+ * encoded in the FEATURE register, and 0 means
+			 * that 65536 512 byte blocks are to be transferred.
+ * In practice, it seems unlikely that we'll see
+			 * a transfer that large, and it may confuse
+			 * the SAT layer, because generally that means that
+ * 0 bytes should be transferred.
+ */
+ if (dxfer_len == (65536 * 512)) {
+ features_out = 0;
+ } else if (dxfer_len <= (65535 * 512)) {
+ features_out = ((dxfer_len >> 9) & 0xffff);
+ } else {
+ /* The transfer is too big. */
+ retval = 1;
+ goto bailout;
+ }
+
+ }
+
+ auxiliary = (zm_action & 0xf) | (zone_flags << 8);
+ protocol = AP_PROTO_FPDMA;
+ }
+
+ protocol |= AP_EXTEND;
+
+ retval = scsi_ata_pass(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
+ tag_action,
+ /*protocol*/ protocol,
+ /*ata_flags*/ ata_flags,
+ /*features*/ features_out,
+ /*sector_count*/ sectors_out,
+ /*lba*/ zone_id,
+ /*command*/ command_out,
+ /*device*/ 0,
+ /*icc*/ 0,
+ /*auxiliary*/ auxiliary,
+ /*control*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ dxfer_len,
+ /*cdb_storage*/ cdb_storage,
+ /*cdb_storage_len*/ cdb_storage_len,
+ /*minimum_cmd_size*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ timeout);
+
+bailout:
+
+ return (retval);
+}
+
+int
+scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int use_ncq,
+ uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
+ uint8_t *data_ptr, uint32_t dxfer_len,
+ uint8_t *cdb_storage, size_t cdb_storage_len,
+ uint8_t sense_len, uint32_t timeout)
+{
+ uint8_t command_out, protocol;
+ uint16_t features_out, sectors_out;
+ uint32_t auxiliary;
+ int ata_flags;
+ int retval;
+
+ retval = 0;
+ ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
+
+ if (use_ncq == 0) {
+ command_out = ATA_ZAC_MANAGEMENT_IN;
+ /* XXX KDM put a macro here */
+ features_out = (zm_action & 0xf) | (zone_flags << 8),
+ sectors_out = dxfer_len >> 9, /* XXX KDM macro*/
+ protocol = AP_PROTO_DMA;
+ ata_flags |= AP_FLAG_TLEN_SECT_CNT;
+ auxiliary = 0;
+ } else {
+ ata_flags |= AP_FLAG_TLEN_FEAT;
+
+ command_out = ATA_RECV_FPDMA_QUEUED;
+ sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
+
+ /*
+ * For RECEIVE FPDMA QUEUED, the transfer length is
+ * encoded in the FEATURE register, and 0 means
+	 * that 65536 512 byte blocks are to be transferred.
+ * In practice, it seems unlikely that we'll see
+	 * a transfer that large, and it may confuse
+	 * the SAT layer, because generally that means that
+ * 0 bytes should be transferred.
+ */
+ if (dxfer_len == (65536 * 512)) {
+ features_out = 0;
+ } else if (dxfer_len <= (65535 * 512)) {
+ features_out = ((dxfer_len >> 9) & 0xffff);
+ } else {
+ /* The transfer is too big. */
+ retval = 1;
+ goto bailout;
+ }
+ auxiliary = (zm_action & 0xf) | (zone_flags << 8),
+ protocol = AP_PROTO_FPDMA;
+ }
+
+ protocol |= AP_EXTEND;
+
+ retval = scsi_ata_pass(csio,
+ retries,
+ cbfcnp,
+ /*flags*/ CAM_DIR_IN,
+ tag_action,
+ /*protocol*/ protocol,
+ /*ata_flags*/ ata_flags,
+ /*features*/ features_out,
+ /*sector_count*/ sectors_out,
+ /*lba*/ zone_id,
+ /*command*/ command_out,
+ /*device*/ 0,
+ /*icc*/ 0,
+ /*auxiliary*/ auxiliary,
+ /*control*/ 0,
+ /*data_ptr*/ data_ptr,
+ /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
+ /*cdb_storage*/ cdb_storage,
+ /*cdb_storage_len*/ cdb_storage_len,
+ /*minimum_cmd_size*/ 0,
+ /*sense_len*/ SSD_FULL_SIZE,
+ /*timeout*/ timeout);
+
+bailout:
+ return (retval);
+}
diff --git a/sys/cam/scsi/scsi_da.h b/sys/cam/scsi/scsi_da.h
index ad4d0db..e6eb95f 100644
--- a/sys/cam/scsi/scsi_da.h
+++ b/sys/cam/scsi/scsi_da.h
@@ -153,6 +153,84 @@ struct scsi_read_defect_data_12
uint8_t control;
};
+/*
+ * ZBC OUT CDB (opcode 0x94).  Carries a zone management action --
+ * close, finish, open, or reset write pointer -- for the zone whose
+ * starting LBA is in zone_id, or for every zone when ZBC_OUT_ALL is
+ * set in zone_flags.
+ */
+struct scsi_zbc_out
+{
+	uint8_t opcode;
+	uint8_t service_action;		/* Which zone action to perform */
+#define ZBC_OUT_SA_CLOSE 0x01
+#define ZBC_OUT_SA_FINISH 0x02
+#define ZBC_OUT_SA_OPEN 0x03
+#define ZBC_OUT_SA_RWP 0x04		/* Reset write pointer */
+	uint8_t zone_id[8];		/* Starting LBA of the target zone */
+	uint8_t reserved[4];
+	uint8_t zone_flags;
+#define ZBC_OUT_ALL 0x01		/* Apply the action to all zones */
+	uint8_t control;
+};
+
+/*
+ * ZBC IN CDB (opcode 0x95).  With the REPORT ZONES service action,
+ * requests zone descriptors starting at zone_start_lba; the response
+ * is a scsi_report_zones_hdr followed by scsi_report_zones_desc
+ * entries.  The low bits of zone_options (ZBC_IN_REP_MASK) filter
+ * which zones are reported.
+ */
+struct scsi_zbc_in
+{
+	uint8_t opcode;
+	uint8_t service_action;
+#define ZBC_IN_SA_REPORT_ZONES 0x00
+	uint8_t zone_start_lba[8];	/* First zone to report on */
+	uint8_t length[4];		/* Allocation length for the response */
+	uint8_t zone_options;		/* Reporting filter plus flags below */
+#define ZBC_IN_PARTIAL 0x80	/* NOTE(review): partial-report flag -- confirm vs. ZBC spec */
+#define ZBC_IN_REP_ALL_ZONES 0x00
+#define ZBC_IN_REP_EMPTY 0x01
+#define ZBC_IN_REP_IMP_OPEN 0x02	/* Implicitly open zones */
+#define ZBC_IN_REP_EXP_OPEN 0x03	/* Explicitly open zones */
+#define ZBC_IN_REP_CLOSED 0x04
+#define ZBC_IN_REP_FULL 0x05
+#define ZBC_IN_REP_READONLY 0x06
+#define ZBC_IN_REP_OFFLINE 0x07
+#define ZBC_IN_REP_RESET 0x10	/* Zones with the SRZ_ZONE_RESET attribute */
+#define ZBC_IN_REP_NON_SEQ 0x11	/* Zones with the SRZ_ZONE_NON_SEQ attribute */
+#define ZBC_IN_REP_NON_WP 0x3f	/* Zones without a write pointer */
+#define ZBC_IN_REP_MASK 0x3f
+	uint8_t control;
+};
+
+/*
+ * One 64-byte zone descriptor, returned by REPORT ZONES after the
+ * scsi_report_zones_hdr.  Describes a single zone's type, condition,
+ * length, starting LBA and current write pointer.
+ */
+struct scsi_report_zones_desc {
+	uint8_t zone_type;		/* Type in the low nibble */
+#define SRZ_TYPE_CONVENTIONAL 0x01
+#define SRZ_TYPE_SEQ_REQUIRED 0x02	/* Sequential write required */
+#define SRZ_TYPE_SEQ_PREFERRED 0x03	/* Sequential write preferred */
+#define SRZ_TYPE_MASK 0x0f
+	uint8_t zone_flags;		/* Condition in the high nibble, flags in the low bits */
+#define SRZ_ZONE_COND_SHIFT 4
+#define SRZ_ZONE_COND_MASK 0xf0
+#define SRZ_ZONE_COND_NWP 0x00		/* No write pointer */
+#define SRZ_ZONE_COND_EMPTY 0x10
+#define SRZ_ZONE_COND_IMP_OPEN 0x20	/* Implicitly opened */
+#define SRZ_ZONE_COND_EXP_OPEN 0x30	/* Explicitly opened */
+#define SRZ_ZONE_COND_CLOSED 0x40
+#define SRZ_ZONE_COND_READONLY 0xd0
+#define SRZ_ZONE_COND_FULL 0xe0
+#define SRZ_ZONE_COND_OFFLINE 0xf0
+#define SRZ_ZONE_NON_SEQ 0x02		/* Non-sequential resources active */
+#define SRZ_ZONE_RESET 0x01		/* Write pointer reset flag */
+	uint8_t reserved[6];
+	uint8_t zone_length[8];		/* Zone length (presumably in logical blocks) */
+	uint8_t zone_start_lba[8];
+	uint8_t write_pointer_lba[8];
+	uint8_t reserved2[32];		/* Pads the descriptor to 64 bytes */
+};
+
+/*
+ * 64-byte header at the start of REPORT ZONES response data, followed
+ * by a variable-length list of scsi_report_zones_desc entries.  The
+ * SAME field in byte4 says whether all zones share the same length
+ * and type.
+ */
+struct scsi_report_zones_hdr {
+	uint8_t length[4];	/* presumably bytes of descriptor data that follow -- confirm vs. ZBC spec */
+	uint8_t byte4;			/* SAME field (SRZ_SAME_MASK) */
+#define SRZ_SAME_ALL_DIFFERENT 0x00	/* Lengths and types vary */
+#define SRZ_SAME_ALL_SAME 0x01	/* Lengths and types the same */
+#define SRZ_SAME_LAST_DIFFERENT 0x02	/* Types same, last length varies */
+#define SRZ_SAME_TYPES_DIFFERENT 0x03	/* Types vary, length the same */
+#define SRZ_SAME_MASK 0x0f
+	uint8_t reserved[3];
+	uint8_t maximum_lba[8];
+	uint8_t reserved2[48];		/* Pads the header to 64 bytes */
+	struct scsi_report_zones_desc desc_list[];	/* Flexible array of zone descriptors */
+};
/*
* Opcodes
@@ -167,6 +245,8 @@ struct scsi_read_defect_data_12
#define VERIFY 0x2f
#define READ_DEFECT_DATA_10 0x37
#define SANITIZE 0x48
+#define ZBC_OUT 0x94	/* Zone management actions; CDB is struct scsi_zbc_out */
+#define ZBC_IN 0x95	/* e.g. REPORT ZONES; CDB is struct scsi_zbc_in */
#define READ_DEFECT_DATA_12 0xb7
struct format_defect_list_header
@@ -581,6 +661,38 @@ void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
u_int32_t timeout);
#endif /* !_KERNEL */
+
+/*
+ * Fill out a CCB with a ZBC OUT command (zone close/finish/open or
+ * reset write pointer) for the zone starting at zone_id.
+ */
+void scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
+		  void (*cbfcnp)(struct cam_periph *, union ccb *),
+		  uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
+		  uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
+		  uint8_t sense_len, uint32_t timeout);
+
+/*
+ * Fill out a CCB with a ZBC IN command (e.g. REPORT ZONES) starting
+ * at zone_start_lba; zone_options takes the ZBC_IN_* flags.
+ */
+void scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
+		 void (*cbfcnp)(struct cam_periph *, union ccb *),
+		 uint8_t tag_action, uint8_t service_action,
+		 uint64_t zone_start_lba, uint8_t zone_options,
+		 uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len,
+		 uint32_t timeout);
+
+/*
+ * Fill out a CCB carrying an ATA ZAC management-out action via ATA
+ * PASS-THROUGH (or the NCQ encoding when use_ncq is nonzero).
+ * Returns 0 on success, nonzero if the request cannot be encoded.
+ */
+int scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
+			  void (*cbfcnp)(struct cam_periph *, union ccb *),
+			  uint8_t tag_action, int use_ncq,
+			  uint8_t zm_action, uint64_t zone_id,
+			  uint8_t zone_flags, uint8_t *data_ptr,
+			  uint32_t dxfer_len, uint8_t *cdb_storage,
+			  size_t cdb_storage_len, uint8_t sense_len,
+			  uint32_t timeout);
+
+/*
+ * Fill out a CCB carrying an ATA ZAC management-in action via ATA
+ * PASS-THROUGH (DMA) or, with use_ncq, RECEIVE FPDMA QUEUED.
+ * Returns 0 on success, 1 if the transfer is too large to encode.
+ */
+int scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
+			 void (*cbfcnp)(struct cam_periph *, union ccb *),
+			 uint8_t tag_action, int use_ncq,
+			 uint8_t zm_action, uint64_t zone_id,
+			 uint8_t zone_flags, uint8_t *data_ptr,
+			 uint32_t dxfer_len, uint8_t *cdb_storage,
+			 size_t cdb_storage_len, uint8_t sense_len,
+			 uint32_t timeout);
+
__END_DECLS
#endif /* _SCSI_SCSI_DA_H */
OpenPOWER on IntegriCloud