Diffstat (limited to 'sys/cam')
-rw-r--r--  sys/cam/ctl/ctl.c            |    6
-rw-r--r--  sys/cam/ctl/ctl_cmd_table.c  |   24
-rw-r--r--  sys/cam/ctl/ctl_private.h    |   13
-rw-r--r--  sys/cam/ctl/ctl_ser_table.c  |    2
-rw-r--r--  sys/cam/ctl/ctl_tpc.c        |  895
-rw-r--r--  sys/cam/scsi/scsi_all.c      |   31
-rw-r--r--  sys/cam/scsi/scsi_all.h      |  174
7 files changed, 1090 insertions(+), 55 deletions(-)
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 421360c..fa05d44 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -1025,6 +1025,7 @@ ctl_init(void)
STAILQ_INIT(&softc->port_list);
STAILQ_INIT(&softc->be_list);
STAILQ_INIT(&softc->io_pools);
+ ctl_tpc_init(softc);
if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
&internal_pool)!= 0){
@@ -1172,6 +1173,7 @@ ctl_shutdown(void)
mtx_destroy(&softc->queue_lock);
#endif
+ ctl_tpc_shutdown(softc);
mtx_destroy(&softc->pool_lock);
mtx_destroy(&softc->ctl_lock);
@@ -4598,7 +4600,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
TAILQ_INIT(&lun->ooa_queue);
TAILQ_INIT(&lun->blocked_queue);
STAILQ_INIT(&lun->error_list);
- ctl_tpc_init(lun);
+ ctl_tpc_lun_init(lun);
/*
* Initialize the mode page index.
@@ -4750,7 +4752,7 @@ ctl_free_lun(struct ctl_lun *lun)
atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
- ctl_tpc_shutdown(lun);
+ ctl_tpc_lun_shutdown(lun);
mtx_destroy(&lun->lun_lock);
free(lun->lun_devid, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
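The ctl.c hunks split the old per-LUN ctl_tpc_init()/ctl_tpc_shutdown() into per-softc and per-LUN halves: the softc now owns the global ROD token list and its expiry callout, while each LUN keeps only its tpc_lists. The callout pattern used (see the ctl_tpc.c hunks below) is standard FreeBSD callout(9); a minimal self-contained sketch of it, with illustrative names that are not part of this commit:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/callout.h>

    struct reaper {
            struct mtx      lock;
            struct callout  tick;
    };

    static void
    reaper_tick(void *arg)
    {
            struct reaper *r = arg;

            /* callout_init_mtx() arranges for r->lock to be held here. */
            /* ... expire stale entries ... */
            callout_schedule(&r->tick, hz);         /* re-arm in one second */
    }

    static void
    reaper_start(struct reaper *r)
    {
            mtx_init(&r->lock, "reaper", NULL, MTX_DEF);
            callout_init_mtx(&r->tick, &r->lock, 0);
            callout_reset(&r->tick, hz, reaper_tick, r);
    }

    static void
    reaper_stop(struct reaper *r)
    {
            callout_drain(&r->tick);        /* waits out an in-flight tick */
            mtx_destroy(&r->lock);
    }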
diff --git a/sys/cam/ctl/ctl_cmd_table.c b/sys/cam/ctl/ctl_cmd_table.c
index 1240f2a..3066483 100644
--- a/sys/cam/ctl/ctl_cmd_table.c
+++ b/sys/cam/ctl/ctl_cmd_table.c
@@ -248,10 +248,18 @@ const struct ctl_cmd_entry ctl_cmd_table_83[32] =
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* 10 POPULATE TOKEN */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_populate_token, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x10, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* 11 WRITE USING TOKEN */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_write_using_token, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x11, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* 12 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -334,10 +342,18 @@ const struct ctl_cmd_entry ctl_cmd_table_84[32] =
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* 07 RECEIVE ROD TOKEN INFORMATION */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_receive_rod_token_information, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 16, {0x07, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* 08 REPORT ALL ROD TOKENS */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_report_all_rod_tokens, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 16, {0x08, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
};
/* 9E SERVICE ACTION IN(16) */
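Each new ctl_cmd_table entry pairs its handler with CTL_SERIDX_RD_CAP (so these commands serialize like READ CAPACITY, per the ctl_ser_table.c change below), data-direction flags, a CDB length of 16, and a per-byte usage map in which 0xff marks bytes the initiator may set freely. A hedged sketch of how such a map can validate an incoming CDB (illustrative only, not this commit's validation path):

    /* Return nonzero if the CDB sets any bit outside the usage map. */
    static int
    cdb_check_usage(const uint8_t *cdb, const uint8_t *usage, int len)
    {
            int i;

            /* Byte 0 is the opcode, matched exactly by the table lookup. */
            for (i = 1; i < len; i++) {
                    if ((cdb[i] & ~usage[i]) != 0)
                            return (1);     /* reserved bit set */
            }
            return (0);
    }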
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index 23298fa..46527ba 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -422,6 +422,7 @@ struct ctl_thread {
STAILQ_HEAD(, ctl_io_hdr) isc_queue;
};
+struct tpc_token;
struct ctl_softc {
struct mtx ctl_lock;
struct cdev *dev;
@@ -460,6 +461,8 @@ struct ctl_softc {
time_t last_print_jiffies;
uint32_t skipped_prints;
struct ctl_thread threads[CTL_MAX_THREADS];
+ TAILQ_HEAD(tpc_tokens, tpc_token) tpc_tokens;
+ struct callout tpc_timeout;
};
#ifdef _KERNEL
@@ -500,8 +503,10 @@ int ctl_report_supported_tmf(struct ctl_scsiio *ctsio);
int ctl_report_timestamp(struct ctl_scsiio *ctsio);
int ctl_isc(struct ctl_scsiio *ctsio);
-void ctl_tpc_init(struct ctl_lun *lun);
-void ctl_tpc_shutdown(struct ctl_lun *lun);
+void ctl_tpc_init(struct ctl_softc *softc);
+void ctl_tpc_shutdown(struct ctl_softc *softc);
+void ctl_tpc_lun_init(struct ctl_lun *lun);
+void ctl_tpc_lun_shutdown(struct ctl_lun *lun);
int ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len);
int ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio);
int ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio);
@@ -510,6 +515,10 @@ int ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio);
int ctl_extended_copy_lid1(struct ctl_scsiio *ctsio);
int ctl_extended_copy_lid4(struct ctl_scsiio *ctsio);
int ctl_copy_operation_abort(struct ctl_scsiio *ctsio);
+int ctl_populate_token(struct ctl_scsiio *ctsio);
+int ctl_write_using_token(struct ctl_scsiio *ctsio);
+int ctl_receive_rod_token_information(struct ctl_scsiio *ctsio);
+int ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio);
#endif /* _KERNEL */
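struct tpc_token is only forward-declared in ctl_private.h; TAILQ_HEAD() from queue(3) needs nothing but the element type's tag to declare a list head, so the header never sees the token layout, which stays private to ctl_tpc.c. A self-contained model of the idiom (hypothetical names):

    #include <sys/queue.h>

    struct widget;                          /* opaque to this header */
    TAILQ_HEAD(widget_list, widget);        /* head type is complete anyway */

    struct owner {
            struct widget_list widgets;     /* embeds no widget details */
    };

    /*
     * Only the .c file that walks the list needs the full definition:
     * struct widget { ...; TAILQ_ENTRY(widget) links; };
     */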
diff --git a/sys/cam/ctl/ctl_ser_table.c b/sys/cam/ctl/ctl_ser_table.c
index c680fb5..2bf39e6 100644
--- a/sys/cam/ctl/ctl_ser_table.c
+++ b/sys/cam/ctl/ctl_ser_table.c
@@ -69,7 +69,7 @@ ctl_serialize_table[CTL_SERIDX_COUNT][CTL_SERIDX_COUNT] = {
/*MD_SEL */{ bK, bK, bK, bK, bK, bK, bK, pS, pS, bK, pS, bK, bK},
/*RQ_SNS */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, bK, pS, bK, bK},
/*INQ */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, bK},
-/*RD_CAP */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, bK},
+/*RD_CAP */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, pS},
/*RES */{ bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK},
/*LOG_SNS */{ pS, pS, pS, pS, pS, bK, bK, pS, pS, bK, pS, bK, bK},
/*FORMAT */{ pS, bK, bK, bK, bK, bK, pS, pS, bK, bK, bK, bK, bK},
diff --git a/sys/cam/ctl/ctl_tpc.c b/sys/cam/ctl/ctl_tpc.c
index 06263b3..8e94a12 100644
--- a/sys/cam/ctl/ctl_tpc.c
+++ b/sys/cam/ctl/ctl_tpc.c
@@ -65,6 +65,10 @@ __FBSDID("$FreeBSD$");
#define TPC_MAX_INLINE 0
#define TPC_MAX_LISTS 255
#define TPC_MAX_IO_SIZE (1024 * 1024)
+#define TPC_MAX_IOCHUNK_SIZE (TPC_MAX_IO_SIZE * 16)
+#define TPC_MIN_TOKEN_TIMEOUT 1
+#define TPC_DFL_TOKEN_TIMEOUT 60
+#define TPC_MAX_TOKEN_TIMEOUT 600
MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");
@@ -86,6 +90,19 @@ struct tpc_io {
TAILQ_ENTRY(tpc_io) links;
};
+struct tpc_token {
+ uint8_t token[512];
+ uint64_t lun;
+ uint32_t blocksize;
+ uint8_t *params;
+ struct scsi_range_desc *range;
+ int nrange;
+ int active;
+ time_t last_active;
+ uint32_t timeout;
+ TAILQ_ENTRY(tpc_token) links;
+};
+
struct tpc_list {
uint8_t service_action;
int init_port;
@@ -99,43 +116,127 @@ struct tpc_list {
int ncscd;
int nseg;
int leninl;
+ struct tpc_token *token;
+ struct scsi_range_desc *range;
+ int nrange;
+ off_t offset_into_rod;
+
int curseg;
+ off_t cursectors;
off_t curbytes;
int curops;
int stage;
uint8_t *buf;
- int segbytes;
+ off_t segsectors;
+ off_t segbytes;
int tbdio;
int error;
int abort;
int completed;
+ time_t last_active;
TAILQ_HEAD(, tpc_io) allio;
struct scsi_sense_data sense_data;
uint8_t sense_len;
uint8_t scsi_status;
struct ctl_scsiio *ctsio;
struct ctl_lun *lun;
+ int res_token_valid;
+ uint8_t res_token[512];
TAILQ_ENTRY(tpc_list) links;
};
+extern struct ctl_softc *control_softc;
+
+static void
+tpc_timeout(void *arg)
+{
+ struct ctl_softc *softc = arg;
+ struct ctl_lun *lun;
+ struct tpc_token *token, *ttoken;
+ struct tpc_list *list, *tlist;
+
+ /* Free completed lists with expired timeout. */
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
+ TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
+ if (!list->completed || time_uptime < list->last_active +
+ TPC_DFL_TOKEN_TIMEOUT)
+ continue;
+ TAILQ_REMOVE(&lun->tpc_lists, list, links);
+ free(list, M_CTL);
+ }
+ mtx_unlock(&lun->lun_lock);
+ }
+
+ /* Free inactive ROD tokens with expired timeout. */
+ TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
+ if (token->active ||
+ time_uptime < token->last_active + token->timeout + 1)
+ continue;
+ TAILQ_REMOVE(&softc->tpc_tokens, token, links);
+ free(token->params, M_CTL);
+ free(token, M_CTL);
+ }
+ callout_schedule(&softc->tpc_timeout, hz);
+}
+
+void
+ctl_tpc_init(struct ctl_softc *softc)
+{
+
+ TAILQ_INIT(&softc->tpc_tokens);
+ callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
+ callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc);
+}
+
void
-ctl_tpc_init(struct ctl_lun *lun)
+ctl_tpc_shutdown(struct ctl_softc *softc)
+{
+ struct tpc_token *token;
+
+ callout_drain(&softc->tpc_timeout);
+
+ /* Free ROD tokens. */
+ mtx_lock(&softc->ctl_lock);
+ while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
+ TAILQ_REMOVE(&softc->tpc_tokens, token, links);
+ free(token->params, M_CTL);
+ free(token, M_CTL);
+ }
+ mtx_unlock(&softc->ctl_lock);
+}
+
+void
+ctl_tpc_lun_init(struct ctl_lun *lun)
{
TAILQ_INIT(&lun->tpc_lists);
}
void
-ctl_tpc_shutdown(struct ctl_lun *lun)
+ctl_tpc_lun_shutdown(struct ctl_lun *lun)
{
struct tpc_list *list;
+ struct tpc_token *token, *ttoken;
+ /* Free lists for this LUN. */
while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
TAILQ_REMOVE(&lun->tpc_lists, list, links);
KASSERT(list->completed,
("Not completed TPC (%p) on shutdown", list));
free(list, M_CTL);
}
+
+ /* Free ROD tokens for this LUN. */
+ mtx_lock(&control_softc->ctl_lock);
+ TAILQ_FOREACH_SAFE(token, &control_softc->tpc_tokens, links, ttoken) {
+ if (token->lun != lun->lun || token->active)
+ continue;
+ TAILQ_REMOVE(&control_softc->tpc_tokens, token, links);
+ free(token->params, M_CTL);
+ free(token, M_CTL);
+ }
+ mtx_unlock(&control_softc->ctl_lock);
}
int
@@ -143,11 +244,16 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
struct scsi_vpd_tpc *tpc_ptr;
struct scsi_vpd_tpc_descriptor *d_ptr;
+ struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
+ struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
+ struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
+ struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
+ struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
struct ctl_lun *lun;
int data_len;
@@ -155,11 +261,16 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data_len = sizeof(struct scsi_vpd_tpc) +
+ sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
- 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 7, 4) +
+ 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
sizeof(struct scsi_vpd_tpc_descriptor_pd) +
roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
+ sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
+ sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
+ sizeof(struct scsi_vpd_tpc_descriptor_srt) +
+ 2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
sizeof(struct scsi_vpd_tpc_descriptor_gco);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
@@ -191,26 +302,44 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
tpc_ptr->page_code = SVPD_SCSI_TPC;
scsi_ulto2b(data_len - 4, tpc_ptr->page_length);
- /* Supported commands */
+ /* Block Device ROD Limits */
d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
+ bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
+ scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
+ scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
+ scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
+ scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
+ bdrl_ptr->maximum_inactivity_timeout);
+ scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
+ bdrl_ptr->default_inactivity_timeout);
+ scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
+ scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);
+
+ /* Supported commands */
+ d_ptr = (struct scsi_vpd_tpc_descriptor *)
+ (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
- sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 7;
+ sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
scd_ptr = &sc_ptr->descr[0];
scd_ptr->opcode = EXTENDED_COPY;
- scd_ptr->sa_length = 3;
+ scd_ptr->sa_length = 5;
scd_ptr->supported_service_actions[0] = EC_EC_LID1;
scd_ptr->supported_service_actions[1] = EC_EC_LID4;
- scd_ptr->supported_service_actions[2] = EC_COA;
+ scd_ptr->supported_service_actions[2] = EC_PT;
+ scd_ptr->supported_service_actions[3] = EC_WUT;
+ scd_ptr->supported_service_actions[4] = EC_COA;
scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
&scd_ptr->supported_service_actions[scd_ptr->sa_length];
scd_ptr->opcode = RECEIVE_COPY_STATUS;
- scd_ptr->sa_length = 4;
+ scd_ptr->sa_length = 6;
scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
scd_ptr->supported_service_actions[1] = RCS_RCFD;
scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
scd_ptr->supported_service_actions[3] = RCS_RCOP;
+ scd_ptr->supported_service_actions[4] = RCS_RRTI;
+ scd_ptr->supported_service_actions[5] = RCS_RART;
/* Parameter data. */
d_ptr = (struct scsi_vpd_tpc_descriptor *)
@@ -244,6 +373,47 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
scsi_ulto2b(2, sdid_ptr->list_length);
scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);
+ /* ROD Token Features */
+ d_ptr = (struct scsi_vpd_tpc_descriptor *)
+ (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
+ rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
+ scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
+ scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
+ rtf_ptr->remote_tokens = 0;
+ scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
+ scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
+ scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
+ rtf_ptr->maximum_token_inactivity_timeout);
+ scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
+ rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
+ &rtf_ptr->type_specific_features;
+ rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
+ scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
+ scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
+ scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
+ scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
+ scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
+ rtfb_ptr->optimal_bytes_to_token_per_segment);
+ scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
+ rtfb_ptr->optimal_bytes_from_token_per_segment);
+
+ /* Supported ROD Tokens */
+ d_ptr = (struct scsi_vpd_tpc_descriptor *)
+ (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
+ srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
+ scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
+ scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
+ scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
+ srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
+ &srt_ptr->rod_type_descriptors;
+ scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
+ srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
+ scsi_ulto2b(0, srtd_ptr->preference_indicator);
+ srtd_ptr++;
+ scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
+ srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
+ scsi_ulto2b(0, srtd_ptr->preference_indicator);
+
/* General Copy Operations */
d_ptr = (struct scsi_vpd_tpc_descriptor *)
(&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
@@ -267,7 +437,6 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
struct scsi_receive_copy_operating_parameters *cdb;
struct scsi_receive_copy_operating_parameters_data *data;
int retval;
@@ -276,7 +445,6 @@ ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
retval = CTL_RETVAL_COMPLETE;
@@ -661,6 +829,7 @@ complete:
/*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
return (CTL_RETVAL_ERROR);
} else {
+ list->cursectors += list->segsectors;
list->curbytes += list->segbytes;
return (CTL_RETVAL_COMPLETE);
}
@@ -706,6 +875,7 @@ complete:
list->buf = malloc(numbytes, M_CTL, M_WAITOK);
list->segbytes = numbytes;
+ list->segsectors = numbytes / dstblock;
donebytes = 0;
TAILQ_INIT(&run);
prun = &run;
@@ -901,6 +1071,197 @@ complete:
return (CTL_RETVAL_QUEUED);
}
+static off_t
+tpc_ranges_length(struct scsi_range_desc *range, int nrange)
+{
+ off_t length = 0;
+ int r;
+
+ for (r = 0; r < nrange; r++)
+ length += scsi_4btoul(range[r].length);
+ return (length);
+}
+
+static int
+tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
+ int *srange, off_t *soffset)
+{
+ off_t off;
+ int r;
+
+ r = 0;
+ off = 0;
+ while (r < nrange) {
+ if (skip - off < scsi_4btoul(range[r].length)) {
+ *srange = r;
+ *soffset = skip - off;
+ return (0);
+ }
+ off += scsi_4btoul(range[r].length);
+ r++;
+ }
+ return (-1);
+}
+
+static int
+tpc_process_wut(struct tpc_list *list)
+{
+ struct tpc_io *tio, *tior, *tiow;
+ struct runl run, *prun;
+ int drange, srange;
+ off_t doffset, soffset;
+ off_t srclba, dstlba, numbytes, donebytes, roundbytes;
+ uint32_t srcblock, dstblock;
+
+ if (list->stage > 0) {
+complete:
+ /* Cleanup after previous rounds. */
+ while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
+ TAILQ_REMOVE(&list->allio, tio, links);
+ ctl_free_io(tio->io);
+ free(tio, M_CTL);
+ }
+ free(list->buf, M_CTL);
+ if (list->abort) {
+ ctl_set_task_aborted(list->ctsio);
+ return (CTL_RETVAL_ERROR);
+ } else if (list->error) {
+ ctl_set_sense(list->ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_COPY_ABORTED,
+ /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
+ return (CTL_RETVAL_ERROR);
+ }
+ list->cursectors += list->segsectors;
+ list->curbytes += list->segbytes;
+ }
+
+ /* Check where we are on destination ranges list. */
+ if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
+ &drange, &doffset) != 0)
+ return (CTL_RETVAL_COMPLETE);
+ dstblock = list->lun->be_lun->blocksize;
+
+ /* Special case: no token == Block device zero ROD token */
+ if (list->token == NULL) {
+ srcblock = 1;
+ srclba = 0;
+ numbytes = INT64_MAX;
+ goto dstp;
+ }
+
+ /* Check where we are on source ranges list. */
+ srcblock = list->token->blocksize;
+ if (tpc_skip_ranges(list->token->range, list->token->nrange,
+ list->offset_into_rod + list->cursectors * dstblock / srcblock,
+ &srange, &soffset) != 0) {
+ ctl_set_sense(list->ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_COPY_ABORTED,
+ /*asc*/ 0x0d, /*ascq*/ 0x04, SSD_ELEM_NONE);
+ return (CTL_RETVAL_ERROR);
+ }
+
+ srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
+ numbytes = srcblock * omin(TPC_MAX_IOCHUNK_SIZE / srcblock,
+ (scsi_4btoul(list->token->range[srange].length) - soffset));
+dstp:
+ dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
+ numbytes = omin(numbytes,
+ dstblock * omin(TPC_MAX_IOCHUNK_SIZE / dstblock,
+ (scsi_4btoul(list->range[drange].length) - doffset)));
+
+ if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
+ ctl_set_sense(list->ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_COPY_ABORTED,
+ /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
+ return (CTL_RETVAL_ERROR);
+ }
+
+ list->buf = malloc(numbytes, M_CTL, M_WAITOK |
+ (list->token == NULL ? M_ZERO : 0));
+ list->segbytes = numbytes;
+ list->segsectors = numbytes / dstblock;
+//printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
+// srclba, dstlba);
+ donebytes = 0;
+ TAILQ_INIT(&run);
+ prun = &run;
+ list->tbdio = 1;
+ TAILQ_INIT(&list->allio);
+ while (donebytes < numbytes) {
+ roundbytes = MIN(numbytes - donebytes, TPC_MAX_IO_SIZE);
+
+ if (list->token == NULL) {
+ tior = NULL;
+ goto dstw;
+ }
+ tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
+ TAILQ_INIT(&tior->run);
+ tior->list = list;
+ TAILQ_INSERT_TAIL(&list->allio, tior, links);
+ tior->io = tpcl_alloc_io();
+ if (tior->io == NULL) {
+ list->error = 1;
+ goto complete;
+ }
+ ctl_scsi_read_write(tior->io,
+ /*data_ptr*/ &list->buf[donebytes],
+ /*data_len*/ roundbytes,
+ /*read_op*/ 1,
+ /*byte2*/ 0,
+ /*minimum_cdb_size*/ 0,
+ /*lba*/ srclba + donebytes / srcblock,
+ /*num_blocks*/ roundbytes / srcblock,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+ tior->io->io_hdr.retries = 3;
+ tior->lun = list->token->lun;
+ tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
+
+dstw:
+ tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
+ TAILQ_INIT(&tiow->run);
+ tiow->list = list;
+ TAILQ_INSERT_TAIL(&list->allio, tiow, links);
+ tiow->io = tpcl_alloc_io();
+ if (tiow->io == NULL) {
+ list->error = 1;
+ goto complete;
+ }
+ ctl_scsi_read_write(tiow->io,
+ /*data_ptr*/ &list->buf[donebytes],
+ /*data_len*/ roundbytes,
+ /*read_op*/ 0,
+ /*byte2*/ 0,
+ /*minimum_cdb_size*/ 0,
+ /*lba*/ dstlba + donebytes / dstblock,
+ /*num_blocks*/ roundbytes / dstblock,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+ tiow->io->io_hdr.retries = 3;
+ tiow->lun = list->lun->lun;
+ tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
+
+ if (tior) {
+ TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
+ TAILQ_INSERT_TAIL(prun, tior, rlinks);
+ prun = &tior->run;
+ } else {
+ TAILQ_INSERT_TAIL(prun, tiow, rlinks);
+ prun = &tiow->run;
+ }
+ donebytes += roundbytes;
+ }
+
+ while ((tior = TAILQ_FIRST(&run)) != NULL) {
+ TAILQ_REMOVE(&run, tior, rlinks);
+ if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
+ panic("tpcl_queue() error");
+ }
+
+ list->stage++;
+ return (CTL_RETVAL_QUEUED);
+}
+
static void
tpc_process(struct tpc_list *list)
{
@@ -909,33 +1270,43 @@ tpc_process(struct tpc_list *list)
struct ctl_scsiio *ctsio = list->ctsio;
int retval = CTL_RETVAL_COMPLETE;
-//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
- while (list->curseg < list->nseg) {
- seg = list->seg[list->curseg];
- switch (seg->type_code) {
- case EC_SEG_B2B:
- retval = tpc_process_b2b(list);
- break;
- case EC_SEG_VERIFY:
- retval = tpc_process_verify(list);
- break;
- case EC_SEG_REGISTER_KEY:
- retval = tpc_process_register_key(list);
- break;
- default:
- ctl_set_sense(ctsio, /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_COPY_ABORTED,
- /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
- goto done;
- }
+ if (list->service_action == EC_WUT) {
+ retval = tpc_process_wut(list);
if (retval == CTL_RETVAL_QUEUED)
return;
if (retval == CTL_RETVAL_ERROR) {
list->error = 1;
goto done;
}
- list->curseg++;
- list->stage = 0;
+ } else {
+//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
+ while (list->curseg < list->nseg) {
+ seg = list->seg[list->curseg];
+ switch (seg->type_code) {
+ case EC_SEG_B2B:
+ retval = tpc_process_b2b(list);
+ break;
+ case EC_SEG_VERIFY:
+ retval = tpc_process_verify(list);
+ break;
+ case EC_SEG_REGISTER_KEY:
+ retval = tpc_process_register_key(list);
+ break;
+ default:
+ ctl_set_sense(ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_COPY_ABORTED,
+ /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
+ goto done;
+ }
+ if (retval == CTL_RETVAL_QUEUED)
+ return;
+ if (retval == CTL_RETVAL_ERROR) {
+ list->error = 1;
+ goto done;
+ }
+ list->curseg++;
+ list->stage = 0;
+ }
}
ctl_set_success(ctsio);
@@ -944,12 +1315,20 @@ done:
//printf("ZZZ done\n");
free(list->params, M_CTL);
list->params = NULL;
+ if (list->token) {
+ mtx_lock(&control_softc->ctl_lock);
+ if (--list->token->active == 0)
+ list->token->last_active = time_uptime;
+ mtx_unlock(&control_softc->ctl_lock);
+ list->token = NULL;
+ }
mtx_lock(&lun->lun_lock);
if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
TAILQ_REMOVE(&lun->tpc_lists, list, links);
free(list, M_CTL);
} else {
list->completed = 1;
+ list->last_active = time_uptime;
list->sense_data = ctsio->sense_data;
list->sense_len = ctsio->sense_len;
list->scsi_status = ctsio->scsi_status;
@@ -1361,3 +1740,455 @@ done:
return (CTL_RETVAL_COMPLETE);
}
+static void
+tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
+ struct scsi_token *token)
+{
+ static int id = 0;
+ struct scsi_vpd_id_descriptor *idd = NULL;
+ int targid_len;
+
+ scsi_ulto4b(ROD_TYPE_AUR, token->type);
+ scsi_ulto2b(0x01f8, token->length);
+ scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
+ if (lun->lun_devid)
+ idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
+ lun->lun_devid->data, lun->lun_devid->len,
+ scsi_devid_is_lun_naa);
+ if (idd == NULL && lun->lun_devid)
+ idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
+ lun->lun_devid->data, lun->lun_devid->len,
+ scsi_devid_is_lun_eui64);
+ if (idd != NULL)
+ memcpy(&token->body[8], idd, 4 + idd->length);
+ scsi_u64to8b(0, &token->body[40]);
+ scsi_u64to8b(len, &token->body[48]);
+ if (port->target_devid) {
+ targid_len = port->target_devid->len;
+ memcpy(&token->body[120], port->target_devid->data, targid_len);
+ } else
+ targid_len = 32;
+ arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
+}
+
+int
+ctl_populate_token(struct ctl_scsiio *ctsio)
+{
+ struct scsi_populate_token *cdb;
+ struct scsi_populate_token_data *data;
+ struct ctl_lun *lun;
+ struct ctl_port *port;
+ struct tpc_list *list, *tlist;
+ struct tpc_token *token;
+ int len, lendesc;
+
+ CTL_DEBUG_PRINT(("ctl_populate_token\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ port = control_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
+ cdb = (struct scsi_populate_token *)ctsio->cdb;
+ len = scsi_4btoul(cdb->length);
+
+ if (len < sizeof(struct scsi_populate_token_data) ||
+ len > sizeof(struct scsi_populate_token_data) +
+ TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+ /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
+
+ /*
+ * If we've got a kernel request that hasn't been malloced yet,
+ * malloc it and tell the caller the data buffer is here.
+ */
+ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+ ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+ ctsio->kern_data_len = len;
+ ctsio->kern_total_len = len;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
+ lendesc = scsi_2btoul(data->range_descriptor_length);
+ if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+ /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
+/*
+ printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
+ scsi_4btoul(cdb->list_identifier),
+ data->flags, scsi_4btoul(data->inactivity_timeout),
+ scsi_4btoul(data->rod_type),
+ scsi_2btoul(data->range_descriptor_length));
+*/
+ if ((data->flags & EC_PT_RTV) &&
+ scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+ /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
+
+ list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
+ list->service_action = cdb->service_action;
+ list->init_port = ctsio->io_hdr.nexus.targ_port;
+ list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ list->list_id = scsi_4btoul(cdb->list_identifier);
+ list->flags = data->flags;
+ list->ctsio = ctsio;
+ list->lun = lun;
+ mtx_lock(&lun->lun_lock);
+ tlist = tpc_find_list(lun, list->list_id, list->init_idx);
+ if (tlist != NULL && !tlist->completed) {
+ mtx_unlock(&lun->lun_lock);
+ free(list, M_CTL);
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+ /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ goto done;
+ }
+ if (tlist != NULL) {
+ TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
+ free(tlist, M_CTL);
+ }
+ TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
+ mtx_unlock(&lun->lun_lock);
+
+ token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
+ token->lun = lun->lun;
+ token->blocksize = lun->be_lun->blocksize;
+ token->params = ctsio->kern_data_ptr;
+ token->range = &data->desc[0];
+ token->nrange = scsi_2btoul(data->range_descriptor_length) /
+ sizeof(struct scsi_range_desc);
+ tpc_create_token(lun, port, list->curbytes,
+ (struct scsi_token *)token->token);
+ token->active = 0;
+ token->last_active = time_uptime;
+ token->timeout = scsi_4btoul(data->inactivity_timeout);
+ if (token->timeout == 0)
+ token->timeout = TPC_DFL_TOKEN_TIMEOUT;
+ else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
+ token->timeout = TPC_MIN_TOKEN_TIMEOUT;
+ else if (token->timeout > TPC_MAX_TOKEN_TIMEOUT) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+ /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ }
+ memcpy(list->res_token, token->token, sizeof(list->res_token));
+ list->res_token_valid = 1;
+ list->cursectors = tpc_ranges_length(token->range, token->nrange);
+ list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
+ list->curseg = 0;
+ list->completed = 1;
+ list->last_active = time_uptime;
+ mtx_lock(&control_softc->ctl_lock);
+ TAILQ_INSERT_TAIL(&control_softc->tpc_tokens, token, links);
+ mtx_unlock(&control_softc->ctl_lock);
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+
+done:
+ if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED)
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_write_using_token(struct ctl_scsiio *ctsio)
+{
+ struct scsi_write_using_token *cdb;
+ struct scsi_write_using_token_data *data;
+ struct ctl_lun *lun;
+ struct tpc_list *list, *tlist;
+ struct tpc_token *token;
+ int len, lendesc;
+
+ CTL_DEBUG_PRINT(("ctl_write_using_token\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ cdb = (struct scsi_write_using_token *)ctsio->cdb;
+ len = scsi_4btoul(cdb->length);
+
+ if (len < sizeof(struct scsi_populate_token_data) ||
+ len > sizeof(struct scsi_populate_token_data) +
+ TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+ /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
+
+ /*
+ * If we've got a kernel request that hasn't been malloced yet,
+ * malloc it and tell the caller the data buffer is here.
+ */
+ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+ ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+ ctsio->kern_data_len = len;
+ ctsio->kern_total_len = len;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
+ lendesc = scsi_2btoul(data->range_descriptor_length);
+ if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+ /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
+/*
+ printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
+ scsi_4btoul(cdb->list_identifier),
+ data->flags, scsi_8btou64(data->offset_into_rod),
+ scsi_2btoul(data->range_descriptor_length));
+*/
+ list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
+ list->service_action = cdb->service_action;
+ list->init_port = ctsio->io_hdr.nexus.targ_port;
+ list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ list->list_id = scsi_4btoul(cdb->list_identifier);
+ list->flags = data->flags;
+ list->params = ctsio->kern_data_ptr;
+ list->range = &data->desc[0];
+ list->nrange = scsi_2btoul(data->range_descriptor_length) /
+ sizeof(struct scsi_range_desc);
+ list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
+ list->ctsio = ctsio;
+ list->lun = lun;
+ mtx_lock(&lun->lun_lock);
+ tlist = tpc_find_list(lun, list->list_id, list->init_idx);
+ if (tlist != NULL && !tlist->completed) {
+ mtx_unlock(&lun->lun_lock);
+ free(list, M_CTL);
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+ /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ goto done;
+ }
+ if (tlist != NULL) {
+ TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
+ free(tlist, M_CTL);
+ }
+ TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
+ mtx_unlock(&lun->lun_lock);
+
+ /* Block device zero ROD token -> no token. */
+ if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
+ tpc_process(list);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ mtx_lock(&control_softc->ctl_lock);
+ TAILQ_FOREACH(token, &control_softc->tpc_tokens, links) {
+ if (memcmp(token->token, data->rod_token,
+ sizeof(data->rod_token)) == 0)
+ break;
+ }
+ if (token != NULL) {
+ token->active++;
+ list->token = token;
+ if (data->flags & EC_WUT_DEL_TKN)
+ token->timeout = 0;
+ }
+ mtx_unlock(&control_softc->ctl_lock);
+ if (token == NULL) {
+ mtx_lock(&lun->lun_lock);
+ TAILQ_REMOVE(&lun->tpc_lists, list, links);
+ mtx_unlock(&lun->lun_lock);
+ free(list, M_CTL);
+ ctl_set_sense(ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
+ goto done;
+ }
+
+ tpc_process(list);
+ return (CTL_RETVAL_COMPLETE);
+
+done:
+ if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED)
+ free(ctsio->kern_data_ptr, M_CTL);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct scsi_receive_rod_token_information *cdb;
+ struct scsi_receive_copy_status_lid4_data *data;
+ struct tpc_list *list;
+ struct tpc_list list_copy;
+ uint8_t *ptr;
+ int retval;
+ int alloc_len, total_len, token_len;
+ uint32_t list_id;
+
+ CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
+
+ cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ list_id = scsi_4btoul(cdb->list_identifier);
+ mtx_lock(&lun->lun_lock);
+ list = tpc_find_list(lun, list_id,
+ ctl_get_resindex(&ctsio->io_hdr.nexus));
+ if (list == NULL) {
+ mtx_unlock(&lun->lun_lock);
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+ /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (retval);
+ }
+ list_copy = *list;
+ if (list->completed) {
+ TAILQ_REMOVE(&lun->tpc_lists, list, links);
+ free(list, M_CTL);
+ }
+ mtx_unlock(&lun->lun_lock);
+
+ token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
+ total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
+ alloc_len = scsi_4btoul(cdb->length);
+
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+
+ ctsio->kern_sg_entries = 0;
+
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+
+ data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
+ scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
+ 4 + token_len, data->available_data);
+ data->response_to_service_action = list_copy.service_action;
+ if (list_copy.completed) {
+ if (list_copy.error)
+ data->copy_command_status = RCS_CCS_ERROR;
+ else if (list_copy.abort)
+ data->copy_command_status = RCS_CCS_ABORTED;
+ else
+ data->copy_command_status = RCS_CCS_COMPLETED;
+ } else
+ data->copy_command_status = RCS_CCS_INPROG_FG;
+ scsi_ulto2b(list_copy.curops, data->operation_counter);
+ scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
+ data->transfer_count_units = RCS_TC_LBAS;
+ scsi_u64to8b(list_copy.cursectors, data->transfer_count);
+ scsi_ulto2b(list_copy.curseg, data->segments_processed);
+ data->length_of_the_sense_data_field = list_copy.sense_len;
+ data->sense_data_length = list_copy.sense_len;
+ memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
+
+ ptr = &data->sense_data[data->length_of_the_sense_data_field];
+ scsi_ulto4b(token_len, &ptr[0]);
+ if (list_copy.res_token_valid) {
+ scsi_ulto2b(0, &ptr[4]);
+ memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
+ }
+/*
+ printf("RRTI(list=%u) valid=%d\n",
+ scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
+*/
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+
+ ctl_datamove((union ctl_io *)ctsio);
+ return (retval);
+}
+
+int
+ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct scsi_report_all_rod_tokens *cdb;
+ struct scsi_report_all_rod_tokens_data *data;
+ struct tpc_token *token;
+ int retval;
+ int alloc_len, total_len, tokens, i;
+
+ CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
+
+ cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ tokens = 0;
+ mtx_lock(&control_softc->ctl_lock);
+ TAILQ_FOREACH(token, &control_softc->tpc_tokens, links)
+ tokens++;
+ mtx_unlock(&control_softc->ctl_lock);
+ if (tokens > 512)
+ tokens = 512;
+
+ total_len = sizeof(*data) + tokens * 96;
+ alloc_len = scsi_4btoul(cdb->length);
+
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+
+ ctsio->kern_sg_entries = 0;
+
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+
+ data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
+ i = 0;
+ mtx_lock(&control_softc->ctl_lock);
+ TAILQ_FOREACH(token, &control_softc->tpc_tokens, links) {
+ if (i >= tokens)
+ break;
+ memcpy(&data->rod_management_token_list[i * 96],
+ token->token, 96);
+ i++;
+ }
+ mtx_unlock(&control_softc->ctl_lock);
+ scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
+/*
+ printf("RART tokens=%d\n", i);
+*/
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+
+ ctl_datamove((union ctl_io *)ctsio);
+ return (retval);
+}
+
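tpc_process_wut() restarts each round by mapping the running sector count (list->cursectors, scaled by the block-size ratio on the source side) back into the range-descriptor lists through tpc_skip_ranges(). A simplified userland model of that walk, using plain integers in place of the big-endian scsi_range_desc fields (names hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t lba; uint32_t length; };        /* in sectors */

    /* Locate the range and in-range offset 'skip' sectors from the start. */
    static int
    skip_ranges(const struct range *r, int nrange, uint64_t skip,
        int *srange, uint64_t *soffset)
    {
            uint64_t off = 0;
            int i;

            for (i = 0; i < nrange; i++) {
                    if (skip - off < r[i].length) {
                            *srange = i;
                            *soffset = skip - off;
                            return (0);
                    }
                    off += r[i].length;
            }
            return (-1);            /* ran past the end: copy is complete */
    }

    int
    main(void)
    {
            struct range rod[] = { { 100, 8 }, { 500, 16 } };
            int idx;
            uint64_t off;

            if (skip_ranges(rod, 2, 10, &idx, &off) == 0)
                    printf("sector 10 -> range %d, offset %ju\n",
                        idx, (uintmax_t)off);       /* range 1, offset 2 */
            return (0);
    }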
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index 8351aa8..0f80532 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -5435,32 +5435,37 @@ scsi_devid_is_lun_name(uint8_t *bufp)
}
struct scsi_vpd_id_descriptor *
-scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t page_len,
+scsi_get_devid_desc(struct scsi_vpd_id_descriptor *desc, uint32_t len,
scsi_devid_checkfn_t ck_fn)
{
- struct scsi_vpd_id_descriptor *desc;
- uint8_t *page_end;
uint8_t *desc_buf_end;
- page_end = (uint8_t *)id + page_len;
- if (page_end < id->desc_list)
- return (NULL);
-
- desc_buf_end = MIN(id->desc_list + scsi_2btoul(id->length), page_end);
+ desc_buf_end = (uint8_t *)desc + len;
- for (desc = (struct scsi_vpd_id_descriptor *)id->desc_list;
- desc->identifier <= desc_buf_end
- && desc->identifier + desc->length <= desc_buf_end;
- desc = (struct scsi_vpd_id_descriptor *)(desc->identifier
+ for (; desc->identifier <= desc_buf_end &&
+ desc->identifier + desc->length <= desc_buf_end;
+ desc = (struct scsi_vpd_id_descriptor *)(desc->identifier
+ desc->length)) {
if (ck_fn == NULL || ck_fn((uint8_t *)desc) != 0)
return (desc);
}
-
return (NULL);
}
+struct scsi_vpd_id_descriptor *
+scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t page_len,
+ scsi_devid_checkfn_t ck_fn)
+{
+ uint32_t len;
+
+ if (page_len < sizeof(*id))
+ return (NULL);
+ len = MIN(scsi_2btoul(id->length), page_len - sizeof(*id));
+ return (scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
+ id->desc_list, len, ck_fn));
+}
+
int
scsi_transportid_sbuf(struct sbuf *sb, struct scsi_transportid_header *hdr,
uint32_t valid_len)
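After this refactor, scsi_get_devid() is a thin wrapper that validates the VPD page header and clamps the descriptor-list length before delegating, so callers that already hold a bare descriptor list, as tpc_create_token() does with lun->lun_devid->data, can use scsi_get_devid_desc() directly. The check-function contract is unchanged: return nonzero to accept a descriptor. A hypothetical filter in the style of the existing scsi_devid_is_lun_naa()/scsi_devid_is_lun_t10() helpers:

    static int
    devid_is_t10(uint8_t *bufp)
    {
            struct scsi_vpd_id_descriptor *d;

            d = (struct scsi_vpd_id_descriptor *)bufp;
            return ((d->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_T10);
    }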
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index 1e0cc97..6f92df1 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -1351,7 +1351,7 @@ struct scsi_receive_copy_status_lid4_data
uint8_t transfer_count_units;
uint8_t transfer_count[8];
uint8_t segments_processed[2];
- uint8_t reserved[2];
+ uint8_t reserved[6];
uint8_t sense_data[];
};
@@ -1533,6 +1533,110 @@ struct scsi_copy_operation_abort
uint8_t control;
};
+struct scsi_populate_token
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define EC_PT 0x10
+ uint8_t reserved[4];
+ uint8_t list_identifier[4];
+ uint8_t length[4];
+ uint8_t group_number;
+ uint8_t control;
+};
+
+struct scsi_range_desc
+{
+ uint8_t lba[8];
+ uint8_t length[4];
+ uint8_t reserved[4];
+};
+
+struct scsi_populate_token_data
+{
+ uint8_t length[2];
+ uint8_t flags;
+#define EC_PT_IMMED 0x01
+#define EC_PT_RTV 0x02
+ uint8_t reserved;
+ uint8_t inactivity_timeout[4];
+ uint8_t rod_type[4];
+ uint8_t reserved2[2];
+ uint8_t range_descriptor_length[2];
+ struct scsi_range_desc desc[];
+};
+
+struct scsi_write_using_token
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define EC_WUT 0x11
+ uint8_t reserved[4];
+ uint8_t list_identifier[4];
+ uint8_t length[4];
+ uint8_t group_number;
+ uint8_t control;
+};
+
+struct scsi_write_using_token_data
+{
+ uint8_t length[2];
+ uint8_t flags;
+#define EC_WUT_IMMED 0x01
+#define EC_WUT_DEL_TKN 0x02
+ uint8_t reserved[5];
+ uint8_t offset_into_rod[8];
+ uint8_t rod_token[512];
+ uint8_t reserved2[6];
+ uint8_t range_descriptor_length[2];
+ struct scsi_range_desc desc[];
+};
+
+struct scsi_receive_rod_token_information
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define RCS_RRTI 0x07
+ uint8_t list_identifier[4];
+ uint8_t reserved[4];
+ uint8_t length[4];
+ uint8_t reserved2;
+ uint8_t control;
+};
+
+struct scsi_token
+{
+ uint8_t type[4];
+#define ROD_TYPE_INTERNAL 0x00000000
+#define ROD_TYPE_AUR 0x00001000
+#define ROD_TYPE_PIT_DEF 0x00080000
+#define ROD_TYPE_PIT_VULN 0x00080001
+#define ROD_TYPE_PIT_PERS 0x00080002
+#define ROD_TYPE_PIT_ANY 0x0008FFFF
+#define ROD_TYPE_BLOCK_ZERO 0xFFFF0001
+ uint8_t reserved[2];
+ uint8_t length[2];
+ uint8_t body[0];
+};
+
+struct scsi_report_all_rod_tokens
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define RCS_RART 0x08
+ uint8_t reserved[8];
+ uint8_t length[4];
+ uint8_t reserved2;
+ uint8_t control;
+};
+
+struct scsi_report_all_rod_tokens_data
+{
+ uint8_t available_data[4];
+ uint8_t reserved[4];
+ uint8_t rod_management_token_list[];
+};
+
struct ata_pass_16 {
u_int8_t opcode;
u_int8_t protocol;
@@ -2052,6 +2156,19 @@ struct scsi_vpd_tpc_descriptor
uint8_t parameters[];
};
+struct scsi_vpd_tpc_descriptor_bdrl
+{
+ uint8_t desc_type[2];
+#define SVPD_TPC_BDRL 0x0000
+ uint8_t desc_length[2];
+ uint8_t vendor_specific[6];
+ uint8_t maximum_ranges[2];
+ uint8_t maximum_inactivity_timeout[4];
+ uint8_t default_inactivity_timeout[4];
+ uint8_t maximum_token_transfer_size[8];
+ uint8_t optimal_transfer_count[8];
+};
+
struct scsi_vpd_tpc_descriptor_sc_descr
{
uint8_t opcode;
@@ -2099,6 +2216,58 @@ struct scsi_vpd_tpc_descriptor_sdid
uint8_t supported_descriptor_ids[];
};
+struct scsi_vpd_tpc_descriptor_rtf_block
+{
+ uint8_t type_format;
+#define SVPD_TPC_RTF_BLOCK 0x00
+ uint8_t reserved;
+ uint8_t desc_length[2];
+ uint8_t reserved2[2];
+ uint8_t optimal_length_granularity[2];
+ uint8_t maximum_bytes[8];
+ uint8_t optimal_bytes[8];
+ uint8_t optimal_bytes_to_token_per_segment[8];
+ uint8_t optimal_bytes_from_token_per_segment[8];
+ uint8_t reserved3[8];
+};
+
+struct scsi_vpd_tpc_descriptor_rtf
+{
+ uint8_t desc_type[2];
+#define SVPD_TPC_RTF 0x0106
+ uint8_t desc_length[2];
+ uint8_t remote_tokens;
+ uint8_t reserved[11];
+ uint8_t minimum_token_lifetime[4];
+ uint8_t maximum_token_lifetime[4];
+ uint8_t maximum_token_inactivity_timeout[4];
+ uint8_t reserved2[18];
+ uint8_t type_specific_features_length[2];
+ uint8_t type_specific_features[0];
+};
+
+struct scsi_vpd_tpc_descriptor_srtd
+{
+ uint8_t rod_type[4];
+ uint8_t flags;
+#define SVPD_TPC_SRTD_TOUT 0x01
+#define SVPD_TPC_SRTD_TIN 0x02
+#define SVPD_TPC_SRTD_ECPY 0x80
+ uint8_t reserved;
+ uint8_t preference_indicator[2];
+ uint8_t reserved2[56];
+};
+
+struct scsi_vpd_tpc_descriptor_srt
+{
+ uint8_t desc_type[2];
+#define SVPD_TPC_SRT 0x0108
+ uint8_t desc_length[2];
+ uint8_t reserved[2];
+ uint8_t rod_type_descriptors_length[2];
+ uint8_t rod_type_descriptors[0];
+};
+
struct scsi_vpd_tpc_descriptor_gco
{
uint8_t desc_type[2];
@@ -3027,6 +3196,9 @@ int scsi_devid_is_lun_t10(uint8_t *bufp);
struct scsi_vpd_id_descriptor *
scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t len,
scsi_devid_checkfn_t ck_fn);
+struct scsi_vpd_id_descriptor *
+ scsi_get_devid_desc(struct scsi_vpd_id_descriptor *desc, uint32_t len,
+ scsi_devid_checkfn_t ck_fn);
int scsi_transportid_sbuf(struct sbuf *sb,
struct scsi_transportid_header *hdr,
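All four new CDB structs are exactly 16 bytes of uint8_t fields, matching the 16-byte CDB lengths and usage maps registered in ctl_cmd_table.c above. A compile-time sanity check one could keep next to the definitions (illustrative, not part of the commit; in-kernel code would use CTASSERT(9) rather than C11 static_assert):

    #include <assert.h>

    static_assert(sizeof(struct scsi_populate_token) == 16,
        "POPULATE TOKEN CDB is 16 bytes");
    static_assert(sizeof(struct scsi_write_using_token) == 16,
        "WRITE USING TOKEN CDB is 16 bytes");
    static_assert(sizeof(struct scsi_receive_rod_token_information) == 16,
        "RECEIVE ROD TOKEN INFORMATION CDB is 16 bytes");
    static_assert(sizeof(struct scsi_report_all_rod_tokens) == 16,
        "REPORT ALL ROD TOKENS CDB is 16 bytes");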